Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-28 04:52:55 +00:00.
Compare commits
233 Commits
addconvers...binggan-0.
Commits in this comparison (abbreviated SHA1):

80538175e8, 8dc942e8e7, c17e513377, 2f5a269c70, 50532260e3, 8bd6eb06e6, 55b0b52457, 56fc56c5b9, 85395d942a, a206c3ccd3,
dc5d31c116, 95a4ddea3e, ab5125d3dc, 9f81d59ecd, c71ec8086d, 27be6aed91, 3d1c4b313a, 0d4e319965, 75dc3eb298, 3f6d225086,
d8843c608c, 7ebcc15b17, 1b4076691f, eab660873a, 232f37126e, 13e9885dfd, 56d79cb203, 0f4c2e27cf, f9ae295507, d9db5302d9,
e453848134, 59084143ef, 511b027350, 322f47eb47, 72f61ff89c, a141c3ec59, e90e7a25ae, c3b92a5412, 2f55511064, 08b9fc0b31,
714f363d43, 93ff7365b0, 8151925068, b960e40bc8, 1095c9b073, c0686515a9, 455156f51c, 4143d31865, 0c634adbe1, 2e3641c2ae,
b806122c81, e1679f3fb9, 5a80420b10, aa26ff5029, e197b59258, 5b7cca13e5, a79590477e, 6181c1eb5e, 1ee5f90761, 71f3b4e4e3,
8cd7ddc535, 2b76335a95, c6b213d8f0, eea70030bf, 92b5526310, 99a59ad37e, 6a66a71cbb, ff40764204, 047da20b5b, 1417eaf3a7,
4f8493d2de, 8861366137, 0e9fced336, b257b960b3, 4708171a32, b493743f8d, d2955a3fd2, 17d5869ad6, dfa3aed32d, 398817ce7b,
74940e9345, 1e9fc51535, 92c32979d2, b644d78a32, 4e79e11007, 67ebba3c3c, 7ce950f141, 0cffe5fb09, b0e65560a1, ec37295b2f,
f6b0cc1aab, 7e41d31c6e, 40aa4abfe5, 2650317622, 6739357314, d57622d54b, f745dbc054, 79b041f81f, 0e16ed9ef7, 88a3275dbb,
1223a87eb2, 48630ceec9, 72002e8a89, 3c9297dd64, 0e04ec3136, 9b7f3a55cf, 1dacdb6c85, 30483310ca, e1d18b5114, 108f30ba23,
5943ee46bd, f95a76293f, 014328e378, 53f2fe1fbe, 9c75942aaf, bff7c58497, 9ebc5ed053, 0b56c88e69, 24841f0b2a, 1a9fc10be9,
07573a7f19, daad2dc151, 054f49dc31, 47009ed2d3, 0aae31d7d7, 9caab45136, 6d9a7b7eb0, 7a2c5804b1, 5319977171, 828632e8c4,
6b59ec6fd5, b60d862150, 4837c7811a, 5a2397d57e, 927b4432c9, 7a0064db1f, 2e7327205d, 7bc5bf78e2, ef603c8c7e, 28dd6b6546,
1dda2bb537, bf6544cf28, ccecf946f7, 19a859d6fd, 83af14caa4, 4feeb2323d, 07bf66a197, 0d4589219b, c2b0469180, 7e1980b218,
ecb9a89a9f, 5e06e504e6, 182f58cea6, 337ffadefd, 22aa4daf19, 493f9b2f2a, e246e5765d, 6097235eff, b700c42246, 5b1bf1a993,
041d4fced7, 166fc15239, 514a6e7fef, 82d9127191, 03a1f40767, 1c7c6fd591, b525f653c0, 90586bc1e2, 832f1633de, 38db53c465,
34920d31f5, 0241a05b90, e125f3b041, c520ac46fc, 2d7390341c, 03fcdce016, e4e416ac42, 19325132b7, 389d36f760, 49448b31c6,
ebede0bed7, b1d8b072db, ee6a7c2bbb, c4e2708901, 5c8cfa50eb, 73cb71762f, 267dfe58d7, 131c10d318, e6cacc40a9, 48d4847b38,
59460c767f, 756156beaf, 480763db0d, 62ece86f24, 52d9e6f298, 47b315ff18, ed1deee902, 2e109018b7, 22c35b1e00, b92082b748,
c2be6603a2, c805f08ca7, ccc0335158, 42acd334f4, 820f126075, 7e6c4a1856, 5fafe4b1ab, 1e7cd48cfa, 7f51d85bbd, ad76e32398,
7575f9bf1c, 67bdf3f5f6, 3c300666ad, b91d3f6be4, a8e76513bb, 0a23201338, 81330aaf89, 98a3b01992, d341520938, 5c9af73e41,
ad4c940fa3, 910b0b0c61, 3fef052bf1, 040554f2f9, 17186ca9c9, 212d59c9ab, 1a1f252a3f, d73706dede, 44850e1036, 3b0cbf8102,
4aa131c3db, 59962097d0, ebc78127f3
.github/workflows/coverage.yml (8 changed lines)
@@ -3,8 +3,6 @@ name: Coverage
on:
push:
branches: [main]
pull_request:
branches: [main]

# Ensures that we cancel running jobs for the same PR / same workflow.
concurrency:
@@ -15,13 +13,13 @@ jobs:
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install Rust
run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
run: rustup toolchain install nightly-2024-07-01 --profile minimal --component llvm-tools-preview
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo +nightly llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
run: cargo +nightly-2024-07-01 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
continue-on-error: true
.github/workflows/long_running.yml (2 changed lines)
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install stable
uses: actions-rs/toolchain@v1
with:
.github/workflows/test.yml (13 changed lines)
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4

- name: Install nightly
uses: actions-rs/toolchain@v1
@@ -39,6 +39,13 @@ jobs:

- name: Check Formatting
run: cargo +nightly fmt --all -- --check

- name: Check Stable Compilation
run: cargo build --all-features


- name: Check Bench Compilation
run: cargo +nightly bench --no-run --profile=dev --all-features

- uses: actions-rs/clippy-check@v1
with:
@@ -53,14 +60,14 @@ jobs:
strategy:
matrix:
features: [
{ label: "all", flags: "mmap,stopwords,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
{ label: "all", flags: "mmap,stopwords,lz4-compression,zstd-compression,failpoints" },
{ label: "quickwit", flags: "mmap,quickwit,failpoints" }
]

name: test-${{ matrix.features.label}}

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4

- name: Install stable
uses: actions-rs/toolchain@v1
CHANGELOG.md (188 changed lines)
@@ -1,5 +1,175 @@
Tantivy 0.23 - Unreleased
================================
Tantivy 0.23 will be backwards compatible with indices created with v0.22 and v0.21.

Tantivy 0.20 [Unreleased]
#### Bugfixes
- fix potential endless loop in merge [#2457](https://github.com/quickwit-oss/tantivy/pull/2457)(@PSeitz)
- fix bug that causes out-of-order sstable key. [#2445](https://github.com/quickwit-oss/tantivy/pull/2445)(@fulmicoton)
- fix ReferenceValue API flaw [#2372](https://github.com/quickwit-oss/tantivy/pull/2372)(@PSeitz)

#### Breaking API Changes
- remove index sorting [#2434](https://github.com/quickwit-oss/tantivy/pull/2434)(@PSeitz)

#### Features/Improvements
- **Aggregation**
- Support for cardinality aggregation [#2337](https://github.com/quickwit-oss/tantivy/pull/2337) [#2446](https://github.com/quickwit-oss/tantivy/pull/2446) (@raphaelcoeffic @PSeitz)
- Support for extended stats aggregation [#2247](https://github.com/quickwit-oss/tantivy/pull/2247)(@giovannicuccu)
- Add Key::I64 and Key::U64 variants in aggregation to avoid f64 precision issues [#2468](https://github.com/quickwit-oss/tantivy/pull/2468)(@PSeitz)
- Faster term aggregation fetch terms [#2447](https://github.com/quickwit-oss/tantivy/pull/2447)(@PSeitz)
- Improve custom order deserialization [#2451](https://github.com/quickwit-oss/tantivy/pull/2451)(@PSeitz)
- Change AggregationLimits behavior [#2495](https://github.com/quickwit-oss/tantivy/pull/2495)(@PSeitz)
- lower contention on AggregationLimits [#2394](https://github.com/quickwit-oss/tantivy/pull/2394)(@PSeitz)
- fix postcard compatibility for top_hits, add postcard test [#2346](https://github.com/quickwit-oss/tantivy/pull/2346)(@PSeitz)
- reduce top hits memory consumption [#2426](https://github.com/quickwit-oss/tantivy/pull/2426)(@PSeitz)
- check unsupported parameters top_hits [#2351](https://github.com/quickwit-oss/tantivy/pull/2351)(@PSeitz)
- Change AggregationLimits to AggregationLimitsGuard [#2495](https://github.com/quickwit-oss/tantivy/pull/2495)(@PSeitz)
- **Range Queries**
- Support fast field range queries on json fields [#2456](https://github.com/quickwit-oss/tantivy/pull/2456)(@PSeitz)
- Add support for str fast field range query [#2460](https://github.com/quickwit-oss/tantivy/pull/2460) [#2452](https://github.com/quickwit-oss/tantivy/pull/2452) [#2453](https://github.com/quickwit-oss/tantivy/pull/2453)(@PSeitz)
- modify fastfield range query heuristic [#2375](https://github.com/quickwit-oss/tantivy/pull/2375)(@trinity-1686a)
- add FastFieldRangeQuery for explicit range queries on fast field (for `RangeQuery` it is autodetected) [#2477](https://github.com/quickwit-oss/tantivy/pull/2477)(@PSeitz)

- add format backwards-compatibility tests [#2485](https://github.com/quickwit-oss/tantivy/pull/2485)(@PSeitz)
- add columnar format compatibility tests [#2433](https://github.com/quickwit-oss/tantivy/pull/2433)(@PSeitz)
- Improved snippet ranges algorithm [#2474](https://github.com/quickwit-oss/tantivy/pull/2474)(@gezihuzi)
- make find_field_with_default return json fields without path [#2476](https://github.com/quickwit-oss/tantivy/pull/2476)(@trinity-1686a)
- feat(query): Make `BooleanQuery` support `minimum_number_should_match` [#2405](https://github.com/quickwit-oss/tantivy/pull/2405)(@LebranceBW)

- **Optional Index in Multivalue Columnar Index** For mostly empty multivalued indices there was a large overhead during creation when iterating all docids (merge case). This is alleviated by placing an optional index in the multivalued index to mark documents that have values. This will slightly increase space and access time. [#2439](https://github.com/quickwit-oss/tantivy/pull/2439)(@PSeitz)

- **Performace/Memory**
- lift clauses in LogicalAst for optimized ast during execution [#2449](https://github.com/quickwit-oss/tantivy/pull/2449)(@PSeitz)
- Use Vec instead of BTreeMap to back OwnedValue object [#2364](https://github.com/quickwit-oss/tantivy/pull/2364)(@fulmicoton)
- Replace TantivyDocument with CompactDoc. CompactDoc is much smaller and provides similar performance. [#2402](https://github.com/quickwit-oss/tantivy/pull/2402)(@PSeitz)
- Recycling buffer in PrefixPhraseScorer [#2443](https://github.com/quickwit-oss/tantivy/pull/2443)(@fulmicoton)

- **Json Type**
- JSON supports now all values on the root level. Previously an object was required. This enables support for flat mixed types. allow more JSON values, fix i64 special case [#2383](https://github.com/quickwit-oss/tantivy/pull/2383)(@PSeitz)
- add json path constructor to term [#2367](https://github.com/quickwit-oss/tantivy/pull/2367)(@PSeitz)

- **QueryParser**
- fix de-escaping too much in query parser [#2427](https://github.com/quickwit-oss/tantivy/pull/2427)(@trinity-1686a)
- improve query parser [#2416](https://github.com/quickwit-oss/tantivy/pull/2416)(@trinity-1686a)
- Support field grouping `title:(return AND "pink panther")` [#2333](https://github.com/quickwit-oss/tantivy/pull/2333)(@trinity-1686a)

- add access benchmark for columnar [#2432](https://github.com/quickwit-oss/tantivy/pull/2432)(@PSeitz)
- extend indexwriter proptests [#2342](https://github.com/quickwit-oss/tantivy/pull/2342)(@PSeitz)
- add bench & test for columnar merging [#2428](https://github.com/quickwit-oss/tantivy/pull/2428)(@PSeitz)
- Change in Executor API [#2391](https://github.com/quickwit-oss/tantivy/pull/2391)(@fulmicoton)
- Removed usage of num_cpus [#2387](https://github.com/quickwit-oss/tantivy/pull/2387)(@fulmicoton)
- use bingang for agg benchmark [#2378](https://github.com/quickwit-oss/tantivy/pull/2378)(@PSeitz)
- cleanup top level exports [#2382](https://github.com/quickwit-oss/tantivy/pull/2382)(@PSeitz)
- make convert_to_fast_value_and_append_to_json_term pub [#2370](https://github.com/quickwit-oss/tantivy/pull/2370)(@PSeitz)
- remove JsonTermWriter [#2238](https://github.com/quickwit-oss/tantivy/pull/2238)(@PSeitz)
- validate sort by field type [#2336](https://github.com/quickwit-oss/tantivy/pull/2336)(@PSeitz)
- Fix trait bound of StoreReader::iter [#2360](https://github.com/quickwit-oss/tantivy/pull/2360)(@adamreichold)

Tantivy 0.22
================================

Tantivy 0.22 will be able to read indices created with Tantivy 0.21.

#### Bugfixes
- Fix null byte handling in JSON paths (null bytes in json keys caused panic during indexing) [#2345](https://github.com/quickwit-oss/tantivy/pull/2345)(@PSeitz)
- Fix bug that can cause `get_docids_for_value_range` to panic. [#2295](https://github.com/quickwit-oss/tantivy/pull/2295)(@fulmicoton)
- Avoid 1 document indices by increase min memory to 15MB for indexing [#2176](https://github.com/quickwit-oss/tantivy/pull/2176)(@PSeitz)
- Fix merge panic for JSON fields [#2284](https://github.com/quickwit-oss/tantivy/pull/2284)(@PSeitz)
- Fix bug occurring when merging JSON object indexed with positions. [#2253](https://github.com/quickwit-oss/tantivy/pull/2253)(@fulmicoton)
- Fix empty DateHistogram gap bug [#2183](https://github.com/quickwit-oss/tantivy/pull/2183)(@PSeitz)
- Fix range query end check (fields with less than 1 value per doc are affected) [#2226](https://github.com/quickwit-oss/tantivy/pull/2226)(@PSeitz)
- Handle exclusive out of bounds ranges on fastfield range queries [#2174](https://github.com/quickwit-oss/tantivy/pull/2174)(@PSeitz)

#### Breaking API Changes
- rename ReloadPolicy onCommit to onCommitWithDelay [#2235](https://github.com/quickwit-oss/tantivy/pull/2235)(@giovannicuccu)
- Move exports from the root into modules [#2220](https://github.com/quickwit-oss/tantivy/pull/2220)(@PSeitz)
- Accept field name instead of `Field` in FilterCollector [#2196](https://github.com/quickwit-oss/tantivy/pull/2196)(@PSeitz)
- remove deprecated IntOptions and DateTime [#2353](https://github.com/quickwit-oss/tantivy/pull/2353)(@PSeitz)

#### Features/Improvements
- Tantivy documents as a trait: Index data directly without converting to tantivy types first [#2071](https://github.com/quickwit-oss/tantivy/pull/2071)(@ChillFish8)
- encode some part of posting list as -1 instead of direct values (smaller inverted indices) [#2185](https://github.com/quickwit-oss/tantivy/pull/2185)(@trinity-1686a)
- **Aggregation**
- Support to deserialize f64 from string [#2311](https://github.com/quickwit-oss/tantivy/pull/2311)(@PSeitz)
- Add a top_hits aggregator [#2198](https://github.com/quickwit-oss/tantivy/pull/2198)(@ditsuke)
- Support bool type in term aggregation [#2318](https://github.com/quickwit-oss/tantivy/pull/2318)(@PSeitz)
- Support ip addresses in term aggregation [#2319](https://github.com/quickwit-oss/tantivy/pull/2319)(@PSeitz)
- Support date type in term aggregation [#2172](https://github.com/quickwit-oss/tantivy/pull/2172)(@PSeitz)
- Support escaped dot when addressing field [#2250](https://github.com/quickwit-oss/tantivy/pull/2250)(@PSeitz)

- Add ExistsQuery to check documents that have a value [#2160](https://github.com/quickwit-oss/tantivy/pull/2160)(@imotov)
- Expose TopDocs::order_by_u64_field again [#2282](https://github.com/quickwit-oss/tantivy/pull/2282)(@ditsuke)

- **Memory/Performance**
- Faster TopN: replace BinaryHeap with TopNComputer [#2186](https://github.com/quickwit-oss/tantivy/pull/2186)(@PSeitz)
- reduce number of allocations during indexing [#2257](https://github.com/quickwit-oss/tantivy/pull/2257)(@PSeitz)
- Less Memory while indexing: docid deltas while indexing [#2249](https://github.com/quickwit-oss/tantivy/pull/2249)(@PSeitz)
- Faster indexing: use term hashmap in fastfield [#2243](https://github.com/quickwit-oss/tantivy/pull/2243)(@PSeitz)
- term hashmap remove copy in is_empty, unused unordered_id [#2229](https://github.com/quickwit-oss/tantivy/pull/2229)(@PSeitz)
- add method to fetch block of first values in columnar [#2330](https://github.com/quickwit-oss/tantivy/pull/2330)(@PSeitz)
- Faster aggregations: add fast path for full columns in fetch_block [#2328](https://github.com/quickwit-oss/tantivy/pull/2328)(@PSeitz)
- Faster sstable loading: use fst for sstable index [#2268](https://github.com/quickwit-oss/tantivy/pull/2268)(@trinity-1686a)

- **QueryParser**
- allow newline where we allow space in query parser [#2302](https://github.com/quickwit-oss/tantivy/pull/2302)(@trinity-1686a)
- allow some mixing of occur and bool in strict query parser [#2323](https://github.com/quickwit-oss/tantivy/pull/2323)(@trinity-1686a)
- handle * inside term in lenient query parser [#2228](https://github.com/quickwit-oss/tantivy/pull/2228)(@trinity-1686a)
- add support for exists query syntax in query parser [#2170](https://github.com/quickwit-oss/tantivy/pull/2170)(@trinity-1686a)
- Add shared search executor [#2312](https://github.com/quickwit-oss/tantivy/pull/2312)(@MochiXu)
- Truncate keys to u16::MAX in term hashmap [#2299](https://github.com/quickwit-oss/tantivy/pull/2299)(@PSeitz)
- report if a term matched when warming up posting list [#2309](https://github.com/quickwit-oss/tantivy/pull/2309)(@trinity-1686a)
- Support json fields in FuzzyTermQuery [#2173](https://github.com/quickwit-oss/tantivy/pull/2173)(@PingXia-at)
- Read list of fields encoded in term dictionary for JSON fields [#2184](https://github.com/quickwit-oss/tantivy/pull/2184)(@PSeitz)
- add collect_block to BoxableSegmentCollector [#2331](https://github.com/quickwit-oss/tantivy/pull/2331)(@PSeitz)
- expose collect_block buffer size [#2326](https://github.com/quickwit-oss/tantivy/pull/2326)(@PSeitz)
- Forward regex parser errors [#2288](https://github.com/quickwit-oss/tantivy/pull/2288)(@adamreichold)
- Make FacetCounts defaultable and cloneable. [#2322](https://github.com/quickwit-oss/tantivy/pull/2322)(@adamreichold)
- Derive Debug for SchemaBuilder [#2254](https://github.com/quickwit-oss/tantivy/pull/2254)(@GodTamIt)
- add missing inlines to tantivy options [#2245](https://github.com/quickwit-oss/tantivy/pull/2245)(@PSeitz)

Tantivy 0.21.1
================================
#### Bugfixes
- Range queries on fast fields with less values on that field than documents had an invalid end condition, leading to missing results. [#2226](https://github.com/quickwit-oss/tantivy/issues/2226)(@appaquet @PSeitz)
- Increase the minimum memory budget from 3MB to 15MB to avoid single doc segments (API fix). [#2176](https://github.com/quickwit-oss/tantivy/issues/2176)(@PSeitz)

Tantivy 0.21
================================
#### Bugfixes
- Fix track fast field memory consumption, which led to higher memory consumption than the budget allowed during indexing [#2148](https://github.com/quickwit-oss/tantivy/issues/2148)[#2147](https://github.com/quickwit-oss/tantivy/issues/2147)(@PSeitz)
- Fix a regression from 0.20 where sort index by date wasn't working anymore [#2124](https://github.com/quickwit-oss/tantivy/issues/2124)(@PSeitz)
- Fix getting the root facet on the `FacetCollector`. [#2086](https://github.com/quickwit-oss/tantivy/issues/2086)(@adamreichold)
- Align numerical type priority order of columnar and query. [#2088](https://github.com/quickwit-oss/tantivy/issues/2088)(@fmassot)
#### Breaking Changes
- Remove support for Brotli and Snappy compression [#2123](https://github.com/quickwit-oss/tantivy/issues/2123)(@adamreichold)
#### Features/Improvements
- Implement lenient query parser [#2129](https://github.com/quickwit-oss/tantivy/pull/2129)(@trinity-1686a)
- order_by_u64_field and order_by_fast_field allow sorting in ascending and descending order [#2111](https://github.com/quickwit-oss/tantivy/issues/2111)(@naveenann)
- Allow dynamic filters in text analyzer builder [#2110](https://github.com/quickwit-oss/tantivy/issues/2110)(@fulmicoton @fmassot)
- **Aggregation**
- Add missing parameter for term aggregation [#2149](https://github.com/quickwit-oss/tantivy/issues/2149)[#2103](https://github.com/quickwit-oss/tantivy/issues/2103)(@PSeitz)
- Add missing parameter for percentiles [#2157](https://github.com/quickwit-oss/tantivy/issues/2157)(@PSeitz)
- Add missing parameter for stats,min,max,count,sum,avg [#2151](https://github.com/quickwit-oss/tantivy/issues/2151)(@PSeitz)
- Improve aggregation deserialization error message [#2150](https://github.com/quickwit-oss/tantivy/issues/2150)(@PSeitz)
- Add validation for type Bytes to term_agg [#2077](https://github.com/quickwit-oss/tantivy/issues/2077)(@PSeitz)
- Alternative mixed field collection [#2135](https://github.com/quickwit-oss/tantivy/issues/2135)(@PSeitz)
- Add missing query_terms impl for TermSetQuery. [#2120](https://github.com/quickwit-oss/tantivy/issues/2120)(@adamreichold)
- Minor improvements to OwnedBytes [#2134](https://github.com/quickwit-oss/tantivy/issues/2134)(@adamreichold)
- Remove allocations in split compound words [#2080](https://github.com/quickwit-oss/tantivy/issues/2080)(@PSeitz)
- Ngram tokenizer now returns an error with invalid arguments [#2102](https://github.com/quickwit-oss/tantivy/issues/2102)(@fmassot)
- Make TextAnalyzerBuilder public [#2097](https://github.com/quickwit-oss/tantivy/issues/2097)(@adamreichold)
- Return an error when tokenizer is not found while indexing [#2093](https://github.com/quickwit-oss/tantivy/issues/2093)(@naveenann)
- Delayed column opening during merge [#2132](https://github.com/quickwit-oss/tantivy/issues/2132)(@PSeitz)

Tantivy 0.20.2
================================
- Align numerical type priority order on the search side. [#2088](https://github.com/quickwit-oss/tantivy/issues/2088) (@fmassot)
- Fix is_child_of function not considering the root facet. [#2086](https://github.com/quickwit-oss/tantivy/issues/2086) (@adamreichhold)

Tantivy 0.20.1
================================
- Fix building on windows with mmap [#2070](https://github.com/quickwit-oss/tantivy/issues/2070) (@ChillFish8)

Tantivy 0.20
================================
#### Bugfixes
- Fix phrase queries with slop (slop supports now transpositions, algorithm that carries slop so far for num terms > 2) [#2031](https://github.com/quickwit-oss/tantivy/issues/2031)[#2020](https://github.com/quickwit-oss/tantivy/issues/2020)(@PSeitz)
@@ -12,7 +182,7 @@ Tantivy 0.20 [Unreleased]
- Add PhrasePrefixQuery [#1842](https://github.com/quickwit-oss/tantivy/issues/1842) (@trinity-1686a)
- Add `coerce` option for text and numbers types (convert the value instead of returning an error during indexing) [#1904](https://github.com/quickwit-oss/tantivy/issues/1904) (@PSeitz)
- Add regex tokenizer [#1759](https://github.com/quickwit-oss/tantivy/issues/1759)(@mkleen)
- Move tokenizer API to seperate crate. Having a seperate crate with a stable API will allow us to use tokenizers with different tantivy versions. [#1767](https://github.com/quickwit-oss/tantivy/issues/1767) (@PSeitz)
- Move tokenizer API to separate crate. Having a separate crate with a stable API will allow us to use tokenizers with different tantivy versions. [#1767](https://github.com/quickwit-oss/tantivy/issues/1767) (@PSeitz)
- **Columnar crate**: New fast field handling (@fulmicoton @PSeitz) [#1806](https://github.com/quickwit-oss/tantivy/issues/1806)[#1809](https://github.com/quickwit-oss/tantivy/issues/1809)
- Support for fast fields with optional values. Previously tantivy supported only single-valued and multi-value fast fields. The encoding of optional fast fields is now very compact.
- Fast field Support for JSON (schemaless fast fields). Support multiple types on the same column. [#1876](https://github.com/quickwit-oss/tantivy/issues/1876) (@fulmicoton)
@@ -38,12 +208,14 @@ Tantivy 0.20 [Unreleased]
- Add aggregation support for JSON type [#1888](https://github.com/quickwit-oss/tantivy/issues/1888) (@PSeitz)
- Mixed types support on JSON fields in aggs [#1971](https://github.com/quickwit-oss/tantivy/issues/1971) (@PSeitz)
- Perf: Fetch blocks of vals in aggregation for all cardinality [#1950](https://github.com/quickwit-oss/tantivy/issues/1950) (@PSeitz)
- Allow histogram bounds to be passed as Rfc3339 [#2076](https://github.com/quickwit-oss/tantivy/issues/2076) (@PSeitz)
- `Searcher` with disabled scoring via `EnableScoring::Disabled` [#1780](https://github.com/quickwit-oss/tantivy/issues/1780) (@shikhar)
- Enable tokenizer on json fields [#2053](https://github.com/quickwit-oss/tantivy/issues/2053) (@PSeitz)
- Enforcing "NOT" and "-" queries consistency in UserInputAst [#1609](https://github.com/quickwit-oss/tantivy/issues/1609) (@bazhenov)
- Faster indexing
- Refactor tokenization pipeline to use GATs [#1924](https://github.com/quickwit-oss/tantivy/issues/1924) (@trinity-1686a)
- Faster term hash map [#2058](https://github.com/quickwit-oss/tantivy/issues/2058)[#1940](https://github.com/quickwit-oss/tantivy/issues/1940) (@PSeitz)
- tokenizer-api: reduce Tokenizer allocation overhead [#2062](https://github.com/quickwit-oss/tantivy/issues/2062) (@PSeitz)
- Refactor vint [#2010](https://github.com/quickwit-oss/tantivy/issues/2010) (@PSeitz)
- Faster search
- Work in batches of docs on the SegmentCollector (Only for cases without score for now) [#1937](https://github.com/quickwit-oss/tantivy/issues/1937) (@PSeitz)
@@ -57,13 +229,13 @@ Tantivy 0.20 [Unreleased]
- Auto downgrade index record option, instead of vint error [#1857](https://github.com/quickwit-oss/tantivy/issues/1857) (@PSeitz)
- Enable range query on fast field for u64 compatible types [#1762](https://github.com/quickwit-oss/tantivy/issues/1762) (@PSeitz) [#1876]
- sstable
- Isolating sstable and stacker in independant crates. [#1718](https://github.com/quickwit-oss/tantivy/issues/1718) (@fulmicoton)
- Isolating sstable and stacker in independent crates. [#1718](https://github.com/quickwit-oss/tantivy/issues/1718) (@fulmicoton)
- New sstable format [#1943](https://github.com/quickwit-oss/tantivy/issues/1943)[#1953](https://github.com/quickwit-oss/tantivy/issues/1953) (@trinity-1686a)
- Use DeltaReader directly to implement Dictionnary::ord_to_term [#1928](https://github.com/quickwit-oss/tantivy/issues/1928) (@trinity-1686a)
- Use DeltaReader directly to implement Dictionnary::term_ord [#1925](https://github.com/quickwit-oss/tantivy/issues/1925) (@trinity-1686a)
- Add seperate tokenizer manager for fast fields [#2019](https://github.com/quickwit-oss/tantivy/issues/2019) (@PSeitz)
- Use DeltaReader directly to implement Dictionary::ord_to_term [#1928](https://github.com/quickwit-oss/tantivy/issues/1928) (@trinity-1686a)
- Use DeltaReader directly to implement Dictionary::term_ord [#1925](https://github.com/quickwit-oss/tantivy/issues/1925) (@trinity-1686a)
- Add separate tokenizer manager for fast fields [#2019](https://github.com/quickwit-oss/tantivy/issues/2019) (@PSeitz)
- Make construction of LevenshteinAutomatonBuilder for FuzzyTermQuery instances lazy. [#1756](https://github.com/quickwit-oss/tantivy/issues/1756) (@adamreichold)
- Added support for madvise when opening an mmaped Index [#2036](https://github.com/quickwit-oss/tantivy/issues/2036) (@fulmicoton)
- Added support for madvise when opening an mmapped Index [#2036](https://github.com/quickwit-oss/tantivy/issues/2036) (@fulmicoton)
- Rename `DatePrecision` to `DateTimePrecision` [#2051](https://github.com/quickwit-oss/tantivy/issues/2051) (@guilload)
- Query Parser
- Quotation mark can now be used for phrase queries. [#2050](https://github.com/quickwit-oss/tantivy/issues/2050) (@fulmicoton)
@@ -102,7 +274,7 @@ Tantivy 0.19
- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
- Aggregation
- Add aggregation support for date type [#1693](https://github.com/quickwit-oss/tantivy/pull/1693)(@PSeitz)
- Add support for keyed parameter in range and histgram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
- Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
- Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
- Faster indexing
- [#1610](https://github.com/quickwit-oss/tantivy/pull/1610) (@PSeitz)
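Several of the 0.22/0.23 entries above concern the aggregation module (cardinality, terms, top_hits). As a quick illustration of how such a request is expressed, here is a minimal, hedged sketch built on the same API surface used by the new `benches/agg_bench.rs` further down (the `category` field name and sample values are made up for the example):

```rust
use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST, STRING};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Illustrative schema: a single string field marked FAST so it can be aggregated.
    let mut schema_builder = Schema::builder();
    let category = schema_builder.add_text_field("category", STRING | FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer_with_num_threads(1, 50_000_000)?;
    for cat in ["INFO", "ERROR", "INFO", "WARN"] {
        writer.add_document(doc!(category => cat))?;
    }
    writer.commit()?;

    // Aggregation requests are deserialized from Elasticsearch-style JSON.
    let agg_req: Aggregations = serde_json::from_value(json!({
        "categories": { "terms": { "field": "category" } }
    }))
    .expect("valid aggregation request");
    let collector = AggregationCollector::from_aggs(agg_req, Default::default());

    let searcher = index.reader()?.searcher();
    let result = searcher.search(&AllQuery, &collector)?;
    println!("{}", serde_json::to_string_pretty(&result).expect("serializable result"));
    Ok(())
}
```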
Cargo.toml (103 changed lines)
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.20.2"
version = "0.23.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -11,78 +11,85 @@ repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
rust-version = "1.62"
rust-version = "1.66"
exclude = ["benches/*.json", "benches/*.txt"]

[dependencies]
oneshot = "0.1.5"
base64 = "0.21.0"
oneshot = "0.1.7"
base64 = "0.22.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
regex = { version = "1.5.5", default-features = false, features = [
"std",
"unicode",
] }
aho-corasick = "1.0"
tantivy-fst = "0.4.0"
memmap2 = { version = "0.6.0", optional = true }
lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true }
tempfile = { version = "3.3.0", optional = true }
tantivy-fst = "0.5"
memmap2 = { version = "0.9.0", optional = true }
lz4_flex = { version = "0.11", default-features = false, optional = true }
zstd = { version = "0.13", optional = true, default-features = false }
tempfile = { version = "3.12.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs4 = { version = "0.6.3", optional = true }
fs4 = { version = "0.8.0", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
census = "0.4.0"
downcast-rs = "1.2.1"
bitpacking = { version = "0.9.2", default-features = false, features = [
"bitpacker4x",
] }
census = "0.4.2"
rustc-hash = "1.1.0"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5.0"
murmurhash32 = "0.3.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
fail = { version = "0.5.0", optional = true }
time = { version = "0.3.35", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.10.0"
lru = "0.12.0"
fastdivide = "0.4.0"
itertools = "0.10.3"
itertools = "0.13.0"
measure_time = "0.8.2"
async-trait = "0.1.53"
arc-swap = "1.5.0"

columnar = { version= "0.1", path="./columnar", package ="tantivy-columnar" }
sstable = { version= "0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version= "0.1", path="./stacker", package ="tantivy-stacker" }
query-grammar = { version= "0.20.0", path="./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version= "0.4", path="./bitpacker" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version= "0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
columnar = { version = "0.3", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.3", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.3", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
common = { version = "0.7", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.3.0", features = ["use_serde"] }
hyperloglogplus = { version = "0.4.1", features = ["const-loop"] }
futures-util = { version = "0.3.28", optional = true }
fnv = "1.0.7"

[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"

[dev-dependencies]
binggan = "0.12.0"
rand = "0.8.5"
maplit = "1.0.2"
matches = "0.1.9"
pretty_assertions = "1.2.1"
proptest = "1.0.0"
criterion = "0.5"
test-log = "0.2.10"
env_logger = "0.10.0"
pprof = { git = "https://github.com/PSeitz/pprof-rs/", rev = "53af24b", features = ["flamegraph", "criterion"] } # temp fork that works with criterion 0.5
futures = "0.3.21"
paste = "1.0.11"
more-asserts = "0.3.1"
rand_distr = "0.4.3"
time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
postcard = { version = "1.0.4", features = [
"use-std",
], default-features = false }

[target.'cfg(not(windows))'.dev-dependencies]
criterion = { version = "0.5", default-features = false }

[dev-dependencies.fail]
version = "0.5.0"
@@ -107,18 +114,30 @@ default = ["mmap", "stopwords", "lz4-compression"]
mmap = ["fs4", "tempfile", "memmap2"]
stopwords = []

brotli-compression = ["brotli"]
lz4-compression = ["lz4_flex"]
snappy-compression = ["snap"]
zstd-compression = ["zstd"]

failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
failpoints = ["fail", "fail/failpoints"]
unstable = [] # useful for benches.

quickwit = ["sstable", "futures-util"]

# Compares only the hash of a string when indexing data.
# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
# Uses 64bit ahash.
compare_hash_only = ["stacker/compare_hash_only"]

[workspace]
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
members = [
"query-grammar",
"bitpacker",
"common",
"ownedbytes",
"stacker",
"sstable",
"tokenizer-api",
"columnar",
]

# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
@@ -130,7 +149,7 @@ members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sst
[[test]]
name = "failpoints"
path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"]
required-features = ["failpoints"]

[[bench]]
name = "analyzer"
@@ -139,3 +158,7 @@ harness = false
[[bench]]
name = "index-bench"
harness = false

[[bench]]
name = "agg_bench"
harness = false
README.md (40 changed lines)
@@ -5,21 +5,20 @@
[](https://opensource.org/licenses/MIT)
[](https://crates.io/crates/tantivy)


<img src="https://tantivy-search.github.io/logo/tantivy-logo.png" alt="Tantivy, the fastest full-text search engine library written in Rust" height="250">

**Tantivy** is a **full-text search engine library** written in Rust.
## Fast full-text search engine library written in Rust

It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine.
**If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our distributed search engine built on top of Tantivy.**

Tantivy is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used to build such a search engine.

Tantivy is, in fact, strongly inspired by Lucene's design.

If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
## Benchmark

# Benchmark

The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
The following [benchmark](https://tantivy-search.github.io/bench/) breaks down the
performance for different types of queries/collections.

Your mileage WILL vary depending on the nature of queries and their load.
@@ -28,7 +27,7 @@ Your mileage WILL vary depending on the nature of queries and their load.

Details about the benchmark can be found at this [repository](https://github.com/quickwit-oss/search-benchmark-game).

# Features
## Features

- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages) with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
@@ -44,7 +43,7 @@ Details about the benchmark can be found at this [repository](https://github.com
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, dates, ip, bool, and hierarchical facet fields
- Compressed document store (LZ4, Zstd, None, Brotli, Snap)
- Compressed document store (LZ4, Zstd, None)
- Range queries
- Faceted search
- Configurable indexing (optional term frequency and position indexing)
@@ -54,11 +53,11 @@ Details about the benchmark can be found at this [repository](https://github.com
- Searcher Warmer API
- Cheesy logo with a horse

## Non-features
### Non-features

Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).

# Getting started
## Getting started

Tantivy works on stable Rust and supports Linux, macOS, and Windows.

@@ -68,7 +67,7 @@ index documents, and search via the CLI or a small server with a REST API.
It walks you through getting a Wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/)

# How can I support this project?
## How can I support this project?

There are many ways to support this project.

@@ -79,16 +78,16 @@ There are many ways to support this project.
- Contribute code (you can join [our Discord server](https://discord.gg/MT27AG5EVE))
- Talk about Tantivy around you

# Contributing code
## Contributing code

We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
Feel free to update CHANGELOG.md with your contribution.

## Tokenizer
### Tokenizer

When implementing a tokenizer for tantivy depend on the `tantivy-tokenizer-api` crate.

## Clone and build locally
### Clone and build locally

Tantivy compiles on stable Rust.
To check out and run tests, you can simply run:
@@ -99,10 +98,11 @@ cd tantivy
cargo test
```

# Companies Using Tantivy
## Companies Using Tantivy

<p align="left">
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
<img align="center" src="doc/assets/images/paradedb.png" alt="ParadeDB" height="25" width="auto" />
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" />
<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
@@ -111,7 +111,7 @@ cargo test
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
</p>

# FAQ
## FAQ

### Can I use Tantivy in other languages?

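The README points at the tantivy-cli tutorial for getting started; for readers who just want to see the library API, here is a minimal, hedged sketch of indexing and searching in-memory (field names and sample text are illustrative, not taken from the repository):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Define a small schema with an indexed, stored title and an indexed body.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // Index one document and commit so it becomes searchable.
    let mut writer = index.writer_with_num_threads(1, 50_000_000)?;
    writer.add_document(doc!(
        title => "The Old Man and the Sea",
        body => "He was an old man who fished alone in a skiff in the Gulf Stream."
    ))?;
    writer.commit()?;

    // Parse a free-text query against both fields and fetch the top 10 hits.
    let searcher = index.reader()?.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("old man")?;
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    for (score, doc_address) in top_docs {
        println!("{score}: {doc_address:?}");
    }
    Ok(())
}
```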
TODO.txt (2 changed lines)
@@ -1,7 +1,7 @@
Make schema_builder API fluent.
fix doc serialization and prevent compression problems

u64 , etc. shoudl return Resutl<Option> now that we support optional missing a column is really not an error
u64 , etc. should return Result<Option> now that we support optional missing a column is really not an error
remove fastfield codecs
ditch the first_or_default trick. if it is still useful, improve its implementation.
rename FastFieldReaders::open to load
benches/agg_bench.rs (463 lines, new file)
@@ -0,0 +1,463 @@
|
||||
use binggan::plugins::PeakMemAllocPlugin;
|
||||
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
|
||||
use rand::prelude::SliceRandom;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_distr::Distribution;
|
||||
use serde_json::json;
|
||||
use tantivy::aggregation::agg_req::Aggregations;
|
||||
use tantivy::aggregation::AggregationCollector;
|
||||
use tantivy::query::{AllQuery, TermQuery};
|
||||
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
|
||||
use tantivy::{doc, Index, Term};
|
||||
|
||||
#[global_allocator]
|
||||
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;
|
||||
|
||||
/// Mini macro to register a function via its name
|
||||
/// runner.register("average_u64", move |index| average_u64(index));
|
||||
macro_rules! register {
|
||||
($runner:expr, $func:ident) => {
|
||||
$runner.register(stringify!($func), move |index| {
|
||||
$func(index);
|
||||
None
|
||||
})
|
||||
};
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let inputs = vec![
|
||||
("full", get_test_index_bench(Cardinality::Full).unwrap()),
|
||||
(
|
||||
"dense",
|
||||
get_test_index_bench(Cardinality::OptionalDense).unwrap(),
|
||||
),
|
||||
(
|
||||
"sparse",
|
||||
get_test_index_bench(Cardinality::OptionalSparse).unwrap(),
|
||||
),
|
||||
(
|
||||
"multivalue",
|
||||
get_test_index_bench(Cardinality::Multivalued).unwrap(),
|
||||
),
|
||||
];
|
||||
|
||||
bench_agg(InputGroup::new_with_inputs(inputs));
|
||||
}
|
||||
|
||||
fn bench_agg(mut group: InputGroup<Index>) {
|
||||
group.add_plugin(PeakMemAllocPlugin::new(GLOBAL));
|
||||
|
||||
register!(group, average_u64);
|
||||
register!(group, average_f64);
|
||||
register!(group, average_f64_u64);
|
||||
register!(group, stats_f64);
|
||||
register!(group, extendedstats_f64);
|
||||
register!(group, percentiles_f64);
|
||||
register!(group, terms_few);
|
||||
register!(group, terms_many);
|
||||
register!(group, terms_many_top_1000);
|
||||
register!(group, terms_many_order_by_term);
|
||||
register!(group, terms_many_with_top_hits);
|
||||
register!(group, terms_many_with_avg_sub_agg);
|
||||
register!(group, terms_many_json_mixed_type_with_avg_sub_agg);
|
||||
|
||||
register!(group, cardinality_agg);
|
||||
register!(group, terms_few_with_cardinality_agg);
|
||||
|
||||
register!(group, range_agg);
|
||||
register!(group, range_agg_with_avg_sub_agg);
|
||||
register!(group, range_agg_with_term_agg_few);
|
||||
register!(group, range_agg_with_term_agg_many);
|
||||
register!(group, histogram);
|
||||
register!(group, histogram_hard_bounds);
|
||||
register!(group, histogram_with_avg_sub_agg);
|
||||
register!(group, avg_and_range_with_avg_sub_agg);
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
fn exec_term_with_agg(index: &Index, agg_req: serde_json::Value) {
|
||||
let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let text_field = reader.searcher().schema().get_field("text").unwrap();
|
||||
let term_query = TermQuery::new(
|
||||
Term::from_field_text(text_field, "cool"),
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
let collector = get_collector(agg_req);
|
||||
let searcher = reader.searcher();
|
||||
black_box(searcher.search(&term_query, &collector).unwrap());
|
||||
}
|
||||
|
||||
fn average_u64(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"average": { "avg": { "field": "score", } }
|
||||
});
|
||||
exec_term_with_agg(index, agg_req)
|
||||
}
|
||||
fn average_f64(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"average": { "avg": { "field": "score_f64", } }
|
||||
});
|
||||
exec_term_with_agg(index, agg_req)
|
||||
}
|
||||
fn average_f64_u64(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"average_f64": { "avg": { "field": "score_f64" } },
|
||||
"average": { "avg": { "field": "score" } },
|
||||
});
|
||||
exec_term_with_agg(index, agg_req)
|
||||
}
|
||||
fn stats_f64(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"average_f64": { "stats": { "field": "score_f64", } }
|
||||
});
|
||||
exec_term_with_agg(index, agg_req)
|
||||
}
|
||||
fn extendedstats_f64(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"extendedstats_f64": { "extended_stats": { "field": "score_f64", } }
|
||||
});
|
||||
exec_term_with_agg(index, agg_req)
|
||||
}
|
||||
fn percentiles_f64(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"mypercentiles": {
|
||||
"percentiles": {
|
||||
"field": "score_f64",
|
||||
"percents": [ 95, 99, 99.9 ]
|
||||
}
|
||||
}
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
|
||||
fn cardinality_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"cardinality": {
|
||||
"cardinality": {
|
||||
"field": "text_many_terms"
|
||||
},
|
||||
}
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_few_with_cardinality_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "text_few_terms" },
|
||||
"aggs": {
|
||||
"cardinality": {
|
||||
"cardinality": {
|
||||
"field": "text_many_terms"
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
|
||||
fn terms_few(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": { "terms": { "field": "text_few_terms" } },
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_many(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": { "terms": { "field": "text_many_terms" } },
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_many_top_1000(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": { "terms": { "field": "text_many_terms", "size": 1000 } },
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_many_order_by_term(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_many_with_top_hits(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "text_many_terms" },
|
||||
"aggs": {
|
||||
"top_hits": { "top_hits":
|
||||
{
|
||||
"sort": [
|
||||
{ "score": "desc" }
|
||||
],
|
||||
"size": 2,
|
||||
"doc_value_fields": ["score_f64"]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_many_with_avg_sub_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "text_many_terms" },
|
||||
"aggs": {
|
||||
"average_f64": { "avg": { "field": "score_f64" } }
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn terms_many_json_mixed_type_with_avg_sub_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "json.mixed_type" },
|
||||
"aggs": {
|
||||
"average_f64": { "avg": { "field": "score_f64" } }
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
|
||||
fn execute_agg(index: &Index, agg_req: serde_json::Value) {
|
||||
let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
|
||||
let collector = get_collector(agg_req);
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
black_box(searcher.search(&AllQuery, &collector).unwrap());
|
||||
}
|
||||
fn range_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"range_f64": { "range": { "field": "score_f64", "ranges": [
|
||||
{ "from": 3, "to": 7000 },
|
||||
{ "from": 7000, "to": 20000 },
|
||||
{ "from": 20000, "to": 30000 },
|
||||
{ "from": 30000, "to": 40000 },
|
||||
{ "from": 40000, "to": 50000 },
|
||||
{ "from": 50000, "to": 60000 }
|
||||
] } },
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn range_agg_with_avg_sub_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": {
|
||||
"range": {
|
||||
"field": "score_f64",
|
||||
"ranges": [
|
||||
{ "from": 3, "to": 7000 },
|
||||
{ "from": 7000, "to": 20000 },
|
||||
{ "from": 20000, "to": 30000 },
|
||||
{ "from": 30000, "to": 40000 },
|
||||
{ "from": 40000, "to": 50000 },
|
||||
{ "from": 50000, "to": 60000 }
|
||||
]
|
||||
},
|
||||
"aggs": {
|
||||
"average_f64": { "avg": { "field": "score_f64" } }
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
|
||||
fn range_agg_with_term_agg_few(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": {
|
||||
"range": {
|
||||
"field": "score_f64",
|
||||
"ranges": [
|
||||
{ "from": 3, "to": 7000 },
|
||||
{ "from": 7000, "to": 20000 },
|
||||
{ "from": 20000, "to": 30000 },
|
||||
{ "from": 30000, "to": 40000 },
|
||||
{ "from": 40000, "to": 50000 },
|
||||
{ "from": 50000, "to": 60000 }
|
||||
]
|
||||
},
|
||||
"aggs": {
|
||||
"my_texts": { "terms": { "field": "text_few_terms" } },
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn range_agg_with_term_agg_many(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": {
|
||||
"range": {
|
||||
"field": "score_f64",
|
||||
"ranges": [
|
||||
{ "from": 3, "to": 7000 },
|
||||
{ "from": 7000, "to": 20000 },
|
||||
{ "from": 20000, "to": 30000 },
|
||||
{ "from": 30000, "to": 40000 },
|
||||
{ "from": 40000, "to": 50000 },
|
||||
{ "from": 50000, "to": 60000 }
|
||||
]
|
||||
},
|
||||
"aggs": {
|
||||
"my_texts": { "terms": { "field": "text_many_terms" } },
|
||||
}
|
||||
},
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
|
||||
fn histogram(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": {
|
||||
"histogram": {
|
||||
"field": "score_f64",
|
||||
"interval": 100 // 1000 buckets
|
||||
},
|
||||
}
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn histogram_hard_bounds(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn histogram_with_avg_sub_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": {
|
||||
"histogram": { "field": "score_f64", "interval": 100 },
|
||||
"aggs": {
|
||||
"average_f64": { "avg": { "field": "score_f64" } }
|
||||
}
|
||||
}
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
fn avg_and_range_with_avg_sub_agg(index: &Index) {
|
||||
let agg_req = json!({
|
||||
"rangef64": {
|
||||
"range": {
|
||||
"field": "score_f64",
|
||||
"ranges": [
|
||||
{ "from": 3, "to": 7000 },
|
||||
{ "from": 7000, "to": 20000 },
|
||||
{ "from": 20000, "to": 60000 }
|
||||
]
|
||||
},
|
||||
"aggs": {
|
||||
"average_in_range": { "avg": { "field": "score" } }
|
||||
}
|
||||
},
|
||||
"average": { "avg": { "field": "score" } }
|
||||
});
|
||||
execute_agg(index, agg_req);
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
enum Cardinality {
|
||||
/// All documents contain exactly one value.
|
||||
/// `Full` is the default when auto-detecting the cardinality, since it is the strictest variant.
|
||||
#[default]
|
||||
Full = 0,
|
||||
/// All documents contain at most one value.
|
||||
OptionalDense = 1,
|
||||
/// All documents may contain any number of values.
|
||||
Multivalued = 2,
|
||||
/// Roughly 1 in 20 documents has a value.
|
||||
OptionalSparse = 3,
|
||||
}
|
||||
|
||||
fn get_collector(agg_req: Aggregations) -> AggregationCollector {
|
||||
AggregationCollector::from_aggs(agg_req, Default::default())
|
||||
}
|
||||
|
||||
fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_fieldtype = tantivy::schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
|
||||
)
|
||||
.set_stored();
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype);
|
||||
let json_field = schema_builder.add_json_field("json", FAST);
|
||||
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
|
||||
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
|
||||
let score_fieldtype = tantivy::schema::NumericOptions::default().set_fast();
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
|
||||
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
let index = Index::create_from_tempdir(schema_builder.build())?;
|
||||
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
|
||||
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
|
||||
|
||||
let many_terms_data = (0..150_000)
|
||||
.map(|num| format!("author{num}"))
|
||||
.collect::<Vec<_>>();
|
||||
{
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
|
||||
// To make the different test cases comparable we just change one doc to force the
|
||||
// cardinality
|
||||
if cardinality == Cardinality::OptionalDense {
|
||||
index_writer.add_document(doc!())?;
|
||||
}
|
||||
if cardinality == Cardinality::Multivalued {
|
||||
index_writer.add_document(doc!(
|
||||
json_field => json!({"mixed_type": 10.0}),
|
||||
json_field => json!({"mixed_type": 10.0}),
|
||||
text_field => "cool",
|
||||
text_field => "cool",
|
||||
text_field_many_terms => "cool",
|
||||
text_field_many_terms => "cool",
|
||||
text_field_few_terms => "cool",
|
||||
text_field_few_terms => "cool",
|
||||
score_field => 1u64,
|
||||
score_field => 1u64,
|
||||
score_field_f64 => lg_norm.sample(&mut rng),
|
||||
score_field_f64 => lg_norm.sample(&mut rng),
|
||||
score_field_i64 => 1i64,
|
||||
score_field_i64 => 1i64,
|
||||
))?;
|
||||
}
|
||||
let mut doc_with_value = 1_000_000;
|
||||
if cardinality == Cardinality::OptionalSparse {
|
||||
doc_with_value /= 20;
|
||||
}
|
||||
let _val_max = 1_000_000.0;
|
||||
for _ in 0..doc_with_value {
|
||||
let val: f64 = rng.gen_range(0.0..1_000_000.0);
|
||||
let json = if rng.gen_bool(0.1) {
|
||||
// 10% are numeric values
|
||||
json!({ "mixed_type": val })
|
||||
} else {
|
||||
json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
|
||||
};
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "cool",
|
||||
json_field => json,
|
||||
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
|
||||
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
|
||||
score_field => val as u64,
|
||||
score_field_f64 => lg_norm.sample(&mut rng),
|
||||
score_field_i64 => val as i64,
|
||||
))?;
|
||||
if cardinality == Cardinality::OptionalSparse {
|
||||
for _ in 0..20 {
|
||||
index_writer.add_document(doc!(text_field => "cool"))?;
|
||||
}
|
||||
}
|
||||
}
|
||||
// writing the segment
|
||||
index_writer.commit()?;
|
||||
}
|
||||
|
||||
Ok(index)
|
||||
}
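// NOTE: `execute_agg`, called by every benchmark function above, is defined
// outside this excerpt. A minimal sketch of what such a helper could look like
// is given below; the use of `AllQuery`, the unwraps and the exact signature
// are assumptions for illustration, not the benchmark's confirmed code.
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::query::AllQuery;

fn execute_agg(index: &Index, agg_req: serde_json::Value) {
    // Deserialize the JSON aggregation request into tantivy's request type.
    let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
    // Build the collector with the `get_collector` helper defined above.
    let collector = get_collector(agg_req);
    // Run the aggregation over every document in the index.
    let searcher = index.reader().unwrap().searcher();
    let agg_res = searcher.search(&AllQuery, &collector).unwrap();
    // Keep the result alive so the aggregation work is not optimized away.
    std::hint::black_box(agg_res);
}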
|
||||
@@ -1,5 +1,7 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use tantivy::tokenizer::TokenizerManager;
|
||||
use tantivy::tokenizer::{
|
||||
LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer, TokenizerManager,
|
||||
};
|
||||
|
||||
const ALICE_TXT: &str = include_str!("alice.txt");
|
||||
|
||||
@@ -16,7 +18,26 @@ pub fn criterion_benchmark(c: &mut Criterion) {
|
||||
assert_eq!(word_count, 30_731);
|
||||
})
|
||||
});
|
||||
let mut dynamic_analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
|
||||
.dynamic()
|
||||
.filter_dynamic(RemoveLongFilter::limit(40))
|
||||
.filter_dynamic(LowerCaser)
|
||||
.build();
|
||||
c.bench_function("dynamic-tokenize-alice", |b| {
|
||||
b.iter(|| {
|
||||
let mut word_count = 0;
|
||||
let mut token_stream = dynamic_analyzer.token_stream(ALICE_TXT);
|
||||
while token_stream.advance() {
|
||||
word_count += 1;
|
||||
}
|
||||
assert_eq!(word_count, 30_731);
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
criterion_group! {
|
||||
name = benches;
|
||||
config = Criterion::default().sample_size(200);
|
||||
targets = criterion_benchmark
|
||||
}
|
||||
criterion_main!(benches);
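// NOTE: for contrast with the dynamic analyzer benchmarked above, the same
// filter chain can be built with statically typed `filter` calls. This is a
// sketch for illustration only, not necessarily how the pre-existing benchmark
// in this file builds its analyzer.
fn count_tokens_static() -> usize {
    let mut static_analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser)
        .build();
    // Tokenize the same corpus and count tokens, as the benchmark closures do.
    let mut word_count = 0;
    let mut token_stream = static_analyzer.token_stream(ALICE_TXT);
    while token_stream.advance() {
        word_count += 1;
    }
    word_count
}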
|
||||
|
||||
@@ -1,14 +1,98 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
|
||||
use pprof::criterion::{Output, PProfProfiler};
|
||||
use tantivy::schema::{FAST, INDEXED, STORED, STRING, TEXT};
|
||||
use tantivy::Index;
|
||||
use criterion::{criterion_group, criterion_main, BatchSize, Bencher, Criterion, Throughput};
|
||||
use tantivy::schema::{TantivyDocument, FAST, INDEXED, STORED, STRING, TEXT};
|
||||
use tantivy::{tokenizer, Index, IndexWriter};
|
||||
|
||||
const HDFS_LOGS: &str = include_str!("hdfs.json");
|
||||
const GH_LOGS: &str = include_str!("gh.json");
|
||||
const WIKI: &str = include_str!("wiki.json");
|
||||
|
||||
fn get_lines(input: &str) -> Vec<&str> {
|
||||
input.trim().split('\n').collect()
|
||||
fn benchmark(
|
||||
b: &mut Bencher,
|
||||
input: &str,
|
||||
schema: tantivy::schema::Schema,
|
||||
commit: bool,
|
||||
parse_json: bool,
|
||||
is_dynamic: bool,
|
||||
) {
|
||||
if is_dynamic {
|
||||
benchmark_dynamic_json(b, input, schema, commit, parse_json)
|
||||
} else {
|
||||
_benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
|
||||
TantivyDocument::parse_json(schema, doc_json).unwrap()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn get_index(schema: tantivy::schema::Schema) -> Index {
|
||||
let mut index = Index::create_in_ram(schema.clone());
|
||||
let ff_tokenizer_manager = tokenizer::TokenizerManager::default();
|
||||
ff_tokenizer_manager.register(
|
||||
"raw",
|
||||
tokenizer::TextAnalyzer::builder(tokenizer::RawTokenizer::default())
|
||||
.filter(tokenizer::RemoveLongFilter::limit(255))
|
||||
.build(),
|
||||
);
|
||||
index.set_fast_field_tokenizers(ff_tokenizer_manager.clone());
|
||||
index
|
||||
}
|
||||
|
||||
fn _benchmark(
|
||||
b: &mut Bencher,
|
||||
input: &str,
|
||||
schema: tantivy::schema::Schema,
|
||||
commit: bool,
|
||||
include_json_parsing: bool,
|
||||
create_doc: impl Fn(&tantivy::schema::Schema, &str) -> TantivyDocument,
|
||||
) {
|
||||
if include_json_parsing {
|
||||
let lines: Vec<&str> = input.trim().split('\n').collect();
|
||||
b.iter(|| {
|
||||
let index = get_index(schema.clone());
|
||||
let mut index_writer: IndexWriter =
|
||||
index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = create_doc(&schema, doc_json);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
if commit {
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
})
|
||||
} else {
|
||||
let docs: Vec<_> = input
|
||||
.trim()
|
||||
.split('\n')
|
||||
.map(|doc_json| create_doc(&schema, doc_json))
|
||||
.collect();
|
||||
b.iter_batched(
|
||||
|| docs.clone(),
|
||||
|docs| {
|
||||
let index = get_index(schema.clone());
|
||||
let mut index_writer: IndexWriter =
|
||||
index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc in docs {
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
if commit {
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
)
|
||||
}
|
||||
}
|
||||
fn benchmark_dynamic_json(
|
||||
b: &mut Bencher,
|
||||
input: &str,
|
||||
schema: tantivy::schema::Schema,
|
||||
commit: bool,
|
||||
parse_json: bool,
|
||||
) {
|
||||
let json_field = schema.get_field("json").unwrap();
|
||||
_benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
|
||||
let json_val: serde_json::Value = serde_json::from_str(doc_json).unwrap();
|
||||
tantivy::doc!(json_field=>json_val)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
@@ -19,7 +103,14 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
schema_builder.add_text_field("severity", STRING);
|
||||
schema_builder.build()
|
||||
};
|
||||
let schema_with_store = {
|
||||
let schema_only_fast = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_u64_field("timestamp", FAST);
|
||||
schema_builder.add_text_field("body", FAST);
|
||||
schema_builder.add_text_field("severity", FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
let _schema_with_store = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_u64_field("timestamp", INDEXED | STORED);
|
||||
schema_builder.add_text_field("body", TEXT | STORED);
|
||||
@@ -28,74 +119,40 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
};
|
||||
let dynamic_schema = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_json_field("json", TEXT);
|
||||
schema_builder.add_json_field("json", TEXT | FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group("index-hdfs");
|
||||
group.throughput(Throughput::Bytes(HDFS_LOGS.len() as u64));
|
||||
group.sample_size(20);
|
||||
group.bench_function("index-hdfs-no-commit", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
|
||||
let benches = [
|
||||
("only-indexed-".to_string(), schema, false),
|
||||
//("stored-".to_string(), _schema_with_store, false),
|
||||
("only-fast-".to_string(), schema_only_fast, false),
|
||||
("dynamic-".to_string(), dynamic_schema, true),
|
||||
];
|
||||
|
||||
for (prefix, schema, is_dynamic) in benches {
|
||||
for commit in [false, true] {
|
||||
let suffix = if commit { "with-commit" } else { "no-commit" };
|
||||
{
|
||||
let parse_json = false;
|
||||
// for parse_json in [false, true] {
|
||||
let suffix = if parse_json {
|
||||
format!("{suffix}-with-json-parsing")
|
||||
} else {
|
||||
suffix.to_string()
|
||||
};
|
||||
|
||||
let bench_name = format!("{prefix}{suffix}");
|
||||
group.bench_function(bench_name, |b| {
|
||||
benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
|
||||
});
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn gh_index_benchmark(c: &mut Criterion) {
|
||||
@@ -104,38 +161,24 @@ pub fn gh_index_benchmark(c: &mut Criterion) {
|
||||
schema_builder.add_json_field("json", TEXT | FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
let dynamic_schema_fast = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_json_field("json", FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group("index-gh");
|
||||
group.throughput(Throughput::Bytes(GH_LOGS.len() as u64));
|
||||
|
||||
group.bench_function("index-gh-no-commit", |b| {
|
||||
let lines = get_lines(GH_LOGS);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
})
|
||||
benchmark_dynamic_json(b, GH_LOGS, dynamic_schema.clone(), false, false)
|
||||
});
|
||||
group.bench_function("index-gh-with-commit", |b| {
|
||||
let lines = get_lines(GH_LOGS);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
group.bench_function("index-gh-fast", |b| {
|
||||
benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), false, false)
|
||||
});
|
||||
|
||||
group.bench_function("index-gh-fast-with-commit", |b| {
|
||||
benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), true, false)
|
||||
});
|
||||
}
|
||||
|
||||
@@ -150,33 +193,10 @@ pub fn wiki_index_benchmark(c: &mut Criterion) {
|
||||
group.throughput(Throughput::Bytes(WIKI.len() as u64));
|
||||
|
||||
group.bench_function("index-wiki-no-commit", |b| {
|
||||
let lines = get_lines(WIKI);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
})
|
||||
benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), false, false)
|
||||
});
|
||||
group.bench_function("index-wiki-with-commit", |b| {
|
||||
let lines = get_lines(WIKI);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), true, false)
|
||||
});
|
||||
}
|
||||
|
||||
@@ -187,12 +207,12 @@ criterion_group! {
|
||||
}
|
||||
criterion_group! {
|
||||
name = gh_benches;
|
||||
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
|
||||
config = Criterion::default();
|
||||
targets = gh_index_benchmark
|
||||
}
|
||||
criterion_group! {
|
||||
name = wiki_benches;
|
||||
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
|
||||
config = Criterion::default();
|
||||
targets = wiki_index_benchmark
|
||||
}
|
||||
criterion_main!(benches, gh_benches, wiki_benches);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-bitpacker"
|
||||
version = "0.4.0"
|
||||
version = "0.6.0"
|
||||
edition = "2021"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
@@ -15,7 +15,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
bitpacking = {version="0.8", default-features=false, features = ["bitpacker1x"]}
|
||||
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker1x"] }
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
|
||||
@@ -367,11 +366,11 @@ mod test {
|
||||
let mut output: Vec<u32> = Vec::new();
|
||||
for len in [0, 1, 2, 32, 33, 34, 64] {
|
||||
for start_idx in 0u32..32u32 {
|
||||
output.resize(len as usize, 0);
|
||||
output.resize(len, 0);
|
||||
bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
|
||||
for i in 0..len {
|
||||
for (i, output_byte) in output.iter().enumerate() {
|
||||
let expected = (start_idx + i as u32) & mask;
|
||||
assert_eq!(output[i], expected);
|
||||
assert_eq!(*output_byte, expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,10 +64,8 @@ fn mem_usage<T>(items: &Vec<T>) -> usize {
|
||||
|
||||
impl BlockedBitpacker {
|
||||
pub fn new() -> Self {
|
||||
let mut compressed_blocks = vec![];
|
||||
compressed_blocks.resize(8, 0);
|
||||
Self {
|
||||
compressed_blocks,
|
||||
compressed_blocks: vec![0; 8],
|
||||
buffer: vec![],
|
||||
offset_and_bits: vec![],
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
#[cfg(any(target_arch = "x86_64"))]
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
mod avx2;
|
||||
|
||||
mod scalar;
|
||||
|
||||
cliff.toml (82 lines changed)
@@ -1,6 +1,10 @@
|
||||
# configuration file for git-cliff
|
||||
# see https://github.com/orhun/git-cliff#configuration-file
|
||||
|
||||
[remote.github]
|
||||
owner = "quickwit-oss"
|
||||
repo = "tantivy"
|
||||
|
||||
[changelog]
|
||||
# changelog header
|
||||
header = """
|
||||
@@ -8,15 +12,43 @@ header = """
|
||||
# template for the changelog body
|
||||
# https://tera.netlify.app/docs/#introduction
|
||||
body = """
|
||||
{% if version %}\
|
||||
{{ version | trim_start_matches(pat="v") }} ({{ timestamp | date(format="%Y-%m-%d") }})
|
||||
==================
|
||||
{% else %}\
|
||||
## [unreleased]
|
||||
{% endif %}\
|
||||
## What's Changed
|
||||
|
||||
{%- if version %} in {{ version }}{%- endif -%}
|
||||
{% for commit in commits %}
|
||||
- {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | split(pat="\n") | first | trim | upper_first }}(@{{ commit.author.name }})\
|
||||
{% endfor %}
|
||||
{% if commit.github.pr_title -%}
|
||||
{%- set commit_message = commit.github.pr_title -%}
|
||||
{%- else -%}
|
||||
{%- set commit_message = commit.message -%}
|
||||
{%- endif -%}
|
||||
- {{ commit_message | split(pat="\n") | first | trim }}\
|
||||
{% if commit.github.pr_number %} \
|
||||
[#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}){% if commit.github.username %}(@{{ commit.github.username }}){%- endif -%} \
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
|
||||
{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
|
||||
{% raw %}\n{% endraw -%}
|
||||
## New Contributors
|
||||
{%- endif %}\
|
||||
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
|
||||
* @{{ contributor.username }} made their first contribution
|
||||
{%- if contributor.pr_number %} in \
|
||||
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
|
||||
{% if version %}
|
||||
{% if previous.version %}
|
||||
**Full Changelog**: {{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}
|
||||
{% endif %}
|
||||
{% else -%}
|
||||
{% raw %}\n{% endraw %}
|
||||
{% endif %}
|
||||
|
||||
{%- macro remote_url() -%}
|
||||
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
|
||||
{%- endmacro -%}
|
||||
"""
|
||||
# remove the leading and trailing whitespace from the template
|
||||
trim = true
|
||||
@@ -25,52 +57,24 @@ footer = """
|
||||
"""
|
||||
|
||||
postprocessors = [
|
||||
{ pattern = 'Paul Masurel', replace = "fulmicoton"}, # replace with github user
|
||||
{ pattern = 'PSeitz', replace = "PSeitz"}, # replace with github user
|
||||
{ pattern = 'Adam Reichold', replace = "adamreichold"}, # replace with github user
|
||||
{ pattern = 'trinity-1686a', replace = "trinity-1686a"}, # replace with github user
|
||||
{ pattern = 'Michael Kleen', replace = "mkleen"}, # replace with github user
|
||||
{ pattern = 'Adrien Guillo', replace = "guilload"}, # replace with github user
|
||||
{ pattern = 'François Massot', replace = "fmassot"}, # replace with github user
|
||||
{ pattern = '', replace = ""}, # replace with github user
|
||||
]
|
||||
|
||||
[git]
|
||||
# parse the commits based on https://www.conventionalcommits.org
|
||||
# This is required, otherwise commit.message contains the whole commit message and not just the title
|
||||
conventional_commits = true
|
||||
conventional_commits = false
|
||||
# filter out the commits that are not conventional
|
||||
filter_unconventional = false
|
||||
filter_unconventional = true
|
||||
# process each line of a commit as an individual commit
|
||||
split_commits = false
|
||||
# regex for preprocessing the commit messages
|
||||
commit_preprocessors = [
|
||||
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "[#${2}](https://github.com/quickwit-oss/tantivy/issues/${2})"}, # replace issue numbers
|
||||
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = ""},
|
||||
]
|
||||
#link_parsers = [
|
||||
#{ pattern = "#(\\d+)", href = "https://github.com/quickwit-oss/tantivy/pulls/$1"},
|
||||
#]
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "Features"},
|
||||
{ message = "^fix", group = "Bug Fixes"},
|
||||
{ message = "^doc", group = "Documentation"},
|
||||
{ message = "^perf", group = "Performance"},
|
||||
{ message = "^refactor", group = "Refactor"},
|
||||
{ message = "^style", group = "Styling"},
|
||||
{ message = "^test", group = "Testing"},
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true},
|
||||
{ message = "(?i)clippy", skip = true},
|
||||
{ message = "(?i)dependabot", skip = true},
|
||||
{ message = "(?i)fmt", skip = true},
|
||||
{ message = "(?i)bump", skip = true},
|
||||
{ message = "(?i)readme", skip = true},
|
||||
{ message = "(?i)comment", skip = true},
|
||||
{ message = "(?i)spelling", skip = true},
|
||||
{ message = "^chore", group = "Miscellaneous Tasks"},
|
||||
{ body = ".*security", group = "Security"},
|
||||
{ message = ".*", group = "Other", default_scope = "other"},
|
||||
]
|
||||
# protect breaking changes from being skipped due to matching a skipping commit_parser
|
||||
protect_breaking_commits = false
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
|
||||
@@ -1,28 +1,38 @@
|
||||
[package]
|
||||
name = "tantivy-columnar"
|
||||
version = "0.1.0"
|
||||
version = "0.3.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
desciption = "column oriented storage for tantivy"
|
||||
description = "column oriented storage for tantivy"
|
||||
categories = ["database-implementations", "data-structures", "compression"]
|
||||
|
||||
[dependencies]
|
||||
itertools = "0.10.5"
|
||||
fnv = "1.0.7"
|
||||
itertools = "0.13.0"
|
||||
fastdivide = "0.4.0"
|
||||
|
||||
stacker = { version= "0.1", path = "../stacker", package="tantivy-stacker"}
|
||||
sstable = { version= "0.1", path = "../sstable", package = "tantivy-sstable" }
|
||||
common = { version= "0.5", path = "../common", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version= "0.4", path = "../bitpacker/" }
|
||||
stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
|
||||
sstable = { version= "0.3", path = "../sstable", package = "tantivy-sstable" }
|
||||
common = { version= "0.7", path = "../common", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version= "0.6", path = "../bitpacker/" }
|
||||
serde = "1.0.152"
|
||||
downcast-rs = "1.2.0"
|
||||
|
||||
[dev-dependencies]
|
||||
proptest = "1"
|
||||
more-asserts = "0.3.1"
|
||||
rand = "0.8"
|
||||
binggan = "0.12.0"
|
||||
|
||||
[[bench]]
|
||||
name = "bench_merge"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_access"
|
||||
harness = false
|
||||
|
||||
|
||||
[features]
|
||||
unstable = []
|
||||
|
||||
@@ -31,7 +31,7 @@ restriction on 50% of the values (e.g. a 64-bit hash). On the other hand, a lot
|
||||
# Columnar format
|
||||
|
||||
This columnar format may have more than one column (with different types) associated to the same `column_name` (see [Coercion rules](#coercion-rules) above).
|
||||
The `(column_name, columne_type)` couple however uniquely identifies a column.
|
||||
The `(column_name, column_type)` couple however uniquely identifies a column.
|
||||
That couple is serialized as a column `column_key`. The format of that key is:
|
||||
`[column_name][ZERO_BYTE][column_type_header: u8]`
|
||||
|
||||
|
||||
columnar/benches/bench_access.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
use binggan::{black_box, InputGroup};
|
||||
use common::*;
|
||||
use tantivy_columnar::Column;
|
||||
|
||||
pub mod common;
|
||||
|
||||
const NUM_DOCS: u32 = 2_000_000;
|
||||
|
||||
pub fn generate_columnar_and_open(card: Card, num_docs: u32) -> Column {
|
||||
let reader = generate_columnar_with_name(card, num_docs, "price");
|
||||
reader.read_columns("price").unwrap()[0]
|
||||
.open_u64_lenient()
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let mut inputs = Vec::new();
|
||||
|
||||
let mut add_card = |card1: Card| {
|
||||
inputs.push((
|
||||
format!("{card1}"),
|
||||
generate_columnar_and_open(card1, NUM_DOCS),
|
||||
));
|
||||
};
|
||||
|
||||
add_card(Card::MultiSparse);
|
||||
add_card(Card::Multi);
|
||||
add_card(Card::Sparse);
|
||||
add_card(Card::Dense);
|
||||
add_card(Card::Full);
|
||||
|
||||
bench_group(InputGroup::new_with_inputs(inputs));
|
||||
}
|
||||
|
||||
fn bench_group(mut runner: InputGroup<Column>) {
|
||||
runner.register("access_values_for_doc", |column| {
|
||||
let mut sum = 0;
|
||||
for i in 0..NUM_DOCS {
|
||||
for value in column.values_for_doc(i) {
|
||||
sum += value;
|
||||
}
|
||||
}
|
||||
black_box(sum);
|
||||
None
|
||||
});
|
||||
runner.register("access_first_vals", |column| {
|
||||
let mut sum = 0;
|
||||
const BLOCK_SIZE: usize = 32;
|
||||
let mut docs = vec![0; BLOCK_SIZE];
|
||||
let mut buffer = vec![None; BLOCK_SIZE];
|
||||
for i in (0..NUM_DOCS).step_by(BLOCK_SIZE) {
|
||||
// fill docs
|
||||
for idx in 0..BLOCK_SIZE {
|
||||
docs[idx] = idx as u32 + i;
|
||||
}
|
||||
|
||||
column.first_vals(&docs, &mut buffer);
|
||||
for val in buffer.iter() {
|
||||
let Some(val) = val else { continue };
|
||||
sum += *val;
|
||||
}
|
||||
}
|
||||
|
||||
black_box(sum);
|
||||
None
|
||||
});
|
||||
runner.run();
|
||||
}
|
||||
columnar/benches/bench_first_vals.rs (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{serialize_and_load_u64_based_column_values, CodecType};
|
||||
use tantivy_columnar::*;
|
||||
use test::{black_box, Bencher};
|
||||
|
||||
struct Columns {
|
||||
pub optional: Column,
|
||||
pub full: Column,
|
||||
pub multi: Column,
|
||||
}
|
||||
|
||||
fn get_test_columns() -> Columns {
|
||||
let data = generate_permutation();
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (idx, val) in data.iter().enumerate() {
|
||||
dataframe_writer.record_numerical(idx as u32, "full_values", NumericalValue::U64(*val));
|
||||
if idx % 2 == 0 {
|
||||
dataframe_writer.record_numerical(
|
||||
idx as u32,
|
||||
"optional_values",
|
||||
NumericalValue::U64(*val),
|
||||
);
|
||||
}
|
||||
dataframe_writer.record_numerical(idx as u32, "multi_values", NumericalValue::U64(*val));
|
||||
dataframe_writer.record_numerical(idx as u32, "multi_values", NumericalValue::U64(*val));
|
||||
}
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(data.len() as u32, &mut buffer)
|
||||
.unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("optional_values").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
let optional = cols[0].open_u64_lenient().unwrap().unwrap();
|
||||
assert_eq!(optional.index.get_cardinality(), Cardinality::Optional);
|
||||
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("full_values").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
let column_full = cols[0].open_u64_lenient().unwrap().unwrap();
|
||||
assert_eq!(column_full.index.get_cardinality(), Cardinality::Full);
|
||||
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("multi_values").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
let multi = cols[0].open_u64_lenient().unwrap().unwrap();
|
||||
assert_eq!(multi.index.get_cardinality(), Cardinality::Multivalued);
|
||||
|
||||
Columns {
|
||||
optional,
|
||||
full: column_full,
|
||||
multi,
|
||||
}
|
||||
}
|
||||
|
||||
const NUM_VALUES: u64 = 100_000;
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..NUM_VALUES).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn ColumnValues<u64>> {
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
fn run_bench_on_column_full_scan(b: &mut Bencher, column: Column) {
|
||||
let num_iter = black_box(NUM_VALUES);
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for i in 0..num_iter as u32 {
|
||||
let val = column.first(i);
|
||||
sum += val.unwrap_or(0);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
fn run_bench_on_column_block_fetch(b: &mut Bencher, column: Column) {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
b.iter(move || {
|
||||
column.first_vals(&fetch_docids, &mut block);
|
||||
block[0]
|
||||
});
|
||||
}
|
||||
fn run_bench_on_column_block_single_calls(b: &mut Bencher, column: Column) {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
b.iter(move || {
|
||||
for i in 0..fetch_docids.len() {
|
||||
block[i] = column.first(fetch_docids[i]);
|
||||
}
|
||||
block[0]
|
||||
});
|
||||
}
|
||||
|
||||
/// Column first method
|
||||
#[bench]
|
||||
fn bench_get_first_on_full_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_first_on_optional_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_first_on_multi_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
/// Block fetch column accessor
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_optional_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_multi_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_full_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_optional_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_multi_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_full_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
columnar/benches/bench_merge.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
pub mod common;
|
||||
|
||||
use binggan::{black_box, BenchRunner};
|
||||
use common::{generate_columnar_with_name, Card};
|
||||
use tantivy_columnar::*;
|
||||
|
||||
const NUM_DOCS: u32 = 100_000;
|
||||
|
||||
fn main() {
|
||||
let mut inputs = Vec::new();
|
||||
|
||||
let mut add_combo = |card1: Card, card2: Card| {
|
||||
inputs.push((
|
||||
format!("merge_{card1}_and_{card2}"),
|
||||
vec![
|
||||
generate_columnar_with_name(card1, NUM_DOCS, "price"),
|
||||
generate_columnar_with_name(card2, NUM_DOCS, "price"),
|
||||
],
|
||||
));
|
||||
};
|
||||
|
||||
add_combo(Card::Multi, Card::Multi);
|
||||
add_combo(Card::MultiSparse, Card::MultiSparse);
|
||||
add_combo(Card::Dense, Card::Dense);
|
||||
add_combo(Card::Sparse, Card::Sparse);
|
||||
add_combo(Card::Sparse, Card::Dense);
|
||||
add_combo(Card::MultiSparse, Card::Dense);
|
||||
add_combo(Card::MultiSparse, Card::Sparse);
|
||||
add_combo(Card::Multi, Card::Dense);
|
||||
add_combo(Card::Multi, Card::Sparse);
|
||||
|
||||
let runner: BenchRunner = BenchRunner::new();
|
||||
let mut group = runner.new_group();
|
||||
for (input_name, columnar_readers) in inputs.iter() {
|
||||
group.register_with_input(
|
||||
input_name,
|
||||
columnar_readers,
|
||||
move |columnar_readers: &Vec<ColumnarReader>| {
|
||||
let mut out = Vec::new();
|
||||
let columnar_readers = columnar_readers.iter().collect::<Vec<_>>();
|
||||
let merge_row_order = StackMergeOrder::stack(&columnar_readers[..]);
|
||||
|
||||
merge_columnar(&columnar_readers, &[], merge_row_order.into(), &mut out).unwrap();
|
||||
Some(out.len() as u64)
|
||||
},
|
||||
);
|
||||
}
|
||||
group.run();
|
||||
}
|
||||
@@ -16,14 +16,6 @@ fn generate_permutation() -> Vec<u64> {
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
columnar/benches/common.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
extern crate tantivy_columnar;
|
||||
|
||||
use core::fmt;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
use tantivy_columnar::{ColumnarReader, ColumnarWriter};
|
||||
|
||||
pub enum Card {
|
||||
MultiSparse,
|
||||
Multi,
|
||||
Sparse,
|
||||
Dense,
|
||||
Full,
|
||||
}
|
||||
impl Display for Card {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Card::MultiSparse => write!(f, "multi sparse 1/13"),
|
||||
Card::Multi => write!(f, "multi 2x"),
|
||||
Card::Sparse => write!(f, "sparse 1/13"),
|
||||
Card::Dense => write!(f, "dense 1/12"),
|
||||
Card::Full => write!(f, "full"),
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn generate_columnar_with_name(card: Card, num_docs: u32, column_name: &str) -> ColumnarReader {
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
|
||||
if let Card::MultiSparse = card {
|
||||
columnar_writer.record_numerical(0, column_name, 10u64);
|
||||
columnar_writer.record_numerical(0, column_name, 10u64);
|
||||
}
|
||||
|
||||
for i in 0..num_docs {
|
||||
match card {
|
||||
Card::MultiSparse | Card::Sparse => {
|
||||
if i % 13 == 0 {
|
||||
columnar_writer.record_numerical(i, column_name, i as u64);
|
||||
}
|
||||
}
|
||||
Card::Dense => {
|
||||
if i % 12 == 0 {
|
||||
columnar_writer.record_numerical(i, column_name, i as u64);
|
||||
}
|
||||
}
|
||||
Card::Full => {
|
||||
columnar_writer.record_numerical(i, column_name, i as u64);
|
||||
}
|
||||
Card::Multi => {
|
||||
columnar_writer.record_numerical(i, column_name, i as u64);
|
||||
columnar_writer.record_numerical(i, column_name, i as u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut wrt: Vec<u8> = Vec::new();
|
||||
columnar_writer.serialize(num_docs, &mut wrt).unwrap();
|
||||
ColumnarReader::open(wrt).unwrap()
|
||||
}
|
||||
@@ -8,7 +8,6 @@ license = "MIT"
|
||||
columnar = {path="../", package="tantivy-columnar"}
|
||||
serde_json = "1"
|
||||
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
|
||||
serde = "1"
|
||||
|
||||
[workspace]
|
||||
members = []
|
||||
|
||||
columnar/compat_tests_data/v1.columnar and v2.columnar (new binary files, not shown)
@@ -10,7 +10,7 @@
|
||||
|
||||
# Perf and Size
|
||||
* remove alloc in `ord_to_term`
|
||||
+ multivaued range queries restrat frm the beginning all of the time.
|
||||
+ multivalued range queries restart from the beginning all of the time.
|
||||
* re-add ZSTD compression for dictionaries
|
||||
no systematic monotonic mapping
|
||||
consider removing multilinear
|
||||
@@ -30,7 +30,7 @@ investigate if should have better errors? io::Error is overused at the moment.
|
||||
rename rank/select in unit tests
|
||||
Review the public API via cargo doc
|
||||
go through TODOs
|
||||
remove all doc_id occurences -> row_id
|
||||
remove all doc_id occurrences -> row_id
|
||||
use the rank & select naming in unit tests branch.
|
||||
multi-linear -> blockwise
|
||||
linear codec -> simply a multiplication for the index column
|
||||
@@ -43,5 +43,5 @@ isolate u128_based and uniform naming
|
||||
# Other
|
||||
fix enhance column-cli
|
||||
|
||||
# Santa claus
|
||||
# Santa Claus
|
||||
autodetect datetime ipaddr, plug customizable tokenizer.
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use crate::{Column, DocId, RowId};
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ColumnBlockAccessor<T> {
|
||||
val_cache: Vec<T>,
|
||||
docid_cache: Vec<DocId>,
|
||||
missing_docids_cache: Vec<DocId>,
|
||||
row_id_cache: Vec<RowId>,
|
||||
}
|
||||
|
||||
@@ -11,14 +14,40 @@ impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
|
||||
ColumnBlockAccessor<T>
|
||||
{
|
||||
#[inline]
|
||||
pub fn fetch_block(&mut self, docs: &[u32], accessor: &Column<T>) {
|
||||
self.docid_cache.clear();
|
||||
self.row_id_cache.clear();
|
||||
accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
|
||||
self.val_cache.resize(self.row_id_cache.len(), T::default());
|
||||
accessor
|
||||
.values
|
||||
.get_vals(&self.row_id_cache, &mut self.val_cache);
|
||||
pub fn fetch_block<'a>(&'a mut self, docs: &'a [u32], accessor: &Column<T>) {
|
||||
if accessor.index.get_cardinality().is_full() {
|
||||
self.val_cache.resize(docs.len(), T::default());
|
||||
accessor.values.get_vals(docs, &mut self.val_cache);
|
||||
} else {
|
||||
self.docid_cache.clear();
|
||||
self.row_id_cache.clear();
|
||||
accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
|
||||
self.val_cache.resize(self.row_id_cache.len(), T::default());
|
||||
accessor
|
||||
.values
|
||||
.get_vals(&self.row_id_cache, &mut self.val_cache);
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub fn fetch_block_with_missing(&mut self, docs: &[u32], accessor: &Column<T>, missing: T) {
|
||||
self.fetch_block(docs, accessor);
|
||||
// no missing values
|
||||
if accessor.index.get_cardinality().is_full() {
|
||||
return;
|
||||
}
|
||||
|
||||
// We can compare docid_cache length with docs to find missing docs
|
||||
// For multi value columns we can't rely on the length and always need to scan
|
||||
if accessor.index.get_cardinality().is_multivalue() || docs.len() != self.docid_cache.len()
|
||||
{
|
||||
self.missing_docids_cache.clear();
|
||||
find_missing_docs(docs, &self.docid_cache, |doc| {
|
||||
self.missing_docids_cache.push(doc);
|
||||
self.val_cache.push(missing);
|
||||
});
|
||||
self.docid_cache
|
||||
.extend_from_slice(&self.missing_docids_cache);
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -27,10 +56,103 @@ impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn iter_docid_vals(&self) -> impl Iterator<Item = (DocId, T)> + '_ {
|
||||
self.docid_cache
|
||||
.iter()
|
||||
.cloned()
|
||||
.zip(self.val_cache.iter().cloned())
|
||||
/// Returns an iterator over the docids and values
|
||||
/// The passed in `docs` slice needs to be the same slice that was passed to `fetch_block` or
|
||||
/// `fetch_block_with_missing`.
|
||||
///
|
||||
/// The `docs` slice is used if the column is full (each doc has exactly one value); otherwise the
/// internal docid vec, which may contain duplicate docs, is used for the iterator.
|
||||
pub fn iter_docid_vals<'a>(
|
||||
&'a self,
|
||||
docs: &'a [u32],
|
||||
accessor: &Column<T>,
|
||||
) -> impl Iterator<Item = (DocId, T)> + '_ {
|
||||
if accessor.index.get_cardinality().is_full() {
|
||||
docs.iter().cloned().zip(self.val_cache.iter().cloned())
|
||||
} else {
|
||||
self.docid_cache
|
||||
.iter()
|
||||
.cloned()
|
||||
.zip(self.val_cache.iter().cloned())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Given two sorted lists of docids `docs` and `hits`, hits is a subset of `docs`.
|
||||
/// Return all docs that are not in `hits`.
|
||||
fn find_missing_docs<F>(docs: &[u32], hits: &[u32], mut callback: F)
|
||||
where F: FnMut(u32) {
|
||||
let mut docs_iter = docs.iter();
|
||||
let mut hits_iter = hits.iter();
|
||||
|
||||
let mut doc = docs_iter.next();
|
||||
let mut hit = hits_iter.next();
|
||||
|
||||
while let (Some(¤t_doc), Some(¤t_hit)) = (doc, hit) {
|
||||
match current_doc.cmp(¤t_hit) {
|
||||
Ordering::Less => {
|
||||
callback(current_doc);
|
||||
doc = docs_iter.next();
|
||||
}
|
||||
Ordering::Equal => {
|
||||
doc = docs_iter.next();
|
||||
hit = hits_iter.next();
|
||||
}
|
||||
Ordering::Greater => {
|
||||
hit = hits_iter.next();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while let Some(¤t_doc) = doc {
|
||||
callback(current_doc);
|
||||
doc = docs_iter.next();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_find_missing_docs() {
|
||||
let docs: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
|
||||
let hits: Vec<u32> = vec![2, 4, 6, 8, 10];
|
||||
|
||||
let mut missing_docs: Vec<u32> = Vec::new();
|
||||
|
||||
find_missing_docs(&docs, &hits, |missing_doc| {
|
||||
missing_docs.push(missing_doc);
|
||||
});
|
||||
|
||||
assert_eq!(missing_docs, vec![1, 3, 5, 7, 9]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_missing_docs_empty() {
|
||||
let docs: Vec<u32> = Vec::new();
|
||||
let hits: Vec<u32> = vec![2, 4, 6, 8, 10];
|
||||
|
||||
let mut missing_docs: Vec<u32> = Vec::new();
|
||||
|
||||
find_missing_docs(&docs, &hits, |missing_doc| {
|
||||
missing_docs.push(missing_doc);
|
||||
});
|
||||
|
||||
assert_eq!(missing_docs, vec![]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_missing_docs_all_missing() {
|
||||
let docs: Vec<u32> = vec![1, 2, 3, 4, 5];
|
||||
let hits: Vec<u32> = Vec::new();
|
||||
|
||||
let mut missing_docs: Vec<u32> = Vec::new();
|
||||
|
||||
find_missing_docs(&docs, &hits, |missing_doc| {
|
||||
missing_docs.push(missing_doc);
|
||||
});
|
||||
|
||||
assert_eq!(missing_docs, vec![1, 2, 3, 4, 5]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,6 +30,13 @@ impl fmt::Debug for BytesColumn {
|
||||
}
|
||||
|
||||
impl BytesColumn {
|
||||
pub fn empty(num_docs: u32) -> BytesColumn {
|
||||
BytesColumn {
|
||||
dictionary: Arc::new(Dictionary::empty()),
|
||||
term_ord_column: Column::build_empty_column(num_docs),
|
||||
}
|
||||
}
|
||||
|
||||
/// Fills the given `output` buffer with the term associated to the ordinal `ord`.
|
||||
///
|
||||
/// Returns `false` if the term does not exist (e.g. `term_ord` is greater or equal to the
|
||||
@@ -77,7 +84,7 @@ impl From<StrColumn> for BytesColumn {
|
||||
}
|
||||
|
||||
impl StrColumn {
|
||||
pub(crate) fn wrap(bytes_column: BytesColumn) -> StrColumn {
|
||||
pub fn wrap(bytes_column: BytesColumn) -> StrColumn {
|
||||
StrColumn(bytes_column)
|
||||
}
|
||||
|
||||
|
||||
@@ -3,17 +3,17 @@ mod serialize;
|
||||
|
||||
use std::fmt::{self, Debug};
|
||||
use std::io::Write;
|
||||
use std::ops::{Deref, Range, RangeInclusive};
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::BinarySerializable;
|
||||
pub use dictionary_encoded::{BytesColumn, StrColumn};
|
||||
pub use serialize::{
|
||||
open_column_bytes, open_column_str, open_column_u128, open_column_u64,
|
||||
serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
|
||||
open_column_bytes, open_column_str, open_column_u128, open_column_u128_as_compact_u64,
|
||||
open_column_u64, serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
|
||||
};
|
||||
|
||||
use crate::column_index::ColumnIndex;
|
||||
use crate::column_index::{ColumnIndex, Set};
|
||||
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
|
||||
use crate::column_values::{monotonic_map_column, ColumnValues};
|
||||
use crate::{Cardinality, DocId, EmptyColumnValues, MonotonicallyMappableToU64, RowId};
|
||||
@@ -83,10 +83,36 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
self.values.max_value()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn first(&self, row_id: RowId) -> Option<T> {
|
||||
self.values_for_doc(row_id).next()
|
||||
}
|
||||
|
||||
/// Load the first value for each docid in the provided slice.
|
||||
#[inline]
|
||||
pub fn first_vals(&self, docids: &[DocId], output: &mut [Option<T>]) {
|
||||
match &self.index {
|
||||
ColumnIndex::Empty { .. } => {}
|
||||
ColumnIndex::Full => self.values.get_vals_opt(docids, output),
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
for (i, docid) in docids.iter().enumerate() {
|
||||
output[i] = optional_index
|
||||
.rank_if_exists(*docid)
|
||||
.map(|rowid| self.values.get_val(rowid));
|
||||
}
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
for (i, docid) in docids.iter().enumerate() {
|
||||
let range = multivalued_index.range(*docid);
|
||||
let is_empty = range.start == range.end;
|
||||
if !is_empty {
|
||||
output[i] = Some(self.values.get_val(range.start));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Translates a block of docids to row_ids.
|
||||
///
|
||||
/// returns the row_ids and the matching docids on the same index
|
||||
@@ -105,11 +131,12 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
}
|
||||
|
||||
pub fn values_for_doc(&self, doc_id: DocId) -> impl Iterator<Item = T> + '_ {
|
||||
self.value_row_ids(doc_id)
|
||||
self.index
|
||||
.value_row_ids(doc_id)
|
||||
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
|
||||
}
|
||||
|
||||
/// Get the docids of values which are in the provided value range.
|
||||
/// Get the docids of values which are in the provided value and docid range.
|
||||
#[inline]
|
||||
pub fn get_docids_for_value_range(
|
||||
&self,
|
||||
@@ -130,7 +157,7 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
.select_batch_in_place(selected_docid_range.start, doc_ids);
|
||||
}
|
||||
|
||||
/// Fils the output vector with the (possibly multiple values that are associated_with
|
||||
/// Fills the output vector with the (possibly multiple values that are associated_with
|
||||
/// `row_id`.
|
||||
///
|
||||
/// This method clears the `output` vector.
|
||||
@@ -147,14 +174,6 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for Column<T> {
|
||||
type Target = ColumnIndex;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.index
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for Cardinality {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.to_code().serialize(writer)
|
||||
@@ -176,6 +195,7 @@ struct FirstValueWithDefault<T: Copy> {
|
||||
impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
|
||||
for FirstValueWithDefault<T>
|
||||
{
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.column.first(idx).unwrap_or(self.default_value)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use crate::column_values::{
|
||||
CodecType, MonotonicallyMappableToU128, MonotonicallyMappableToU64,
|
||||
};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::StrColumn;
|
||||
use crate::{StrColumn, Version};
|
||||
|
||||
pub fn serialize_column_mappable_to_u128<T: MonotonicallyMappableToU128>(
|
||||
column_index: SerializableColumnIndex<'_>,
|
||||
@@ -40,25 +40,9 @@ pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::Result<Column<T>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
column_index_num_bytes_payload
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data)?;
|
||||
let column_values = load_u64_based_column_values(column_values_data)?;
|
||||
Ok(Column {
|
||||
index: column_index,
|
||||
values: column_values,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
pub fn open_column_u64<T: MonotonicallyMappableToU64>(
|
||||
bytes: OwnedBytes,
|
||||
format_version: Version,
|
||||
) -> io::Result<Column<T>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
@@ -68,7 +52,27 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data)?;
|
||||
let column_index = crate::column_index::open_column_index(column_index_data, format_version)?;
|
||||
let column_values = load_u64_based_column_values(column_values_data)?;
|
||||
Ok(Column {
|
||||
index: column_index,
|
||||
values: column_values,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
bytes: OwnedBytes,
|
||||
format_version: Version,
|
||||
) -> io::Result<Column<T>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
column_index_num_bytes_payload
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data, format_version)?;
|
||||
let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
|
||||
Ok(Column {
|
||||
index: column_index,
|
||||
@@ -76,19 +80,42 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
|
||||
/// Open the column as u64.
|
||||
///
|
||||
/// See [`open_u128_as_compact_u64`] for more details.
|
||||
pub fn open_column_u128_as_compact_u64(
|
||||
bytes: OwnedBytes,
|
||||
format_version: Version,
|
||||
) -> io::Result<Column<u64>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
column_index_num_bytes_payload
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data, format_version)?;
|
||||
let column_values = crate::column_values::open_u128_as_compact_u64(column_values_data)?;
|
||||
Ok(Column {
|
||||
index: column_index,
|
||||
values: column_values,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_bytes(data: OwnedBytes, format_version: Version) -> io::Result<BytesColumn> {
|
||||
let (body, dictionary_len_bytes) = data.rsplit(4);
|
||||
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
|
||||
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
|
||||
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
|
||||
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
|
||||
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes, format_version)?;
|
||||
Ok(BytesColumn {
|
||||
dictionary,
|
||||
term_ord_column,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_str(data: OwnedBytes) -> io::Result<StrColumn> {
|
||||
let bytes_column = open_column_bytes(data)?;
|
||||
pub fn open_column_str(data: OwnedBytes, format_version: Version) -> io::Result<StrColumn> {
|
||||
let bytes_column = open_column_bytes(data, format_version)?;
|
||||
Ok(StrColumn::wrap(bytes_column))
|
||||
}
|
||||
|
||||
@@ -95,8 +95,12 @@ pub fn merge_column_index<'a>(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common::OwnedBytes;
|
||||
|
||||
use crate::column_index::merge::detect_cardinality;
|
||||
use crate::column_index::multivalued_index::MultiValueIndex;
|
||||
use crate::column_index::multivalued_index::{
|
||||
open_multivalued_index, serialize_multivalued_index, MultiValueIndex,
|
||||
};
|
||||
use crate::column_index::{merge_column_index, OptionalIndex, SerializableColumnIndex};
|
||||
use crate::{
|
||||
Cardinality, ColumnIndex, MergeRowOrder, RowAddr, RowId, ShuffleMergeOrder, StackMergeOrder,
|
||||
@@ -168,9 +172,14 @@ mod tests {
|
||||
)
|
||||
.into();
|
||||
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
|
||||
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
|
||||
else { panic!("Excpected a multivalued index") };
|
||||
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
|
||||
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
|
||||
panic!("Expected a multivalued index")
|
||||
};
|
||||
let mut output = Vec::new();
|
||||
serialize_multivalued_index(&start_index_iterable, &mut output).unwrap();
|
||||
let multivalue =
|
||||
open_multivalued_index(OwnedBytes::new(output), crate::Version::V2).unwrap();
|
||||
let start_indexes: Vec<RowId> = multivalue.get_start_index_column().iter().collect();
|
||||
assert_eq!(&start_indexes, &[0, 3, 5]);
|
||||
}
|
||||
|
||||
@@ -199,10 +208,16 @@ mod tests {
|
||||
],
|
||||
)
|
||||
.into();
|
||||
|
||||
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
|
||||
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
|
||||
else { panic!("Excpected a multivalued index") };
|
||||
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
|
||||
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
|
||||
panic!("Expected a multivalued index")
|
||||
};
|
||||
let mut output = Vec::new();
|
||||
serialize_multivalued_index(&start_index_iterable, &mut output).unwrap();
|
||||
let multivalue =
|
||||
open_multivalued_index(OwnedBytes::new(output), crate::Version::V2).unwrap();
|
||||
let start_indexes: Vec<RowId> = multivalue.get_start_index_column().iter().collect();
|
||||
assert_eq!(&start_indexes, &[0, 3, 5, 6]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use std::iter;
|
||||
|
||||
use crate::column_index::{SerializableColumnIndex, Set};
|
||||
use crate::column_index::{
|
||||
SerializableColumnIndex, SerializableMultivalueIndex, SerializableOptionalIndex, Set,
|
||||
};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};
|
||||
|
||||
@@ -14,15 +16,24 @@ pub fn merge_column_index_shuffled<'a>(
|
||||
Cardinality::Optional => {
|
||||
let non_null_row_ids =
|
||||
merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
|
||||
SerializableColumnIndex::Optional {
|
||||
SerializableColumnIndex::Optional(SerializableOptionalIndex {
|
||||
non_null_row_ids,
|
||||
num_rows: shuffle_merge_order.num_rows(),
|
||||
}
|
||||
})
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalue_start_index =
|
||||
merge_column_index_shuffled_multivalued(column_indexes, shuffle_merge_order);
|
||||
SerializableColumnIndex::Multivalued(multivalue_start_index)
|
||||
let non_null_row_ids =
|
||||
merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
|
||||
SerializableColumnIndex::Multivalued(SerializableMultivalueIndex {
|
||||
doc_ids_with_values: SerializableOptionalIndex {
|
||||
non_null_row_ids,
|
||||
num_rows: shuffle_merge_order.num_rows(),
|
||||
},
|
||||
start_offsets: merge_column_index_shuffled_multivalued(
|
||||
column_indexes,
|
||||
shuffle_merge_order,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -102,11 +113,18 @@ fn iter_num_values<'a>(
|
||||
|
||||
/// Transforms an iterator over the number of vals per row (with `num_rows` elements)
/// into a `start_offset` iterator starting at 0, with one element per non-empty row
/// plus the leading 0.
///
/// Rows with 0 values are filtered out, as these are already covered by the optional
/// index of the multivalued index.
|
||||
fn integrate_num_vals(num_vals: impl Iterator<Item = u32>) -> impl Iterator<Item = RowId> {
|
||||
iter::once(0u32).chain(num_vals.scan(0, |state, num_vals| {
|
||||
*state += num_vals;
|
||||
Some(*state)
|
||||
}))
|
||||
iter::once(0u32).chain(
|
||||
num_vals
|
||||
.filter(|num_vals| *num_vals != 0)
|
||||
.scan(0, |state, num_vals| {
|
||||
*state += num_vals;
|
||||
Some(*state)
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
impl<'a> Iterable<u32> for ShuffledMultivaluedIndex<'a> {
|
||||
@@ -134,13 +152,13 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_integrate_num_vals_several() {
|
||||
assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 3, 13, 33].into_iter()));
|
||||
assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 13, 33].into_iter()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_column_index_optional_shuffle() {
|
||||
let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
|
||||
let column_indexes = vec![optional_index, ColumnIndex::Full];
|
||||
let column_indexes = [optional_index, ColumnIndex::Full];
|
||||
let row_addrs = vec![
|
||||
RowAddr {
|
||||
segment_ord: 0u32,
|
||||
@@ -157,7 +175,13 @@ mod tests {
|
||||
Cardinality::Optional,
|
||||
&shuffle_merge_order,
|
||||
);
|
||||
let SerializableColumnIndex::Optional { non_null_row_ids, num_rows } = serializable_index else { panic!() };
|
||||
let SerializableColumnIndex::Optional(SerializableOptionalIndex {
|
||||
non_null_row_ids,
|
||||
num_rows,
|
||||
}) = serializable_index
|
||||
else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(num_rows, 2);
|
||||
let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect();
|
||||
assert_eq!(&non_null_rows, &[1]);
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use std::iter;
|
||||
use std::ops::Range;
|
||||
|
||||
use crate::column_index::{SerializableColumnIndex, Set};
|
||||
use crate::column_index::multivalued_index::{MultiValueIndex, SerializableMultivalueIndex};
|
||||
use crate::column_index::serialize::SerializableOptionalIndex;
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};
|
||||
|
||||
@@ -15,23 +17,149 @@ pub fn merge_column_index_stacked<'a>(
|
||||
) -> SerializableColumnIndex<'a> {
|
||||
match cardinality_after_merge {
|
||||
Cardinality::Full => SerializableColumnIndex::Full,
|
||||
Cardinality::Optional => SerializableColumnIndex::Optional {
|
||||
Cardinality::Optional => SerializableColumnIndex::Optional(SerializableOptionalIndex {
|
||||
non_null_row_ids: Box::new(StackedOptionalIndex {
|
||||
columns,
|
||||
stack_merge_order,
|
||||
}),
|
||||
num_rows: stack_merge_order.num_rows(),
|
||||
},
|
||||
}),
|
||||
Cardinality::Multivalued => {
|
||||
let stacked_multivalued_index = StackedMultivaluedIndex {
|
||||
columns,
|
||||
stack_merge_order,
|
||||
};
|
||||
SerializableColumnIndex::Multivalued(Box::new(stacked_multivalued_index))
|
||||
let serializable_multivalue_index =
|
||||
make_serializable_multivalued_index(columns, stack_merge_order);
|
||||
SerializableColumnIndex::Multivalued(serializable_multivalue_index)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct StackedDocIdsWithValues<'a> {
|
||||
column_indexes: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
impl Iterable<u32> for StackedDocIdsWithValues<'_> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
Box::new((0..self.column_indexes.len()).flat_map(|i| {
|
||||
let column_index = &self.column_indexes[i];
|
||||
let doc_range = self.stack_merge_order.columnar_range(i);
|
||||
get_doc_ids_with_values(column_index, doc_range)
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
fn get_doc_ids_with_values<'a>(
|
||||
column_index: &'a ColumnIndex,
|
||||
doc_range: Range<u32>,
|
||||
) -> Box<dyn Iterator<Item = u32> + 'a> {
|
||||
match column_index {
|
||||
ColumnIndex::Empty { .. } => Box::new(0..0),
|
||||
ColumnIndex::Full => Box::new(doc_range),
|
||||
ColumnIndex::Optional(optional_index) => Box::new(
|
||||
optional_index
|
||||
.iter_rows()
|
||||
.map(move |row| row + doc_range.start),
|
||||
),
|
||||
ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
|
||||
MultiValueIndex::MultiValueIndexV1(multivalued_index) => {
|
||||
Box::new((0..multivalued_index.num_docs()).filter_map(move |docid| {
|
||||
let range = multivalued_index.range(docid);
|
||||
if range.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(docid + doc_range.start)
|
||||
}
|
||||
}))
|
||||
}
|
||||
MultiValueIndex::MultiValueIndexV2(multivalued_index) => Box::new(
|
||||
multivalued_index
|
||||
.optional_index
|
||||
.iter_rows()
|
||||
.map(move |row| row + doc_range.start),
|
||||
),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn stack_doc_ids_with_values<'a>(
|
||||
column_indexes: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
) -> SerializableOptionalIndex<'a> {
|
||||
let num_rows = stack_merge_order.num_rows();
|
||||
SerializableOptionalIndex {
|
||||
non_null_row_ids: Box::new(StackedDocIdsWithValues {
|
||||
column_indexes,
|
||||
stack_merge_order,
|
||||
}),
|
||||
num_rows,
|
||||
}
|
||||
}
|
||||
|
||||
struct StackedStartOffsets<'a> {
|
||||
column_indexes: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
fn get_num_values_iterator<'a>(
|
||||
column_index: &'a ColumnIndex,
|
||||
num_docs: u32,
|
||||
) -> Box<dyn Iterator<Item = u32> + 'a> {
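// Returns an iterator over the number of values per doc for one column index:
// Full yields 1 for every doc, Optional yields 1 for every doc that has a value,
// and Multivalued yields the differences between consecutive start offsets
// (e.g. start offsets [0, 3, 5] yield the counts [3, 2]).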
|
||||
match column_index {
|
||||
ColumnIndex::Empty { .. } => Box::new(std::iter::empty()),
|
||||
ColumnIndex::Full => Box::new(std::iter::repeat(1u32).take(num_docs as usize)),
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
Box::new(std::iter::repeat(1u32).take(optional_index.num_non_nulls() as usize))
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => Box::new(
|
||||
multivalued_index
|
||||
.get_start_index_column()
|
||||
.iter()
|
||||
.scan(0u32, |previous_start_offset, current_start_offset| {
|
||||
let num_vals = current_start_offset - *previous_start_offset;
|
||||
*previous_start_offset = current_start_offset;
|
||||
Some(num_vals)
|
||||
})
|
||||
.skip(1),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterable<u32> for StackedStartOffsets<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
let num_values_it = (0..self.column_indexes.len()).flat_map(|columnar_id| {
|
||||
let num_docs = self.stack_merge_order.columnar_range(columnar_id).len() as u32;
|
||||
let column_index = &self.column_indexes[columnar_id];
|
||||
get_num_values_iterator(column_index, num_docs)
|
||||
});
|
||||
Box::new(std::iter::once(0u32).chain(num_values_it.into_iter().scan(
|
||||
0u32,
|
||||
|cumulated, el| {
|
||||
*cumulated += el;
|
||||
Some(*cumulated)
|
||||
},
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
fn stack_start_offsets<'a>(
|
||||
column_indexes: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
) -> Box<dyn Iterable<u32> + 'a> {
|
||||
Box::new(StackedStartOffsets {
|
||||
column_indexes,
|
||||
stack_merge_order,
|
||||
})
|
||||
}
|
||||
|
||||
fn make_serializable_multivalued_index<'a>(
|
||||
columns: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
) -> SerializableMultivalueIndex<'a> {
|
||||
SerializableMultivalueIndex {
|
||||
doc_ids_with_values: stack_doc_ids_with_values(columns, stack_merge_order),
|
||||
start_offsets: stack_start_offsets(columns, stack_merge_order),
|
||||
}
|
||||
}
|
||||
|
||||
struct StackedOptionalIndex<'a> {
|
||||
columns: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
@@ -62,90 +190,3 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct StackedMultivaluedIndex<'a> {
|
||||
columns: &'a [ColumnIndex],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
fn convert_column_opt_to_multivalued_index<'a>(
|
||||
column_index_opt: &'a ColumnIndex,
|
||||
num_rows: RowId,
|
||||
) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
match column_index_opt {
|
||||
ColumnIndex::Empty { .. } => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
|
||||
ColumnIndex::Full => Box::new(0..num_rows + 1),
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
Box::new(
|
||||
(0..num_rows)
|
||||
// TODO optimize
|
||||
.map(|row_id| optional_index.rank(row_id))
|
||||
.chain(std::iter::once(optional_index.num_non_nulls())),
|
||||
)
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => multivalued_index.start_index_column.iter(),
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterable<RowId> for StackedMultivaluedIndex<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + '_> {
|
||||
let multivalued_indexes =
|
||||
self.columns
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(columnar_id, column_opt)| {
|
||||
let num_rows =
|
||||
self.stack_merge_order.columnar_range(columnar_id).len() as RowId;
|
||||
convert_column_opt_to_multivalued_index(column_opt, num_rows)
|
||||
});
|
||||
stack_multivalued_indexes(multivalued_indexes)
|
||||
}
|
||||
}
|
||||
|
||||
// Refactor me
|
||||
fn stack_multivalued_indexes<'a>(
|
||||
mut multivalued_indexes: impl Iterator<Item = Box<dyn Iterator<Item = RowId> + 'a>> + 'a,
|
||||
) -> Box<dyn Iterator<Item = RowId> + 'a> {
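// Stacks the per-column start-offset iterators into one monotonically increasing
// sequence: for every iterator after the first, the leading element (its 0 start
// offset) is dropped and the remaining values are shifted by the last offset
// emitted so far (see `test_stack` below).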
|
||||
let mut offset = 0;
|
||||
let mut last_row_id = 0;
|
||||
let mut current_it = multivalued_indexes.next();
|
||||
Box::new(std::iter::from_fn(move || loop {
|
||||
let Some(multivalued_index) = current_it.as_mut() else {
|
||||
return None;
|
||||
};
|
||||
if let Some(row_id) = multivalued_index.next() {
|
||||
last_row_id = offset + row_id;
|
||||
return Some(last_row_id);
|
||||
}
|
||||
offset = last_row_id;
|
||||
loop {
|
||||
current_it = multivalued_indexes.next();
|
||||
if current_it.as_mut()?.next().is_some() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::RowId;
|
||||
|
||||
fn it<'a>(row_ids: &'a [RowId]) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
Box::new(row_ids.iter().copied())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stack() {
|
||||
let columns = [
|
||||
it(&[0u32, 0u32]),
|
||||
it(&[0u32, 1u32, 1u32, 4u32]),
|
||||
it(&[0u32, 3u32, 5u32]),
|
||||
it(&[0u32, 4u32]),
|
||||
]
|
||||
.into_iter();
|
||||
let start_offsets: Vec<RowId> = super::stack_multivalued_indexes(columns).collect();
|
||||
assert_eq!(start_offsets, &[0, 0, 1, 1, 4, 7, 9, 13]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
//! # `column_index`
|
||||
//!
|
||||
//! `column_index` provides rank and select operations to associate documents with value
//! positions when not all documents have exactly one value.
|
||||
|
||||
mod merge;
|
||||
mod multivalued_index;
|
||||
mod optional_index;
|
||||
@@ -6,8 +11,11 @@ mod serialize;
|
||||
use std::ops::Range;
|
||||
|
||||
pub use merge::merge_column_index;
|
||||
pub(crate) use multivalued_index::SerializableMultivalueIndex;
|
||||
pub use optional_index::{OptionalIndex, Set};
|
||||
pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};
|
||||
pub use serialize::{
|
||||
open_column_index, serialize_column_index, SerializableColumnIndex, SerializableOptionalIndex,
|
||||
};
|
||||
|
||||
use crate::column_index::multivalued_index::MultiValueIndex;
|
||||
use crate::{Cardinality, DocId, RowId};
|
||||
@@ -20,7 +28,7 @@ pub enum ColumnIndex {
|
||||
Full,
|
||||
Optional(OptionalIndex),
|
||||
/// In addition, at index num_rows, an extra value is added
|
||||
/// containing the overal number of values.
|
||||
/// containing the overall number of values.
|
||||
Multivalued(MultiValueIndex),
|
||||
}
|
||||
|
||||
@@ -37,10 +45,10 @@ impl From<MultiValueIndex> for ColumnIndex {
|
||||
}
|
||||
|
||||
impl ColumnIndex {
|
||||
// Returns the cardinality of the column index.
|
||||
//
|
||||
// By convention, if the column contains no docs, we consider that it is
|
||||
// full.
|
||||
/// Returns the cardinality of the column index.
|
||||
///
|
||||
/// By convention, if the column contains no docs, we consider that it is
|
||||
/// full.
|
||||
#[inline]
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
@@ -117,24 +125,50 @@ impl ColumnIndex {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
|
||||
pub fn docid_range_to_rowids(&self, doc_id_range: Range<DocId>) -> Range<RowId> {
|
||||
match self {
|
||||
ColumnIndex::Empty { .. } => 0..0,
|
||||
ColumnIndex::Full => doc_id,
|
||||
ColumnIndex::Full => doc_id_range,
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
let row_start = optional_index.rank(doc_id.start);
|
||||
let row_end = optional_index.rank(doc_id.end);
|
||||
let row_start = optional_index.rank(doc_id_range.start);
|
||||
let row_end = optional_index.rank(doc_id_range.end);
|
||||
row_start..row_end
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
let end_docid = doc_id.end.min(multivalued_index.num_docs() - 1) + 1;
|
||||
let start_docid = doc_id.start.min(end_docid);
|
||||
ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
|
||||
MultiValueIndex::MultiValueIndexV1(index) => {
|
||||
let row_start = index.start_index_column.get_val(doc_id_range.start);
|
||||
let row_end = index.start_index_column.get_val(doc_id_range.end);
|
||||
row_start..row_end
|
||||
}
|
||||
MultiValueIndex::MultiValueIndexV2(index) => {
|
||||
// In this case we use the optional_index to select the next docids that
// have values. There are different cases to consider ("does not exist"
// below means the docid has no values and is therefore absent from the
// optional index):
// * doc_id_range may cover only docids which do not exist
// => rank gives us, for both bounds, the next document outside the range
// with a value. Both bounds get the same rank and therefore return an
// empty range.
//
// * doc_id_range.start and doc_id_range.end may not exist, but docids in
// between may have values
// => rank gives us the next document outside the range with a value.
//
// * doc_id_range.start may not exist but doc_id_range.end may exist
// * doc_id_range.start may exist but doc_id_range.end may not exist
// * doc_id_range.start and doc_id_range.end may both exist
// => rank on doc_id_range.end gives us the next value, which matches how
// the `start_index_column` works, so we get the value start of the next
// docid, which we use as the exclusive end of the range.
//
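// Worked example (matching `test_range_to_rowids` below): with 7 docs where
// only docs 1 and 5 have values (two each), the optional index holds {1, 5}
// and the start offsets are [0, 2, 4]. For doc_id_range 3..6, rank(3) == 1
// and rank(6) == 2, so the resulting row range is 2..4.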
|
||||
let rank_start = index.optional_index.rank(doc_id_range.start);
|
||||
let row_start = index.start_index_column.get_val(rank_start);
|
||||
let rank_end = index.optional_index.rank(doc_id_range.end);
|
||||
let row_end = index.start_index_column.get_val(rank_end);
|
||||
|
||||
let row_start = multivalued_index.start_index_column.get_val(start_docid);
|
||||
let row_end = multivalued_index.start_index_column.get_val(end_docid);
|
||||
|
||||
row_start..row_end
|
||||
}
|
||||
row_start..row_end
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,64 +3,98 @@ use std::io::Write;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use common::{CountingWriter, OwnedBytes};
|
||||
|
||||
use super::optional_index::{open_optional_index, serialize_optional_index};
|
||||
use super::{OptionalIndex, SerializableOptionalIndex, Set};
|
||||
use crate::column_values::{
|
||||
load_u64_based_column_values, serialize_u64_based_column_values, CodecType, ColumnValues,
|
||||
};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{DocId, RowId};
|
||||
use crate::{DocId, RowId, Version};
|
||||
|
||||
pub struct SerializableMultivalueIndex<'a> {
|
||||
pub doc_ids_with_values: SerializableOptionalIndex<'a>,
|
||||
pub start_offsets: Box<dyn Iterable<u32> + 'a>,
|
||||
}
|
||||
|
||||
pub fn serialize_multivalued_index(
|
||||
multivalued_index: &dyn Iterable<RowId>,
|
||||
multivalued_index: &SerializableMultivalueIndex,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()> {
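// Serialized layout: [optional index over docs with values][bitpacked or linear
// encoded start offsets][length of the optional index section as a little-endian
// u32]. The trailing length lets `open_multivalued_index` (Version::V2) split the
// two sections apart again.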
|
||||
let SerializableMultivalueIndex {
|
||||
doc_ids_with_values,
|
||||
start_offsets,
|
||||
} = multivalued_index;
|
||||
let mut count_writer = CountingWriter::wrap(output);
|
||||
let SerializableOptionalIndex {
|
||||
non_null_row_ids,
|
||||
num_rows,
|
||||
} = doc_ids_with_values;
|
||||
serialize_optional_index(&**non_null_row_ids, *num_rows, &mut count_writer)?;
|
||||
let optional_len = count_writer.written_bytes() as u32;
|
||||
let output = count_writer.finish();
|
||||
serialize_u64_based_column_values(
|
||||
multivalued_index,
|
||||
&**start_offsets,
|
||||
&[CodecType::Bitpacked, CodecType::Linear],
|
||||
output,
|
||||
)?;
|
||||
output.write_all(&optional_len.to_le_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<MultiValueIndex> {
|
||||
let start_index_column: Arc<dyn ColumnValues<RowId>> = load_u64_based_column_values(bytes)?;
|
||||
Ok(MultiValueIndex { start_index_column })
|
||||
pub fn open_multivalued_index(
|
||||
bytes: OwnedBytes,
|
||||
format_version: Version,
|
||||
) -> io::Result<MultiValueIndex> {
|
||||
match format_version {
|
||||
Version::V1 => {
|
||||
let start_index_column: Arc<dyn ColumnValues<RowId>> =
|
||||
load_u64_based_column_values(bytes)?;
|
||||
Ok(MultiValueIndex::MultiValueIndexV1(MultiValueIndexV1 {
|
||||
start_index_column,
|
||||
}))
|
||||
}
|
||||
Version::V2 => {
|
||||
let (body_bytes, optional_index_len) = bytes.rsplit(4);
|
||||
let optional_index_len =
|
||||
u32::from_le_bytes(optional_index_len.as_slice().try_into().unwrap());
|
||||
let (optional_index_bytes, start_index_bytes) =
|
||||
body_bytes.split(optional_index_len as usize);
|
||||
let optional_index = open_optional_index(optional_index_bytes)?;
|
||||
let start_index_column: Arc<dyn ColumnValues<RowId>> =
|
||||
load_u64_based_column_values(start_index_bytes)?;
|
||||
Ok(MultiValueIndex::MultiValueIndexV2(MultiValueIndexV2 {
|
||||
optional_index,
|
||||
start_index_column,
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Index to resolve the value range for a given doc_id.
|
||||
/// Starts at 0.
|
||||
pub struct MultiValueIndex {
|
||||
pub enum MultiValueIndex {
|
||||
MultiValueIndexV1(MultiValueIndexV1),
|
||||
MultiValueIndexV2(MultiValueIndexV2),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Index to resolve the value range for a given doc_id.
|
||||
/// Starts at 0.
|
||||
pub struct MultiValueIndexV1 {
|
||||
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for MultiValueIndex {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
f.debug_struct("MultiValuedIndex")
|
||||
.field("num_rows", &self.start_index_column.num_vals())
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
|
||||
fn from(start_index_column: Arc<dyn ColumnValues<RowId>>) -> Self {
|
||||
MultiValueIndex { start_index_column }
|
||||
}
|
||||
}
|
||||
|
||||
impl MultiValueIndex {
|
||||
pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_multivalued_index(&start_offsets, &mut buffer).unwrap();
|
||||
let bytes = OwnedBytes::new(buffer);
|
||||
open_multivalued_index(bytes).unwrap()
|
||||
}
|
||||
|
||||
impl MultiValueIndexV1 {
|
||||
/// Returns `[start, end)`, such that the values associated with
|
||||
/// the given document are `start..end`.
|
||||
#[inline]
|
||||
pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
|
||||
if doc_id >= self.num_docs() {
|
||||
return 0..0;
|
||||
}
|
||||
let start = self.start_index_column.get_val(doc_id);
|
||||
let end = self.start_index_column.get_val(doc_id + 1);
|
||||
start..end
|
||||
@@ -83,7 +117,6 @@ impl MultiValueIndex {
|
||||
///
|
||||
/// TODO: Instead of a linear scan we could employ an exponential search followed by a binary
/// search to match a docid to its value position.
|
||||
#[allow(clippy::bool_to_int_with_if)]
|
||||
pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
|
||||
if ranks.is_empty() {
|
||||
return;
|
||||
@@ -111,11 +144,170 @@ impl MultiValueIndex {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Index to resolve the value range for a given doc_id.
|
||||
/// Starts at 0.
|
||||
pub struct MultiValueIndexV2 {
|
||||
pub optional_index: OptionalIndex,
|
||||
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for MultiValueIndex {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
let index = match self {
|
||||
MultiValueIndex::MultiValueIndexV1(idx) => &idx.start_index_column,
|
||||
MultiValueIndex::MultiValueIndexV2(idx) => &idx.start_index_column,
|
||||
};
|
||||
f.debug_struct("MultiValuedIndex")
|
||||
.field("num_rows", &index.num_vals())
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl MultiValueIndex {
|
||||
pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
|
||||
assert!(!start_offsets.is_empty());
|
||||
assert_eq!(start_offsets[0], 0);
|
||||
let mut doc_with_values = Vec::new();
|
||||
let mut compact_start_offsets: Vec<u32> = vec![0];
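// e.g. start_offsets [0, 3, 3, 5] become docs with values [0, 2] and compact
// start offsets [0, 3, 5]: doc 1 has no values and is only represented in the
// optional index.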
|
||||
for doc in 0..start_offsets.len() - 1 {
|
||||
if start_offsets[doc] < start_offsets[doc + 1] {
|
||||
doc_with_values.push(doc as RowId);
|
||||
compact_start_offsets.push(start_offsets[doc + 1]);
|
||||
}
|
||||
}
|
||||
let serializable_multivalued_index = SerializableMultivalueIndex {
|
||||
doc_ids_with_values: SerializableOptionalIndex {
|
||||
non_null_row_ids: Box::new(&doc_with_values[..]),
|
||||
num_rows: start_offsets.len() as u32 - 1,
|
||||
},
|
||||
start_offsets: Box::new(&compact_start_offsets[..]),
|
||||
};
|
||||
let mut buffer = Vec::new();
|
||||
serialize_multivalued_index(&serializable_multivalued_index, &mut buffer).unwrap();
|
||||
let bytes = OwnedBytes::new(buffer);
|
||||
open_multivalued_index(bytes, Version::V2).unwrap()
|
||||
}
|
||||
|
||||
pub fn get_start_index_column(&self) -> &Arc<dyn crate::ColumnValues<RowId>> {
|
||||
match self {
|
||||
MultiValueIndex::MultiValueIndexV1(idx) => &idx.start_index_column,
|
||||
MultiValueIndex::MultiValueIndexV2(idx) => &idx.start_index_column,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `[start, end)` values range, such that the values associated with
|
||||
/// the given document are `start..end`.
|
||||
#[inline]
|
||||
pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
|
||||
match self {
|
||||
MultiValueIndex::MultiValueIndexV1(idx) => idx.range(doc_id),
|
||||
MultiValueIndex::MultiValueIndexV2(idx) => idx.range(doc_id),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of documents in the index.
|
||||
#[inline]
|
||||
pub fn num_docs(&self) -> u32 {
|
||||
match self {
|
||||
MultiValueIndex::MultiValueIndexV1(idx) => idx.start_index_column.num_vals() - 1,
|
||||
MultiValueIndex::MultiValueIndexV2(idx) => idx.optional_index.num_docs(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
/// docids. Positions are converted in place to docids.
///
/// Since there is no index from value position to docid, only from docid to value position
/// range, we scan the index.
///
/// Correctness: positions need to be sorted. idx_reader needs to contain monotonically
/// increasing positions.
///
/// TODO: Instead of a linear scan we could employ an exponential search followed by a binary
/// search to match a docid to its value position.
|
||||
pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
|
||||
match self {
|
||||
MultiValueIndex::MultiValueIndexV1(idx) => {
|
||||
idx.select_batch_in_place(docid_start, ranks)
|
||||
}
|
||||
MultiValueIndex::MultiValueIndexV2(idx) => {
|
||||
idx.select_batch_in_place(docid_start, ranks)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl MultiValueIndexV2 {
|
||||
/// Returns `[start, end)`, such that the values associated with
|
||||
/// the given document are `start..end`.
|
||||
#[inline]
|
||||
pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
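// e.g. with docs {1, 5} present and start offsets [0, 2, 4]:
// range(5) == 2..4, while range(3) == 0..0 because doc 3 has no values.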
|
||||
let Some(rank) = self.optional_index.rank_if_exists(doc_id) else {
|
||||
return 0..0;
|
||||
};
|
||||
let start = self.start_index_column.get_val(rank);
|
||||
let end = self.start_index_column.get_val(rank + 1);
|
||||
start..end
|
||||
}
|
||||
|
||||
/// Returns the number of documents in the index.
|
||||
#[inline]
|
||||
pub fn num_docs(&self) -> u32 {
|
||||
self.optional_index.num_docs()
|
||||
}
|
||||
|
||||
/// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
/// docids. Positions are converted in place to docids.
///
/// Since there is no index from value position to docid, only from docid to value position
/// range, we scan the index.
///
/// Correctness: positions need to be sorted. idx_reader needs to contain monotonically
/// increasing positions.
///
/// TODO: Instead of a linear scan we could employ an exponential search followed by a binary
/// search to match a docid to its value position.
|
||||
pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
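// The conversion happens in three steps:
// 1. Walk the compact start offsets to find, for each value position in
//    `ranks`, the dense position (rank among docs with values) whose
//    [start, end) range contains it.
// 2. Deduplicate consecutive identical dense positions in place
//    (tracked via `write_doc_pos` / `last_doc`).
// 3. Map each dense position back to a docid via `optional_index.select`.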
|
||||
if ranks.is_empty() {
|
||||
return;
|
||||
}
|
||||
let mut cur_pos_in_idx = self.optional_index.rank(docid_start);
|
||||
let mut last_doc = None;
|
||||
|
||||
assert!(cur_pos_in_idx <= ranks[0]);
|
||||
|
||||
let mut write_doc_pos = 0;
|
||||
for i in 0..ranks.len() {
|
||||
let pos = ranks[i];
|
||||
loop {
|
||||
let end = self.start_index_column.get_val(cur_pos_in_idx + 1);
|
||||
if end > pos {
|
||||
ranks[write_doc_pos] = cur_pos_in_idx;
|
||||
write_doc_pos += if last_doc == Some(cur_pos_in_idx) {
|
||||
0
|
||||
} else {
|
||||
1
|
||||
};
|
||||
last_doc = Some(cur_pos_in_idx);
|
||||
break;
|
||||
}
|
||||
cur_pos_in_idx += 1;
|
||||
}
|
||||
}
|
||||
ranks.truncate(write_doc_pos);
|
||||
|
||||
for rank in ranks.iter_mut() {
|
||||
*rank = self.optional_index.select(*rank);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ops::Range;
|
||||
|
||||
use super::MultiValueIndex;
|
||||
use crate::{ColumnarReader, DynamicColumn};
|
||||
|
||||
fn index_to_pos_helper(
|
||||
index: &MultiValueIndex,
|
||||
@@ -134,6 +326,7 @@ mod tests {
|
||||
let positions = &[10u32, 11, 15, 20, 21, 22];
|
||||
assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);
|
||||
assert_eq!(index_to_pos_helper(&index, 1..5, positions), vec![1, 3, 4]);
|
||||
|
||||
assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
|
||||
assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
|
||||
assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
|
||||
@@ -141,4 +334,67 @@ mod tests {
|
||||
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
|
||||
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_to_rowids() {
|
||||
use crate::ColumnarWriter;
|
||||
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
|
||||
// This column gets coerced to u64
|
||||
columnar_writer.record_numerical(1, "full", u64::MAX);
|
||||
columnar_writer.record_numerical(1, "full", u64::MAX);
|
||||
|
||||
columnar_writer.record_numerical(5, "full", u64::MAX);
|
||||
columnar_writer.record_numerical(5, "full", u64::MAX);
|
||||
|
||||
let mut wrt: Vec<u8> = Vec::new();
|
||||
columnar_writer.serialize(7, &mut wrt).unwrap();
|
||||
|
||||
let reader = ColumnarReader::open(wrt).unwrap();
|
||||
// Open the column as u64
|
||||
let column = reader.read_columns("full").unwrap()[0]
|
||||
.open()
|
||||
.unwrap()
|
||||
.coerce_numerical(crate::NumericalType::U64)
|
||||
.unwrap();
|
||||
let DynamicColumn::U64(column) = column else {
|
||||
panic!();
|
||||
};
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(1..2);
|
||||
assert_eq!(row_id_range, 0..2);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(0..2);
|
||||
assert_eq!(row_id_range, 0..2);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(0..4);
|
||||
assert_eq!(row_id_range, 0..2);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(3..4);
|
||||
assert_eq!(row_id_range, 2..2);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(1..6);
|
||||
assert_eq!(row_id_range, 0..4);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(3..6);
|
||||
assert_eq!(row_id_range, 2..4);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(0..6);
|
||||
assert_eq!(row_id_range, 0..4);
|
||||
|
||||
let row_id_range = column.index.docid_range_to_rowids(0..6);
|
||||
assert_eq!(row_id_range, 0..4);
|
||||
|
||||
let check = |range, expected| {
|
||||
let full_range = 0..=u64::MAX;
|
||||
let mut docids = Vec::new();
|
||||
column.get_docids_for_value_range(full_range, range, &mut docids);
|
||||
assert_eq!(docids, expected);
|
||||
};
|
||||
|
||||
// check(0..1, vec![]);
|
||||
// check(0..2, vec![1]);
|
||||
check(1..2, vec![1]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,8 +21,6 @@ const DENSE_BLOCK_THRESHOLD: u32 =
|
||||
|
||||
const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;
|
||||
|
||||
const BLOCK_SIZE: RowId = 1 << 16;
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct BlockMeta {
|
||||
non_null_rows_before_block: u32,
|
||||
@@ -88,8 +86,14 @@ pub struct OptionalIndex {
|
||||
block_metas: Arc<[BlockMeta]>,
|
||||
}
|
||||
|
||||
impl<'a> Iterable<u32> for &'a OptionalIndex {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
Box::new(self.iter_rows())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for OptionalIndex {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
f.debug_struct("OptionalIndex")
|
||||
.field("num_rows", &self.num_rows)
|
||||
.field("num_non_null_rows", &self.num_non_null_rows)
|
||||
@@ -109,8 +113,8 @@ struct RowAddr {
|
||||
#[inline(always)]
|
||||
fn row_addr_from_row_id(row_id: RowId) -> RowAddr {
|
||||
RowAddr {
|
||||
block_id: (row_id / BLOCK_SIZE) as u16,
|
||||
in_block_row_id: (row_id % BLOCK_SIZE) as u16,
|
||||
block_id: (row_id / ELEMENTS_PER_BLOCK) as u16,
|
||||
in_block_row_id: (row_id % ELEMENTS_PER_BLOCK) as u16,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -170,7 +174,9 @@ impl<'a> SelectCursor<RowId> for OptionalIndexSelectCursor<'a> {
|
||||
}
|
||||
|
||||
impl Set<RowId> for OptionalIndex {
|
||||
type SelectCursor<'b> = OptionalIndexSelectCursor<'b> where Self: 'b;
|
||||
type SelectCursor<'b>
|
||||
= OptionalIndexSelectCursor<'b>
|
||||
where Self: 'b;
|
||||
// Check if value at position is not null.
|
||||
#[inline]
|
||||
fn contains(&self, row_id: RowId) -> bool {
|
||||
@@ -185,14 +191,20 @@ impl Set<RowId> for OptionalIndex {
|
||||
}
|
||||
}
|
||||
|
||||
/// Any value doc_id is allowed.
|
||||
/// In particular, doc_id = num_rows.
|
||||
#[inline]
|
||||
fn rank(&self, doc_id: DocId) -> RowId {
|
||||
if doc_id >= self.num_docs() {
|
||||
return self.num_non_nulls();
|
||||
}
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(doc_id);
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
let block = self.block(block_meta);
|
||||
|
||||
let block_offset_row_id = match block {
|
||||
Block::Dense(dense_block) => dense_block.rank(in_block_row_id),
|
||||
Block::Sparse(sparse_block) => sparse_block.rank(in_block_row_id),
|
||||
@@ -200,13 +212,15 @@ impl Set<RowId> for OptionalIndex {
|
||||
block_meta.non_null_rows_before_block + block_offset_row_id
|
||||
}
|
||||
|
||||
/// Any value doc_id is allowed.
|
||||
/// In particular, doc_id = num_rows.
|
||||
#[inline]
|
||||
fn rank_if_exists(&self, doc_id: DocId) -> Option<RowId> {
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(doc_id);
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
let block_meta = *self.block_metas.get(block_id as usize)?;
|
||||
let block = self.block(block_meta);
|
||||
let block_offset_row_id = match block {
|
||||
Block::Dense(dense_block) => dense_block.rank_if_exists(in_block_row_id),
|
||||
@@ -491,7 +505,7 @@ fn deserialize_optional_index_block_metadatas(
|
||||
non_null_rows_before_block += num_non_null_rows;
|
||||
}
|
||||
block_metas.resize(
|
||||
((num_rows + BLOCK_SIZE - 1) / BLOCK_SIZE) as usize,
|
||||
((num_rows + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK) as usize,
|
||||
BlockMeta {
|
||||
non_null_rows_before_block,
|
||||
start_byte_offset,
|
||||
|
||||
@@ -28,10 +28,11 @@ pub trait Set<T> {
|
||||
/// Returns true if the elements is contained in the Set
|
||||
fn contains(&self, el: T) -> bool;
|
||||
|
||||
/// Returns the number of rows in the set that are < `el`
|
||||
/// Returns the element's rank (its position in the set).
|
||||
/// If the set does not contain the element, it returns the rank of the next existing element.
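/// For example, for a set containing `{1, 10}`: `rank(0) == 0`, `rank(1) == 0`,
/// `rank(5) == 1` and `rank(10) == 1`.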
|
||||
fn rank(&self, el: T) -> T;
|
||||
|
||||
/// If the set contains `el` returns the element rank.
|
||||
/// If the set contains `el`, returns the element's rank (its position in the set).
|
||||
/// If the set does not contain the element, it returns `None`.
|
||||
fn rank_if_exists(&self, el: T) -> Option<T>;
|
||||
|
||||
@@ -39,7 +40,8 @@ pub trait Set<T> {
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if rank is greater than the number of elements in the Set.
|
||||
/// May panic if rank is greater or equal to the number of
|
||||
/// elements in the Set.
|
||||
fn select(&self, rank: T) -> T;
|
||||
|
||||
/// Creates a brand new select cursor.
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::BinarySerializable;
|
||||
@@ -124,7 +123,9 @@ impl<'a> SelectCursor<u16> for DenseBlockSelectCursor<'a> {
|
||||
}
|
||||
|
||||
impl<'a> Set<u16> for DenseBlock<'a> {
|
||||
type SelectCursor<'b> = DenseBlockSelectCursor<'a> where Self: 'b;
|
||||
type SelectCursor<'b>
|
||||
= DenseBlockSelectCursor<'a>
|
||||
where Self: 'b;
|
||||
|
||||
#[inline(always)]
|
||||
fn contains(&self, el: u16) -> bool {
|
||||
|
||||
@@ -32,7 +32,9 @@ impl<'a> SelectCursor<u16> for SparseBlock<'a> {
|
||||
}
|
||||
|
||||
impl<'a> Set<u16> for SparseBlock<'a> {
|
||||
type SelectCursor<'b> = Self where Self: 'b;
|
||||
type SelectCursor<'b>
|
||||
= Self
|
||||
where Self: 'b;
|
||||
|
||||
#[inline(always)]
|
||||
fn contains(&self, el: u16) -> bool {
|
||||
|
||||
@@ -22,8 +22,8 @@ fn test_set_helper<C: SetCodec<Item = u16>>(vals: &[u16]) -> usize {
|
||||
vals.iter().cloned().take_while(|v| *v < val).count() as u16
|
||||
);
|
||||
}
|
||||
for rank in 0..vals.len() {
|
||||
assert_eq!(tested_set.select(rank as u16), vals[rank]);
|
||||
for (rank, val) in vals.iter().enumerate() {
|
||||
assert_eq!(tested_set.select(rank as u16), *val);
|
||||
}
|
||||
buffer.len()
|
||||
}
|
||||
@@ -107,3 +107,41 @@ fn test_simple_translate_codec_idx_to_original_idx_dense() {
|
||||
assert_eq!(i, select_cursor.select(i));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_translate_idx_to_value_idx_dense() {
|
||||
let mut buffer = Vec::new();
|
||||
DenseBlockCodec::serialize([1, 10].iter().copied(), &mut buffer).unwrap();
|
||||
let tested_set = DenseBlockCodec::open(buffer.as_slice());
|
||||
assert!(tested_set.contains(1));
|
||||
assert!(!tested_set.contains(2));
|
||||
assert_eq!(tested_set.rank(0), 0);
|
||||
assert_eq!(tested_set.rank(1), 0);
|
||||
for rank in 2..10 {
|
||||
// values not in the set: `rank` returns the rank of the next existing element
|
||||
assert_eq!(tested_set.rank_if_exists(rank), None);
|
||||
assert_eq!(tested_set.rank(rank), 1);
|
||||
}
|
||||
assert_eq!(tested_set.rank(10), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_translate_idx_to_value_idx_sparse() {
|
||||
let mut buffer = Vec::new();
|
||||
SparseBlockCodec::serialize([1, 10].iter().copied(), &mut buffer).unwrap();
|
||||
let tested_set = SparseBlockCodec::open(buffer.as_slice());
|
||||
assert!(tested_set.contains(1));
|
||||
assert!(!tested_set.contains(2));
|
||||
assert_eq!(tested_set.rank(0), 0);
|
||||
assert_eq!(tested_set.select(tested_set.rank(0)), 1);
|
||||
assert_eq!(tested_set.rank(1), 0);
|
||||
assert_eq!(tested_set.select(tested_set.rank(1)), 1);
|
||||
for rank in 2..10 {
|
||||
// values not in the set: `rank` returns the rank of the next existing element
|
||||
assert_eq!(tested_set.rank_if_exists(rank), None);
|
||||
assert_eq!(tested_set.rank(rank), 1);
|
||||
assert_eq!(tested_set.select(tested_set.rank(rank)), 10);
|
||||
}
|
||||
assert_eq!(tested_set.rank(10), 1);
|
||||
assert_eq!(tested_set.select(tested_set.rank(10)), 10);
|
||||
}
|
||||
|
||||
@@ -1,8 +1,29 @@
|
||||
use proptest::prelude::{any, prop, *};
|
||||
use proptest::strategy::Strategy;
|
||||
use proptest::prelude::*;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
use super::*;
|
||||
use crate::{ColumnarReader, ColumnarWriter, DynamicColumnHandle};
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_bug_2293() {
|
||||
// Regression test for a panic in docid_range_to_rowids when docid == num_docs.
|
||||
test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK - 1);
|
||||
test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK);
|
||||
test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK + 1);
|
||||
}
|
||||
fn test_optional_index_with_num_docs(num_docs: u32) {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(100, "score", 80i64);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(num_docs, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("score").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
|
||||
let col = cols[0].open().unwrap();
|
||||
col.column_index().docid_range_to_rowids(0..num_docs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dense_block_threshold() {
|
||||
@@ -35,7 +56,7 @@ proptest! {
|
||||
|
||||
#[test]
|
||||
fn test_with_random_sets_simple() {
|
||||
let vals = 10..BLOCK_SIZE * 2;
|
||||
let vals = 10..ELEMENTS_PER_BLOCK * 2;
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
serialize_optional_index(&vals, 100, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
@@ -89,8 +110,8 @@ fn test_null_index(data: &[bool]) {
|
||||
.map(|(pos, _val)| pos as u32)
|
||||
.collect();
|
||||
let mut select_iter = null_index.select_cursor();
|
||||
for i in 0..orig_idx_with_value.len() {
|
||||
assert_eq!(select_iter.select(i as u32), orig_idx_with_value[i]);
|
||||
for (i, expected) in orig_idx_with_value.iter().enumerate() {
|
||||
assert_eq!(select_iter.select(i as u32), *expected);
|
||||
}
|
||||
|
||||
let step_size = (orig_idx_with_value.len() / 100).max(1);
|
||||
@@ -171,7 +192,7 @@ fn test_optional_index_rank() {
|
||||
test_optional_index_rank_aux(&[0u32, 1u32]);
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
|
||||
test_optional_index_rank_aux(&block);
|
||||
}
|
||||
|
||||
@@ -185,8 +206,8 @@ fn test_optional_index_iter_empty_one() {
|
||||
fn test_optional_index_iter_dense_block() {
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
|
||||
block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * ELEMENTS_PER_BLOCK);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -215,12 +236,12 @@ mod bench {
|
||||
let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(pos, val)| *val)
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _)| pos as RowId)
|
||||
.collect();
|
||||
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
|
||||
let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
codec
|
||||
|
||||
open_optional_index(OwnedBytes::new(out)).unwrap()
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
@@ -242,7 +263,7 @@ mod bench {
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent as f32 / 100.0;
|
||||
let ratio = percent / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
|
||||
@@ -3,33 +3,45 @@ use std::io::Write;
|
||||
|
||||
use common::{CountingWriter, OwnedBytes};
|
||||
|
||||
use super::multivalued_index::SerializableMultivalueIndex;
|
||||
use super::OptionalIndex;
|
||||
use crate::column_index::multivalued_index::serialize_multivalued_index;
|
||||
use crate::column_index::optional_index::serialize_optional_index;
|
||||
use crate::column_index::ColumnIndex;
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, RowId};
|
||||
use crate::{Cardinality, RowId, Version};
|
||||
|
||||
pub struct SerializableOptionalIndex<'a> {
|
||||
pub non_null_row_ids: Box<dyn Iterable<RowId> + 'a>,
|
||||
pub num_rows: RowId,
|
||||
}
|
||||
|
||||
impl<'a> From<&'a OptionalIndex> for SerializableOptionalIndex<'a> {
|
||||
fn from(optional_index: &'a OptionalIndex) -> Self {
|
||||
SerializableOptionalIndex {
|
||||
non_null_row_ids: Box::new(optional_index),
|
||||
num_rows: optional_index.num_docs(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub enum SerializableColumnIndex<'a> {
|
||||
Full,
|
||||
Optional {
|
||||
non_null_row_ids: Box<dyn Iterable<RowId> + 'a>,
|
||||
num_rows: RowId,
|
||||
},
|
||||
// TODO remove the Arc<dyn> apart from serialization this is not
|
||||
// dynamic at all.
|
||||
Multivalued(Box<dyn Iterable<RowId> + 'a>),
|
||||
Optional(SerializableOptionalIndex<'a>),
|
||||
Multivalued(SerializableMultivalueIndex<'a>),
|
||||
}
|
||||
|
||||
impl<'a> SerializableColumnIndex<'a> {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
SerializableColumnIndex::Full => Cardinality::Full,
|
||||
SerializableColumnIndex::Optional { .. } => Cardinality::Optional,
|
||||
SerializableColumnIndex::Optional(_) => Cardinality::Optional,
|
||||
SerializableColumnIndex::Multivalued(_) => Cardinality::Multivalued,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize a column index.
|
||||
pub fn serialize_column_index(
|
||||
column_index: SerializableColumnIndex,
|
||||
output: &mut impl Write,
|
||||
@@ -39,19 +51,23 @@ pub fn serialize_column_index(
|
||||
output.write_all(&[cardinality])?;
|
||||
match column_index {
|
||||
SerializableColumnIndex::Full => {}
|
||||
SerializableColumnIndex::Optional {
|
||||
SerializableColumnIndex::Optional(SerializableOptionalIndex {
|
||||
non_null_row_ids,
|
||||
num_rows,
|
||||
} => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
|
||||
}) => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
|
||||
SerializableColumnIndex::Multivalued(multivalued_index) => {
|
||||
serialize_multivalued_index(&*multivalued_index, &mut output)?
|
||||
serialize_multivalued_index(&multivalued_index, &mut output)?
|
||||
}
|
||||
}
|
||||
let column_index_num_bytes = output.written_bytes() as u32;
|
||||
Ok(column_index_num_bytes)
|
||||
}
|
||||
|
||||
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
|
||||
/// Open a serialized column index.
|
||||
pub fn open_column_index(
|
||||
mut bytes: OwnedBytes,
|
||||
format_version: Version,
|
||||
) -> io::Result<ColumnIndex> {
|
||||
if bytes.is_empty() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
@@ -68,7 +84,8 @@ pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
|
||||
Ok(ColumnIndex::Optional(optional_index))
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalue_index = super::multivalued_index::open_multivalued_index(bytes)?;
|
||||
let multivalue_index =
|
||||
super::multivalued_index::open_multivalued_index(bytes, format_version)?;
|
||||
Ok(ColumnIndex::Multivalued(multivalue_index))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
|
||||
fn get_reader_for_bench<Codec: ColumnCodec>(data: &[u64]) -> Codec::ColumnValues {
|
||||
let mut bytes = Vec::new();
|
||||
let stats = compute_stats(data.iter().cloned());
|
||||
@@ -41,10 +42,13 @@ fn get_reader_for_bench<Codec: ColumnCodec>(data: &[u64]) -> Codec::ColumnValues
|
||||
for val in data {
|
||||
codec_serializer.collect(*val);
|
||||
}
|
||||
codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes);
|
||||
codec_serializer
|
||||
.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
|
||||
.unwrap();
|
||||
|
||||
Codec::load(OwnedBytes::new(bytes)).unwrap()
|
||||
}
|
||||
|
||||
fn bench_get<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = get_reader_for_bench::<Codec>(data);
|
||||
b.iter(|| {
|
||||
|
||||
@@ -10,7 +10,7 @@ pub(crate) struct MergedColumnValues<'a, T> {
|
||||
pub(crate) merge_row_order: &'a MergeRowOrder,
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
|
||||
impl<'a, T: Copy + PartialOrd + Debug + 'static> Iterable<T> for MergedColumnValues<'a, T> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
match self.merge_row_order {
|
||||
MergeRowOrder::Stack(_) => Box::new(
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
//! # `fastfield_codecs`
|
||||
//!
|
||||
//! - Columnar storage of data for tantivy [`Column`].
|
||||
//! - Columnar storage of data for tantivy [`crate::Column`].
|
||||
//! - Encode data in different codecs.
|
||||
//! - Monotonically map values to u64/u128
|
||||
|
||||
@@ -10,6 +10,7 @@ use std::fmt::Debug;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
|
||||
use downcast_rs::DowncastSync;
|
||||
pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
|
||||
pub use monotonic_mapping_u128::MonotonicallyMappableToU128;
|
||||
|
||||
@@ -25,7 +26,10 @@ mod monotonic_column;
|
||||
|
||||
pub(crate) use merge::MergedColumnValues;
|
||||
pub use stats::ColumnStats;
|
||||
pub use u128_based::{open_u128_mapped, serialize_column_values_u128};
|
||||
pub use u128_based::{
|
||||
open_u128_as_compact_u64, open_u128_mapped, serialize_column_values_u128,
|
||||
CompactSpaceU64Accessor,
|
||||
};
|
||||
pub use u64_based::{
|
||||
load_u64_based_column_values, serialize_and_load_u64_based_column_values,
|
||||
serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
|
||||
@@ -41,7 +45,7 @@ use crate::RowId;
|
||||
///
|
||||
/// Any methods with a default and specialized implementation need to be called in the
|
||||
/// wrappers that implement the trait: Arc and MonotonicMappingColumn
|
||||
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
|
||||
/// Return the value associated with the given idx.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
@@ -68,11 +72,40 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
out_x4[3] = self.get_val(idx_x4[3]);
|
||||
}
|
||||
|
||||
let step_size = 4;
|
||||
let cutoff = indexes.len() - indexes.len() % step_size;
|
||||
let out_and_idx_chunks = output
|
||||
.chunks_exact_mut(4)
|
||||
.into_remainder()
|
||||
.iter_mut()
|
||||
.zip(indexes.chunks_exact(4).remainder());
|
||||
for (out, idx) in out_and_idx_chunks {
|
||||
*out = self.get_val(*idx);
|
||||
}
|
||||
}
|
||||
|
||||
for idx in cutoff..indexes.len() {
|
||||
output[idx] = self.get_val(indexes[idx]);
|
||||
/// Allows pushing down multiple fetch calls, to avoid dynamic dispatch overhead.
/// The slightly weird `Option<T>` in the output allows pushdown to full columns.
///
/// `indexes` and `output` should have the same length.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `idx` is greater than the column length.
|
||||
fn get_vals_opt(&self, indexes: &[u32], output: &mut [Option<T>]) {
|
||||
assert!(indexes.len() == output.len());
|
||||
let out_and_idx_chunks = output.chunks_exact_mut(4).zip(indexes.chunks_exact(4));
|
||||
for (out_x4, idx_x4) in out_and_idx_chunks {
|
||||
out_x4[0] = Some(self.get_val(idx_x4[0]));
|
||||
out_x4[1] = Some(self.get_val(idx_x4[1]));
|
||||
out_x4[2] = Some(self.get_val(idx_x4[2]));
|
||||
out_x4[3] = Some(self.get_val(idx_x4[3]));
|
||||
}
|
||||
let out_and_idx_chunks = output
|
||||
.chunks_exact_mut(4)
|
||||
.into_remainder()
|
||||
.iter_mut()
|
||||
.zip(indexes.chunks_exact(4).remainder());
|
||||
for (out, idx) in out_and_idx_chunks {
|
||||
*out = Some(self.get_val(*idx));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,7 +134,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
row_id_hits: &mut Vec<RowId>,
|
||||
) {
|
||||
let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
|
||||
for idx in row_id_range.start..row_id_range.end {
|
||||
for idx in row_id_range {
|
||||
let val = self.get_val(idx);
|
||||
if value_range.contains(&val) {
|
||||
row_id_hits.push(idx);
|
||||
@@ -139,6 +172,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
|
||||
}
|
||||
}
|
||||
downcast_rs::impl_downcast!(sync ColumnValues<T> where T: PartialOrd);
|
||||
|
||||
/// Empty column of values.
|
||||
pub struct EmptyColumnValues;
|
||||
@@ -161,12 +195,17 @@ impl<T: PartialOrd + Default> ColumnValues<T> for EmptyColumnValues {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
|
||||
impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.as_ref().get_val(idx)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_vals_opt(&self, indexes: &[u32], output: &mut [Option<T>]) {
|
||||
self.as_ref().get_vals_opt(indexes, output)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> T {
|
||||
self.as_ref().min_value()
|
||||
|
||||
@@ -31,10 +31,10 @@ pub fn monotonic_map_column<C, T, Input, Output>(
|
||||
monotonic_mapping: T,
|
||||
) -> impl ColumnValues<Output>
|
||||
where
|
||||
C: ColumnValues<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Debug + Send + Sync + Clone,
|
||||
Output: PartialOrd + Debug + Send + Sync + Clone,
|
||||
C: ColumnValues<Input> + 'static,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync + 'static,
|
||||
Input: PartialOrd + Debug + Send + Sync + Clone + 'static,
|
||||
Output: PartialOrd + Debug + Send + Sync + Clone + 'static,
|
||||
{
|
||||
MonotonicMappingColumn {
|
||||
from_column,
|
||||
@@ -45,10 +45,10 @@ where
|
||||
|
||||
impl<C, T, Input, Output> ColumnValues<Output> for MonotonicMappingColumn<C, T, Input>
|
||||
where
|
||||
C: ColumnValues<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Send + Debug + Sync + Clone,
|
||||
Output: PartialOrd + Send + Debug + Sync + Clone,
|
||||
C: ColumnValues<Input> + 'static,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync + 'static,
|
||||
Input: PartialOrd + Send + Debug + Sync + Clone + 'static,
|
||||
Output: PartialOrd + Send + Debug + Sync + Clone + 'static,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> Output {
|
||||
@@ -107,7 +107,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_monotonic_mapping_iter() {
|
||||
let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
|
||||
let col = VecColumn::from(&vals);
|
||||
let col = VecColumn::from(vals);
|
||||
let mapped = monotonic_map_column(
|
||||
col,
|
||||
StrictlyMonotonicMappingInverter::from(StrictlyMonotonicMappingToInternal::<i64>::new()),
|
||||
|
||||
@@ -38,6 +38,6 @@ impl Ord for BlankRange {
}
impl PartialOrd for BlankRange {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.blank_size().cmp(&other.blank_size()))
        Some(self.cmp(other))
    }
}
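The rewritten `partial_cmp` above is the canonical way to keep `PartialOrd` consistent with an existing `Ord` implementation: define the total order once in `cmp` and delegate to it (clippy flags the non-delegating form). A minimal standalone sketch with a hypothetical `BlankRangeSketch` type, assuming ranges are ordered by their size as in `BlankRange`:

```rust
use std::cmp::Ordering;

/// Illustrative stand-in for `BlankRange`: ranges are ordered by their size.
struct BlankRangeSketch {
    start: u64,
    end: u64,
}

impl BlankRangeSketch {
    fn blank_size(&self) -> u64 {
        self.end - self.start
    }
}

impl Ord for BlankRangeSketch {
    fn cmp(&self, other: &Self) -> Ordering {
        // The total order is defined exactly once, here.
        self.blank_size().cmp(&other.blank_size())
    }
}

impl PartialOrd for BlankRangeSketch {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two implementations can never disagree.
        Some(self.cmp(other))
    }
}

// Keep `Eq` consistent with `Ord`: equal blank sizes compare as equal.
impl PartialEq for BlankRangeSketch {
    fn eq(&self, other: &Self) -> bool {
        self.blank_size() == other.blank_size()
    }
}
impl Eq for BlankRangeSketch {}

fn main() {
    let wide = BlankRangeSketch { start: 0, end: 10 };
    let narrow = BlankRangeSketch { start: 100, end: 105 };
    assert!(narrow < wide); // the smaller blank sorts first
}
```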
@@ -184,7 +184,7 @@ impl CompactSpaceBuilder {
|
||||
|
||||
let mut covered_space = Vec::with_capacity(self.blanks.len());
|
||||
|
||||
// begining of the blanks
|
||||
// beginning of the blanks
|
||||
if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start) {
|
||||
if *first_blank_start != 0 {
|
||||
covered_space.push(0..=first_blank_start - 1);
|
||||
|
||||
@@ -22,7 +22,7 @@ mod build_compact_space;
|
||||
|
||||
use build_compact_space::get_compact_space;
|
||||
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
|
||||
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
|
||||
use tantivy_bitpacker::{BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::ColumnValues;
|
||||
use crate::RowId;
|
||||
@@ -148,7 +148,7 @@ impl CompactSpace {
|
||||
.binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
|
||||
// Correctness: Overflow. The first range starts at compact space 0, the error from
|
||||
// binary search can never be 0
|
||||
.map_or_else(|e| e - 1, |v| v);
|
||||
.unwrap_or_else(|e| e - 1);
|
||||
|
||||
let range_mapping = &self.ranges_mapping[pos];
|
||||
let diff = compact - range_mapping.compact_start;
|
||||
@@ -292,6 +292,63 @@ impl BinarySerializable for IPCodecParams {
|
||||
}
|
||||
}
|
||||
|
||||
/// Exposes the compact space compressed values as u64.
|
||||
///
|
||||
/// This allows faster access to the values, as u64 is faster to work with than u128.
|
||||
/// It also allows to handle u128 values like u64, via the `open_u64_lenient` as a uniform
|
||||
/// access interface.
|
||||
///
|
||||
/// When converting from the internal u64 to u128 `compact_to_u128` can be used.
|
||||
pub struct CompactSpaceU64Accessor(CompactSpaceDecompressor);
|
||||
impl CompactSpaceU64Accessor {
|
||||
pub(crate) fn open(data: OwnedBytes) -> io::Result<CompactSpaceU64Accessor> {
|
||||
let decompressor = CompactSpaceU64Accessor(CompactSpaceDecompressor::open(data)?);
|
||||
Ok(decompressor)
|
||||
}
|
||||
/// Convert a compact space value to u128
|
||||
pub fn compact_to_u128(&self, compact: u32) -> u128 {
|
||||
self.0.compact_to_u128(compact)
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnValues<u64> for CompactSpaceU64Accessor {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
let compact = self.0.get_compact(doc);
|
||||
compact as u64
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u64 {
|
||||
self.0.u128_to_compact(self.0.min_value()).unwrap() as u64
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.0.u128_to_compact(self.0.max_value()).unwrap() as u64
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.0.params.num_vals
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(self.0.iter_compact().map(|el| el as u64))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_row_ids_for_value_range(
|
||||
&self,
|
||||
value_range: RangeInclusive<u64>,
|
||||
position_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
let value_range = self.0.compact_to_u128(*value_range.start() as u32)
|
||||
..=self.0.compact_to_u128(*value_range.end() as u32);
|
||||
self.0
|
||||
.get_row_ids_for_value_range(value_range, position_range, positions)
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnValues<u128> for CompactSpaceDecompressor {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u128 {
|
||||
@@ -402,9 +459,14 @@ impl CompactSpaceDecompressor {
|
||||
.map(|compact| self.compact_to_u128(compact))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_compact(&self, idx: u32) -> u32 {
|
||||
self.params.bit_unpacker.get(idx, &self.data) as u32
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get(&self, idx: u32) -> u128 {
|
||||
let compact = self.params.bit_unpacker.get(idx, &self.data) as u32;
|
||||
let compact = self.get_compact(idx);
|
||||
self.compact_to_u128(compact)
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,9 @@ use std::sync::Arc;
|
||||
mod compact_space;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes, VInt};
|
||||
use compact_space::{CompactSpaceCompressor, CompactSpaceDecompressor};
|
||||
pub use compact_space::{
|
||||
CompactSpaceCompressor, CompactSpaceDecompressor, CompactSpaceU64Accessor,
|
||||
};
|
||||
|
||||
use crate::column_values::monotonic_map_column;
|
||||
use crate::column_values::monotonic_mapping::{
|
||||
@@ -108,6 +110,23 @@ pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
|
||||
StrictlyMonotonicMappingToInternal::<T>::new().into();
|
||||
Ok(Arc::new(monotonic_map_column(reader, inverted)))
|
||||
}
|
||||
|
||||
/// Returns the u64 representation of the u128 data.
/// The internal representation of the data as u64 is useful for faster processing.
///
/// In order to convert back to u128, cast to `CompactSpaceU64Accessor` and call
/// `compact_to_u128`.
///
/// # Notice
/// In case there are new codecs added, check for usages of `CompactSpaceDecompressorU64` and
/// also handle the new codecs.
pub fn open_u128_as_compact_u64(mut bytes: OwnedBytes) -> io::Result<Arc<dyn ColumnValues<u64>>> {
    let header = U128Header::deserialize(&mut bytes)?;
    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
    let reader = CompactSpaceU64Accessor::open(bytes)?;
    Ok(Arc::new(reader))
}
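A hedged usage sketch of the flow described in the doc comment above. It assumes the surrounding module's items (`OwnedBytes`, `ColumnValues`, `CompactSpaceU64Accessor`, `Arc`, `io`) are in scope, that `bytes` holds a column written with the compact-space codec, and that the concrete accessor is recovered through the `downcast_ref` generated by `downcast_rs::impl_downcast!`; the helper name is illustrative, not part of the crate.

```rust
fn read_back_u128_values(bytes: OwnedBytes) -> io::Result<Vec<u128>> {
    // Open the column through the lenient u64 view: values come back as
    // compact-space codes, which are cheaper to shuffle around than u128.
    let column: Arc<dyn ColumnValues<u64>> = open_u128_as_compact_u64(bytes)?;
    let accessor: &CompactSpaceU64Accessor = column
        .downcast_ref::<CompactSpaceU64Accessor>()
        .expect("only the compact-space codec is expected here");
    // Convert the compact codes back to the original u128 values on demand.
    Ok((0..column.num_vals())
        .map(|idx| accessor.compact_to_u128(column.get_val(idx) as u32))
        .collect())
}
```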
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -63,7 +63,6 @@ impl ColumnValues for BitpackedReader {
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
self.stats.min_value + self.stats.gcd.get() * self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
@@ -83,7 +82,8 @@ impl ColumnValues for BitpackedReader {
|
||||
doc_id_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
let Some(transformed_range) = transform_range_before_linear_transformation(&self.stats, range)
|
||||
let Some(transformed_range) =
|
||||
transform_range_before_linear_transformation(&self.stats, range)
|
||||
else {
|
||||
positions.clear();
|
||||
return;
|
||||
|
||||
@@ -63,7 +63,10 @@ impl BlockwiseLinearEstimator {
|
||||
if self.block.is_empty() {
|
||||
return;
|
||||
}
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let column = VecColumn::from(std::mem::take(&mut self.block));
|
||||
let line = Line::train(&column);
|
||||
self.block = column.into();
|
||||
|
||||
let mut max_value = 0u64;
|
||||
for (i, buffer_val) in self.block.iter().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
@@ -125,7 +128,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
|
||||
*buffer_val = gcd_divider.divide(*buffer_val - stats.min_value);
|
||||
}
|
||||
|
||||
let line = Line::train(&VecColumn::from(&buffer));
|
||||
let line = Line::train(&VecColumn::from(buffer.to_vec()));
|
||||
|
||||
assert!(!buffer.is_empty());
|
||||
|
||||
|
||||
@@ -122,12 +122,11 @@ impl Line {
|
||||
line
|
||||
}
|
||||
|
||||
/// Returns a line that attemps to approximate a function
|
||||
/// Returns a line that attempts to approximate a function
|
||||
/// f: i in 0..[ys.num_vals()) -> ys[i].
|
||||
///
|
||||
/// - The approximation is always lower than the actual value.
|
||||
/// Or more rigorously, formally `f(i).wrapping_sub(ys[i])` is small
|
||||
/// for any i in [0..ys.len()).
|
||||
/// - The approximation is always lower than the actual value. Or more rigorously, formally
|
||||
/// `f(i).wrapping_sub(ys[i])` is small for any i in [0..ys.len()).
|
||||
/// - It computes without panicking for any value of it.
|
||||
///
|
||||
/// This function is only invariable by translation if all of the
|
||||
@@ -184,7 +183,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn test_eval_max_err(ys: &[u64]) -> Option<u64> {
|
||||
let line = Line::train(&VecColumn::from(&ys));
|
||||
let line = Line::train(&VecColumn::from(ys.to_vec()));
|
||||
ys.iter()
|
||||
.enumerate()
|
||||
.map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))
|
||||
|
||||
@@ -173,7 +173,9 @@ impl LinearCodecEstimator {
|
||||
fn collect_before_line_estimation(&mut self, value: u64) {
|
||||
self.block.push(value);
|
||||
if self.block.len() == LINE_ESTIMATION_BLOCK_LEN {
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let column = VecColumn::from(std::mem::take(&mut self.block));
|
||||
let line = Line::train(&column);
|
||||
self.block = column.into();
|
||||
let block = std::mem::take(&mut self.block);
|
||||
for val in block {
|
||||
self.collect_after_line_estimation(&line, val);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use proptest::prelude::*;
|
||||
use proptest::strategy::Strategy;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -4,14 +4,14 @@ use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::ColumnValues;
|
||||
|
||||
/// VecColumn provides `Column` over a slice.
|
||||
pub struct VecColumn<'a, T = u64> {
|
||||
pub(crate) values: &'a [T],
|
||||
/// VecColumn provides `Column` over a `Vec<T>`.
|
||||
pub struct VecColumn<T = u64> {
|
||||
pub(crate) values: Vec<T>,
|
||||
pub(crate) min_value: T,
|
||||
pub(crate) max_value: T,
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColumn<'a, T> {
|
||||
impl<T: Copy + PartialOrd + Send + Sync + Debug + 'static> ColumnValues<T> for VecColumn<T> {
|
||||
fn get_val(&self, position: u32) -> T {
|
||||
self.values[position as usize]
|
||||
}
|
||||
@@ -37,11 +37,8 @@ impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColu
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
|
||||
where V: AsRef<[T]> + ?Sized
|
||||
{
|
||||
fn from(values: &'a V) -> Self {
|
||||
let values = values.as_ref();
|
||||
impl<T: Copy + PartialOrd + Default> From<Vec<T>> for VecColumn<T> {
|
||||
fn from(values: Vec<T>) -> Self {
|
||||
let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
|
||||
Self {
|
||||
values,
|
||||
@@ -50,3 +47,8 @@ where V: AsRef<[T]> + ?Sized
|
||||
}
|
||||
}
|
||||
}
|
||||
impl From<VecColumn> for Vec<u64> {
|
||||
fn from(column: VecColumn) -> Self {
|
||||
column.values
|
||||
}
|
||||
}
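Since `VecColumn` now owns a `Vec<T>` instead of borrowing a slice, call sites hand over the buffer and can take it back afterwards. A small usage sketch, assuming `VecColumn` and the `ColumnValues` trait from this crate are in scope:

```rust
fn vec_column_roundtrip_sketch() {
    let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
    // The column takes ownership and precomputes min/max via `minmax`.
    let col = VecColumn::from(vals);
    assert_eq!(col.get_val(3), 30);
    assert_eq!(col.min_value(), 0);
    assert_eq!(col.max_value(), 990);
    // Take the backing buffer back without copying.
    let vals: Vec<u64> = col.into();
    assert_eq!(vals.len(), 100);
}
```

This round-trip is the reason the estimators above switch from `Line::train(&VecColumn::from(&self.block))` to `std::mem::take(&mut self.block)` followed by `column.into()`: the block is moved into the column for training and then moved back into the estimator without an extra allocation.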
@@ -1,3 +1,6 @@
|
||||
use core::fmt;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
use crate::InvalidData;
|
||||
|
||||
pub const VERSION_FOOTER_NUM_BYTES: usize = MAGIC_BYTES.len() + std::mem::size_of::<u32>();
|
||||
@@ -8,7 +11,7 @@ const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 66];
|
||||
|
||||
pub fn footer() -> [u8; VERSION_FOOTER_NUM_BYTES] {
|
||||
let mut footer_bytes = [0u8; VERSION_FOOTER_NUM_BYTES];
|
||||
footer_bytes[0..4].copy_from_slice(&Version::V1.to_bytes());
|
||||
footer_bytes[0..4].copy_from_slice(&CURRENT_VERSION.to_bytes());
|
||||
footer_bytes[4..8].copy_from_slice(&MAGIC_BYTES[..]);
|
||||
footer_bytes
|
||||
}
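For reference, a self-contained sketch of the 8-byte footer layout produced by `footer()` and checked by `parse_footer()`, with the crate constants inlined (version code 1 corresponds to `Version::V1`, 2 to `Version::V2`; the real `parse_footer` remains the source of truth for error handling):

```rust
const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 66];
const VERSION_FOOTER_NUM_BYTES: usize = 4 + 4;

fn footer_sketch(version_code: u32) -> [u8; VERSION_FOOTER_NUM_BYTES] {
    let mut footer_bytes = [0u8; VERSION_FOOTER_NUM_BYTES];
    // First 4 bytes: version code, little-endian.
    footer_bytes[0..4].copy_from_slice(&version_code.to_le_bytes());
    // Last 4 bytes: magic marker identifying a columnar footer.
    footer_bytes[4..8].copy_from_slice(&MAGIC_BYTES);
    footer_bytes
}

fn parse_footer_sketch(footer_bytes: [u8; VERSION_FOOTER_NUM_BYTES]) -> Result<u32, &'static str> {
    if footer_bytes[4..8] != MAGIC_BYTES {
        return Err("invalid magic bytes");
    }
    let code = u32::from_le_bytes(footer_bytes[0..4].try_into().unwrap());
    match code {
        1 | 2 => Ok(code),
        _ => Err("unknown version"),
    }
}

fn main() {
    let bytes = footer_sketch(2);
    assert_eq!(parse_footer_sketch(bytes), Ok(2));
}
```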
@@ -20,10 +23,22 @@ pub fn parse_footer(footer_bytes: [u8; VERSION_FOOTER_NUM_BYTES]) -> Result<Vers
|
||||
Version::try_from_bytes(footer_bytes[0..4].try_into().unwrap())
|
||||
}
|
||||
|
||||
pub const CURRENT_VERSION: Version = Version::V2;
|
||||
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
#[repr(u32)]
|
||||
pub enum Version {
|
||||
V1 = 1u32,
|
||||
V2 = 2u32,
|
||||
}
|
||||
|
||||
impl Display for Version {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Version::V1 => write!(f, "v1"),
|
||||
Version::V2 => write!(f, "v2"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Version {
|
||||
@@ -35,6 +50,7 @@ impl Version {
|
||||
let code = u32::from_le_bytes(bytes);
|
||||
match code {
|
||||
1u32 => Ok(Version::V1),
|
||||
2u32 => Ok(Version::V2),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
}
|
||||
@@ -47,9 +63,9 @@ mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_footer_dserialization() {
|
||||
fn test_footer_deserialization() {
|
||||
let parsed_version: Version = parse_footer(footer()).unwrap();
|
||||
assert_eq!(Version::V1, parsed_version);
|
||||
assert_eq!(Version::V2, parsed_version);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -63,11 +79,10 @@ mod tests {
|
||||
for &i in &version_to_tests {
|
||||
let version_res = Version::try_from_bytes(i.to_le_bytes());
|
||||
if let Ok(version) = version_res {
|
||||
assert_eq!(version, Version::V1);
|
||||
assert_eq!(version.to_bytes(), i.to_le_bytes());
|
||||
valid_versions.insert(i);
|
||||
}
|
||||
}
|
||||
assert_eq!(valid_versions.len(), 1);
|
||||
assert_eq!(valid_versions.len(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,8 +52,8 @@ pub enum MergeRowOrder {
|
||||
/// Columnar tables are simply stacked one above the other.
|
||||
/// If the i-th columnar_readers has n_rows_i rows, then
|
||||
/// in the resulting columnar,
|
||||
/// rows [r0..n_row_0) contains the row of columnar_readers[0], in ordder
|
||||
/// rows [n_row_0..n_row_0 + n_row_1 contains the row of columnar_readers[1], in order.
|
||||
/// rows [r0..n_row_0) contains the rows of `columnar_readers[0]`, in order,
/// rows [n_row_0..n_row_0 + n_row_1) contains the rows of `columnar_readers[1]`, in order.
/// ..
/// No document is deleted.
|
||||
Stack(StackMergeOrder),
|
||||
|
||||
@@ -2,12 +2,11 @@ mod merge_dict_column;
|
||||
mod merge_mapping;
|
||||
mod term_merger;
|
||||
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
use std::io;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use itertools::Itertools;
|
||||
pub use merge_mapping::{MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
|
||||
|
||||
use super::writer::ColumnarSerializer;
|
||||
@@ -18,24 +17,27 @@ use crate::columnar::writer::CompatibleNumericalTypes;
|
||||
use crate::columnar::ColumnarReader;
|
||||
use crate::dynamic_column::DynamicColumn;
|
||||
use crate::{
|
||||
BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, NumericalType, NumericalValue,
|
||||
BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, DynamicColumnHandle, NumericalType,
|
||||
NumericalValue,
|
||||
};
|
||||
|
||||
/// Column types are grouped into different categories.
|
||||
/// After merge, all columns belonging to the same category are coerced to
|
||||
/// the same column type.
|
||||
///
|
||||
/// In practise, today, only Numerical colummns are coerced into one type today.
|
||||
/// In practice, only Numerical columns are coerced into one type today.
|
||||
///
|
||||
/// See also [README.md].
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
|
||||
///
|
||||
/// The ordering has to match the ordering of the variants in [ColumnType].
|
||||
#[derive(Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Hash, Debug)]
|
||||
pub(crate) enum ColumnTypeCategory {
|
||||
Bool,
|
||||
Str,
|
||||
Numerical,
|
||||
DateTime,
|
||||
Bytes,
|
||||
Str,
|
||||
Bool,
|
||||
IpAddr,
|
||||
DateTime,
|
||||
}
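The comment above notes that the variant order has to mirror `ColumnType`: the derived `PartialOrd`/`Ord` on a fieldless enum follows declaration order, so reordering variants changes how grouped columns sort during the merge. A toy illustration (variant names only, unrelated to the real enum's semantics):

```rust
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Debug)]
enum CategorySketch {
    Numerical,
    Bytes,
    Str,
    Bool,
    IpAddr,
    DateTime,
}

fn main() {
    // Declaration order defines the derived ordering.
    assert!(CategorySketch::Numerical < CategorySketch::Bytes);
    let mut cats = vec![CategorySketch::Str, CategorySketch::Numerical];
    cats.sort();
    assert_eq!(cats, vec![CategorySketch::Numerical, CategorySketch::Str]);
}
```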
impl From<ColumnType> for ColumnTypeCategory {
|
||||
@@ -61,11 +63,10 @@ impl From<ColumnType> for ColumnTypeCategory {
|
||||
/// `require_columns` makes it possible to ensure that some columns will be present in the
|
||||
/// resulting columnar. When a required column is a numerical column type, one of two things can
|
||||
/// happen:
|
||||
/// - If the required column type is compatible with all of the input columnar, the resulsting
|
||||
/// merged
|
||||
/// columnar will simply coerce the input column and use the required column type.
|
||||
/// - If the required column type is incompatible with one of the input columnar, the merged
|
||||
/// will fail with an InvalidData error.
|
||||
/// - If the required column type is compatible with all of the input columnar, the resulting merged
|
||||
/// columnar will simply coerce the input column and use the required column type.
|
||||
/// - If the required column type is incompatible with one of the input columnar, the merged will
|
||||
/// fail with an InvalidData error.
|
||||
///
|
||||
/// `merge_row_order` makes it possible to remove or reorder row in the resulting
|
||||
/// `Columnar` table.
|
||||
@@ -83,9 +84,20 @@ pub fn merge_columnar(
|
||||
.iter()
|
||||
.map(|reader| reader.num_rows())
|
||||
.collect::<Vec<u32>>();
|
||||
|
||||
let columns_to_merge =
|
||||
group_columns_for_merge(columnar_readers, required_columns, &merge_row_order)?;
|
||||
for ((column_name, column_type), columns) in columns_to_merge {
|
||||
for res in columns_to_merge {
|
||||
let ((column_name, _column_type_category), grouped_columns) = res;
|
||||
let grouped_columns = grouped_columns.open(&merge_row_order)?;
|
||||
if grouped_columns.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let column_type = grouped_columns.column_type_after_merge();
|
||||
let mut columns = grouped_columns.columns;
|
||||
coerce_columns(column_type, &mut columns)?;
|
||||
|
||||
let mut column_serializer =
|
||||
serializer.start_serialize_column(column_name.as_bytes(), column_type);
|
||||
merge_column(
|
||||
@@ -97,6 +109,7 @@ pub fn merge_columnar(
|
||||
)?;
|
||||
column_serializer.finalize()?;
|
||||
}
|
||||
|
||||
serializer.finalize(merge_row_order.num_rows())?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -210,40 +223,12 @@ fn merge_column(
|
||||
struct GroupedColumns {
|
||||
required_column_type: Option<ColumnType>,
|
||||
columns: Vec<Option<DynamicColumn>>,
|
||||
column_category: ColumnTypeCategory,
|
||||
}
|
||||
|
||||
impl GroupedColumns {
|
||||
fn for_category(column_category: ColumnTypeCategory, num_columnars: usize) -> Self {
|
||||
GroupedColumns {
|
||||
required_column_type: None,
|
||||
columns: vec![None; num_columnars],
|
||||
column_category,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the dynamic column for a given columnar.
|
||||
fn set_column(&mut self, columnar_id: usize, column: DynamicColumn) {
|
||||
self.columns[columnar_id] = Some(column);
|
||||
}
|
||||
|
||||
/// Force the existence of a column, as well as its type.
|
||||
fn require_type(&mut self, required_type: ColumnType) -> io::Result<()> {
|
||||
if let Some(existing_required_type) = self.required_column_type {
|
||||
if existing_required_type == required_type {
|
||||
// This was just a duplicate in the `required_columns`.
|
||||
// Nothing to do.
|
||||
return Ok(());
|
||||
} else {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Required column conflicts with another required column of the same type \
|
||||
category.",
|
||||
));
|
||||
}
|
||||
}
|
||||
self.required_column_type = Some(required_type);
|
||||
Ok(())
|
||||
/// Check if the column group can be skipped during serialization.
|
||||
fn is_empty(&self) -> bool {
|
||||
self.required_column_type.is_none() && self.columns.iter().all(Option::is_none)
|
||||
}
|
||||
|
||||
/// Returns the column type after merge.
|
||||
@@ -265,11 +250,76 @@ impl GroupedColumns {
|
||||
}
|
||||
// At the moment, only the numerical categorical column type has more than one possible
|
||||
// column type.
|
||||
assert_eq!(self.column_category, ColumnTypeCategory::Numerical);
|
||||
assert!(self
|
||||
.columns
|
||||
.iter()
|
||||
.flatten()
|
||||
.all(|el| ColumnTypeCategory::from(el.column_type()) == ColumnTypeCategory::Numerical));
|
||||
merged_numerical_columns_type(self.columns.iter().flatten()).into()
|
||||
}
|
||||
}
|
||||
|
||||
struct GroupedColumnsHandle {
|
||||
required_column_type: Option<ColumnType>,
|
||||
columns: Vec<Option<DynamicColumnHandle>>,
|
||||
}
|
||||
|
||||
impl GroupedColumnsHandle {
|
||||
fn new(num_columnars: usize) -> Self {
|
||||
GroupedColumnsHandle {
|
||||
required_column_type: None,
|
||||
columns: vec![None; num_columnars],
|
||||
}
|
||||
}
|
||||
fn open(self, merge_row_order: &MergeRowOrder) -> io::Result<GroupedColumns> {
|
||||
let mut columns: Vec<Option<DynamicColumn>> = Vec::new();
|
||||
for (columnar_id, column) in self.columns.iter().enumerate() {
|
||||
if let Some(column) = column {
|
||||
let column = column.open()?;
|
||||
// We skip columns that end up with 0 documents.
|
||||
// That way, we make sure they don't end up influencing the merge type or
|
||||
// creating empty columns.
|
||||
|
||||
if is_empty_after_merge(merge_row_order, &column, columnar_id) {
|
||||
columns.push(None);
|
||||
} else {
|
||||
columns.push(Some(column));
|
||||
}
|
||||
} else {
|
||||
columns.push(None);
|
||||
}
|
||||
}
|
||||
Ok(GroupedColumns {
|
||||
required_column_type: self.required_column_type,
|
||||
columns,
|
||||
})
|
||||
}
|
||||
|
||||
/// Set the dynamic column for a given columnar.
|
||||
fn set_column(&mut self, columnar_id: usize, column: DynamicColumnHandle) {
|
||||
self.columns[columnar_id] = Some(column);
|
||||
}
|
||||
|
||||
/// Force the existence of a column, as well as its type.
|
||||
fn require_type(&mut self, required_type: ColumnType) -> io::Result<()> {
|
||||
if let Some(existing_required_type) = self.required_column_type {
|
||||
if existing_required_type == required_type {
|
||||
// This was just a duplicate in the `required_columns`.
|
||||
// Nothing to do.
|
||||
return Ok(());
|
||||
} else {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Required column conflicts with another required column of the same type \
|
||||
category.",
|
||||
));
|
||||
}
|
||||
}
|
||||
self.required_column_type = Some(required_type);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the type of the merged numerical column.
|
||||
///
|
||||
/// This function picks the first numerical type out of i64, u64, f64 (order matters
|
||||
@@ -293,7 +343,7 @@ fn merged_numerical_columns_type<'a>(
|
||||
fn is_empty_after_merge(
|
||||
merge_row_order: &MergeRowOrder,
|
||||
column: &DynamicColumn,
|
||||
columnar_id: usize,
|
||||
columnar_ord: usize,
|
||||
) -> bool {
|
||||
if column.num_values() == 0u32 {
|
||||
// It was empty before the merge.
|
||||
@@ -305,7 +355,7 @@ fn is_empty_after_merge(
|
||||
false
|
||||
}
|
||||
MergeRowOrder::Shuffled(shuffled) => {
|
||||
if let Some(alive_bitset) = &shuffled.alive_bitsets[columnar_id] {
|
||||
if let Some(alive_bitset) = &shuffled.alive_bitsets[columnar_ord] {
|
||||
let column_index = column.column_index();
|
||||
match column_index {
|
||||
ColumnIndex::Empty { .. } => true,
|
||||
@@ -319,20 +369,8 @@ fn is_empty_after_merge(
|
||||
true
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
for (doc_id, (start_index, end_index)) in multivalued_index
|
||||
.start_index_column
|
||||
.iter()
|
||||
.tuple_windows()
|
||||
.enumerate()
|
||||
{
|
||||
let doc_id = doc_id as u32;
|
||||
if start_index == end_index {
|
||||
// There are no values in this document
|
||||
continue;
|
||||
}
|
||||
// The document contains values and is present in the alive bitset.
|
||||
// The column is therefore not empty.
|
||||
if alive_bitset.contains(doc_id) {
|
||||
for alive_docid in alive_bitset.iter() {
|
||||
if !multivalued_index.range(alive_docid).is_empty() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -348,56 +386,34 @@ fn is_empty_after_merge(
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn group_columns_for_merge(
|
||||
columnar_readers: &[&ColumnarReader],
|
||||
required_columns: &[(String, ColumnType)],
|
||||
merge_row_order: &MergeRowOrder,
|
||||
) -> io::Result<BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>>> {
|
||||
// Each column name may have multiple types of column associated.
|
||||
// For merging we are interested in the same column type category since they can be merged.
|
||||
let mut columns_grouped: HashMap<(String, ColumnTypeCategory), GroupedColumns> = HashMap::new();
|
||||
/// Iterates over the columns of the columnar readers, grouped by column name.
|
||||
/// Key functionality is that `open` of the Columns is done lazy per group.
|
||||
fn group_columns_for_merge<'a>(
|
||||
columnar_readers: &'a [&'a ColumnarReader],
|
||||
required_columns: &'a [(String, ColumnType)],
|
||||
_merge_row_order: &'a MergeRowOrder,
|
||||
) -> io::Result<BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle>> {
|
||||
let mut columns: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> = BTreeMap::new();
|
||||
|
||||
for &(ref column_name, column_type) in required_columns {
|
||||
columns_grouped
|
||||
columns
|
||||
.entry((column_name.clone(), column_type.into()))
|
||||
.or_insert_with(|| {
|
||||
GroupedColumns::for_category(column_type.into(), columnar_readers.len())
|
||||
})
|
||||
.or_insert_with(|| GroupedColumnsHandle::new(columnar_readers.len()))
|
||||
.require_type(column_type)?;
|
||||
}
|
||||
|
||||
for (columnar_id, columnar_reader) in columnar_readers.iter().enumerate() {
|
||||
let column_name_and_handle = columnar_reader.list_columns()?;
|
||||
// We skip columns that end up with 0 documents.
|
||||
// That way, we make sure they don't end up influencing the merge type or
|
||||
// creating empty columns.
|
||||
let column_name_and_handle = columnar_reader.iter_columns()?;
|
||||
|
||||
for (column_name, handle) in column_name_and_handle {
|
||||
let column_category: ColumnTypeCategory = handle.column_type().into();
|
||||
let column = handle.open()?;
|
||||
if is_empty_after_merge(merge_row_order, &column, columnar_id) {
|
||||
continue;
|
||||
}
|
||||
columns_grouped
|
||||
columns
|
||||
.entry((column_name, column_category))
|
||||
.or_insert_with(|| {
|
||||
GroupedColumns::for_category(column_category, columnar_readers.len())
|
||||
})
|
||||
.set_column(columnar_id, column);
|
||||
.or_insert_with(|| GroupedColumnsHandle::new(columnar_readers.len()))
|
||||
.set_column(columnar_id, handle);
|
||||
}
|
||||
}
|
||||
|
||||
let mut merge_columns: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
Default::default();
|
||||
|
||||
for ((column_name, _), mut grouped_columns) in columns_grouped {
|
||||
let column_type = grouped_columns.column_type_after_merge();
|
||||
coerce_columns(column_type, &mut grouped_columns.columns)?;
|
||||
merge_columns.insert((column_name, column_type), grouped_columns.columns);
|
||||
}
|
||||
|
||||
Ok(merge_columns)
|
||||
Ok(columns)
|
||||
}
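The grouping above boils down to a `BTreeMap` entry pattern keyed by `(column name, type category)`, with one optional slot per input columnar so that handles can be opened lazily later. A stripped-down sketch using strings in place of `DynamicColumnHandle` (every name here is illustrative):

```rust
use std::collections::BTreeMap;

fn group_columns_sketch(
    columns_per_reader: &[Vec<(&str, &str)>], // (column name, type category)
) -> BTreeMap<(String, String), Vec<Option<String>>> {
    let num_readers = columns_per_reader.len();
    let mut grouped: BTreeMap<(String, String), Vec<Option<String>>> = BTreeMap::new();
    for (reader_id, columns) in columns_per_reader.iter().enumerate() {
        for (name, category) in columns {
            let slots = grouped
                .entry((name.to_string(), category.to_string()))
                // One slot per input columnar, as in `GroupedColumnsHandle::new`.
                .or_insert_with(|| vec![None; num_readers]);
            slots[reader_id] = Some(format!("handle from reader {reader_id}"));
        }
    }
    grouped
}

fn main() {
    let readers = vec![
        vec![("numbers", "Numerical")],
        vec![("numbers", "Numerical"), ("texts", "Str")],
    ];
    let grouped = group_columns_sketch(&readers);
    assert_eq!(grouped.len(), 2);
    // The "texts" group keeps an empty slot for the first reader, which is
    // what the missing-column test below exercises.
    assert!(grouped[&("texts".to_string(), "Str".to_string())][0].is_none());
}
```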
fn coerce_columns(
|
||||
|
||||
@@ -35,8 +35,7 @@ impl<'a> Ord for HeapItem<'a> {
|
||||
///
|
||||
/// The item yield is actually a pair with
|
||||
/// - the term
|
||||
/// - a slice with the ordinal of the segments containing
|
||||
/// the terms.
|
||||
/// - a slice with the ordinal of the segments containing the terms.
|
||||
pub struct TermMerger<'a> {
|
||||
heap: BinaryHeap<HeapItem<'a>>,
|
||||
current_streamers: Vec<HeapItem<'a>>,
|
||||
|
||||
@@ -14,7 +14,7 @@ fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
|
||||
}
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(vals.len() as RowId, None, &mut buffer)
|
||||
.serialize(vals.len() as RowId, &mut buffer)
|
||||
.unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
@@ -27,22 +27,10 @@ fn test_column_coercion_to_u64() {
|
||||
let columnar2 = make_columnar("numbers", &[u64::MAX]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_no_coercion_if_all_the_same() {
|
||||
let columnar1 = make_columnar("numbers", &[1u64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -51,24 +39,24 @@ fn test_column_coercion_to_i64() {
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_impossible_coercion_returns_an_error() {
|
||||
let columnar1 = make_columnar("numbers", &[u64::MAX]);
|
||||
let merge_order = StackMergeOrder::stack(&[&columnar1]).into();
|
||||
let group_error = group_columns_for_merge(
|
||||
&[&columnar1],
|
||||
&[("numbers".to_string(), ColumnType::I64)],
|
||||
&merge_order,
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_eq!(group_error.kind(), io::ErrorKind::InvalidInput);
|
||||
}
|
||||
//#[test]
|
||||
// fn test_impossible_coercion_returns_an_error() {
|
||||
// let columnar1 = make_columnar("numbers", &[u64::MAX]);
|
||||
// let merge_order = StackMergeOrder::stack(&[&columnar1]).into();
|
||||
// let group_error = group_columns_for_merge_iter(
|
||||
//&[&columnar1],
|
||||
//&[("numbers".to_string(), ColumnType::I64)],
|
||||
//&merge_order,
|
||||
//)
|
||||
//.unwrap_err();
|
||||
// assert_eq!(group_error.kind(), io::ErrorKind::InvalidInput);
|
||||
//}
|
||||
|
||||
#[test]
|
||||
fn test_group_columns_with_required_column() {
|
||||
@@ -76,7 +64,7 @@ fn test_group_columns_with_required_column() {
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(
|
||||
&[&columnar1, &columnar2],
|
||||
&[("numbers".to_string(), ColumnType::U64)],
|
||||
@@ -84,7 +72,7 @@ fn test_group_columns_with_required_column() {
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -93,17 +81,17 @@ fn test_group_columns_required_column_with_no_existing_columns() {
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(
|
||||
columnars,
|
||||
&[("required_col".to_string(), ColumnType::Str)],
|
||||
&merge_order,
|
||||
)
|
||||
.unwrap();
|
||||
let column_map: BTreeMap<_, _> = group_columns_for_merge(
|
||||
columnars,
|
||||
&[("required_col".to_string(), ColumnType::Str)],
|
||||
&merge_order,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(column_map.len(), 2);
|
||||
let columns = column_map
|
||||
.get(&("required_col".to_string(), ColumnType::Str))
|
||||
.unwrap();
|
||||
let columns = &column_map
|
||||
.get(&("required_col".to_string(), ColumnTypeCategory::Str))
|
||||
.unwrap()
|
||||
.columns;
|
||||
assert_eq!(columns.len(), 2);
|
||||
assert!(columns[0].is_none());
|
||||
assert!(columns[1].is_none());
|
||||
@@ -115,7 +103,7 @@ fn test_group_columns_required_column_is_above_all_columns_have_the_same_type_ru
|
||||
let columnar2 = make_columnar("numbers", &[2i64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(
|
||||
columnars,
|
||||
&[("numbers".to_string(), ColumnType::U64)],
|
||||
@@ -123,7 +111,7 @@ fn test_group_columns_required_column_is_above_all_columns_have_the_same_type_ru
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -132,21 +120,23 @@ fn test_missing_column() {
|
||||
let columnar2 = make_columnar("numbers2", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
assert_eq!(column_map.len(), 2);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
{
|
||||
let columns = column_map
|
||||
.get(&("numbers".to_string(), ColumnType::I64))
|
||||
.unwrap();
|
||||
let columns = &column_map
|
||||
.get(&("numbers".to_string(), ColumnTypeCategory::Numerical))
|
||||
.unwrap()
|
||||
.columns;
|
||||
assert!(columns[0].is_some());
|
||||
assert!(columns[1].is_none());
|
||||
}
|
||||
{
|
||||
let columns = column_map
|
||||
.get(&("numbers2".to_string(), ColumnType::U64))
|
||||
.unwrap();
|
||||
let columns = &column_map
|
||||
.get(&("numbers2".to_string(), ColumnTypeCategory::Numerical))
|
||||
.unwrap()
|
||||
.columns;
|
||||
assert!(columns[0].is_none());
|
||||
assert!(columns[1].is_some());
|
||||
}
|
||||
@@ -169,9 +159,7 @@ fn make_numerical_columnar_multiple_columns(
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_rows, None, &mut buffer)
|
||||
.unwrap();
|
||||
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
@@ -194,9 +182,7 @@ fn make_byte_columnar_multiple_columns(
|
||||
}
|
||||
}
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_rows, None, &mut buffer)
|
||||
.unwrap();
|
||||
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
@@ -215,9 +201,7 @@ fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> Column
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_rows, None, &mut buffer)
|
||||
.unwrap();
|
||||
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
@@ -244,7 +228,9 @@ fn test_merge_columnar_numbers() {
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("numbers").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::F64(vals) = dynamic_column else { panic!() };
|
||||
let DynamicColumn::F64(vals) = dynamic_column else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(vals.first(0u32), Some(-1f64));
|
||||
assert_eq!(vals.first(1u32), None);
|
||||
@@ -270,7 +256,9 @@ fn test_merge_columnar_texts() {
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("texts").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
|
||||
let DynamicColumn::Str(vals) = dynamic_column else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
|
||||
|
||||
let get_str_for_ord = |ord| {
|
||||
@@ -317,7 +305,9 @@ fn test_merge_columnar_byte() {
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("bytes").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
|
||||
let DynamicColumn::Bytes(vals) = dynamic_column else {
|
||||
panic!()
|
||||
};
|
||||
let get_bytes_for_ord = |ord| {
|
||||
let mut out = Vec::new();
|
||||
vals.ord_to_bytes(ord, &mut out).unwrap();
|
||||
@@ -371,7 +361,9 @@ fn test_merge_columnar_byte_with_missing() {
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let cols = columnar_reader.read_columns("col").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
|
||||
let DynamicColumn::Bytes(vals) = dynamic_column else {
|
||||
panic!()
|
||||
};
|
||||
let get_bytes_for_ord = |ord| {
|
||||
let mut out = Vec::new();
|
||||
vals.ord_to_bytes(ord, &mut out).unwrap();
|
||||
@@ -423,7 +415,9 @@ fn test_merge_columnar_different_types() {
|
||||
|
||||
// numeric column
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::I64(vals) = dynamic_column else { panic!() };
|
||||
let DynamicColumn::I64(vals) = dynamic_column else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]);
|
||||
assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]);
|
||||
@@ -433,7 +427,9 @@ fn test_merge_columnar_different_types() {
|
||||
|
||||
// text column
|
||||
let dynamic_column = cols[1].open().unwrap();
|
||||
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
|
||||
let DynamicColumn::Str(vals) = dynamic_column else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
|
||||
let get_str_for_ord = |ord| {
|
||||
let mut out = String::new();
|
||||
|
||||
@@ -5,6 +5,7 @@ mod reader;
|
||||
mod writer;
|
||||
|
||||
pub use column_type::{ColumnType, HasAssociatedColumnType};
|
||||
pub use format_version::{Version, CURRENT_VERSION};
|
||||
#[cfg(test)]
|
||||
pub(crate) use merge::ColumnTypeCategory;
|
||||
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
|
||||
|
||||
@@ -6,7 +6,7 @@ use sstable::{Dictionary, RangeSSTable};
|
||||
|
||||
use crate::columnar::{format_version, ColumnType};
|
||||
use crate::dynamic_column::DynamicColumnHandle;
|
||||
use crate::RowId;
|
||||
use crate::{RowId, Version};
|
||||
|
||||
fn io_invalid_data(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::InvalidData, msg)
|
||||
@@ -19,6 +19,7 @@ pub struct ColumnarReader {
|
||||
column_dictionary: Dictionary<RangeSSTable>,
|
||||
column_data: FileSlice,
|
||||
num_rows: RowId,
|
||||
format_version: Version,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ColumnarReader {
|
||||
@@ -53,6 +54,7 @@ impl fmt::Debug for ColumnarReader {
|
||||
fn read_all_columns_in_stream(
|
||||
mut stream: sstable::Streamer<'_, RangeSSTable>,
|
||||
column_data: &FileSlice,
|
||||
format_version: Version,
|
||||
) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
let mut results = Vec::new();
|
||||
while stream.advance() {
|
||||
@@ -67,6 +69,7 @@ fn read_all_columns_in_stream(
|
||||
let dynamic_column_handle = DynamicColumnHandle {
|
||||
file_slice,
|
||||
column_type,
|
||||
format_version,
|
||||
};
|
||||
results.push(dynamic_column_handle);
|
||||
}
|
||||
@@ -88,7 +91,7 @@ impl ColumnarReader {
|
||||
let num_rows = u32::deserialize(&mut &footer_bytes[8..12])?;
|
||||
let version_footer_bytes: [u8; format_version::VERSION_FOOTER_NUM_BYTES] =
|
||||
footer_bytes[12..].try_into().unwrap();
|
||||
let _version = format_version::parse_footer(version_footer_bytes)?;
|
||||
let format_version = format_version::parse_footer(version_footer_bytes)?;
|
||||
let (column_data, sstable) =
|
||||
file_slice_without_sstable_len.split_from_end(sstable_len as usize);
|
||||
let column_dictionary = Dictionary::open(sstable)?;
|
||||
@@ -96,36 +99,49 @@ impl ColumnarReader {
|
||||
column_dictionary,
|
||||
column_data,
|
||||
num_rows,
|
||||
format_version,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.num_rows
|
||||
}
|
||||
// Iterate over the columns in a sorted way
|
||||
pub fn iter_columns(
|
||||
&self,
|
||||
) -> io::Result<impl Iterator<Item = (String, DynamicColumnHandle)> + '_> {
|
||||
let mut stream = self.column_dictionary.stream()?;
|
||||
Ok(std::iter::from_fn(move || {
|
||||
if stream.advance() {
|
||||
let key_bytes: &[u8] = stream.key();
|
||||
let column_code: u8 = key_bytes.last().cloned().unwrap();
|
||||
// TODO Error Handling. The API gets quite ugly when returning the error here, so
|
||||
// instead we could just check the first N columns upfront.
|
||||
let column_type: ColumnType = ColumnType::try_from_code(column_code)
|
||||
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))
|
||||
.unwrap();
|
||||
let range = stream.value().clone();
|
||||
let column_name =
|
||||
// The last two bytes are respectively the 0u8 separator and the column_type.
|
||||
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 2]).to_string();
|
||||
let file_slice = self
|
||||
.column_data
|
||||
.slice(range.start as usize..range.end as usize);
|
||||
let column_handle = DynamicColumnHandle {
|
||||
file_slice,
|
||||
column_type,
|
||||
format_version: self.format_version,
|
||||
};
|
||||
Some((column_name, column_handle))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}))
|
||||
}
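A hedged usage sketch of the new lazy iterator, assuming an already-opened `ColumnarReader` (the helper name is illustrative). Keys come back sorted from the column dictionary, and each `DynamicColumnHandle` is built on the fly without opening the underlying column data:

```rust
fn describe_columns(reader: &ColumnarReader) -> io::Result<Vec<String>> {
    Ok(reader
        .iter_columns()?
        .map(|(column_name, handle)| format!("{column_name}: {:?}", handle.column_type()))
        .collect())
}
```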
// TODO Add unit tests
|
||||
pub fn list_columns(&self) -> io::Result<Vec<(String, DynamicColumnHandle)>> {
|
||||
let mut stream = self.column_dictionary.stream()?;
|
||||
let mut results = Vec::new();
|
||||
while stream.advance() {
|
||||
let key_bytes: &[u8] = stream.key();
|
||||
let column_code: u8 = key_bytes.last().cloned().unwrap();
|
||||
let column_type: ColumnType = ColumnType::try_from_code(column_code)
|
||||
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
|
||||
let range = stream.value().clone();
|
||||
let column_name =
|
||||
// The last two bytes are respectively the 0u8 separator and the column_type.
|
||||
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 2]).to_string();
|
||||
let file_slice = self
|
||||
.column_data
|
||||
.slice(range.start as usize..range.end as usize);
|
||||
let column_handle = DynamicColumnHandle {
|
||||
file_slice,
|
||||
column_type,
|
||||
};
|
||||
results.push((column_name, column_handle));
|
||||
}
|
||||
Ok(results)
|
||||
Ok(self.iter_columns()?.collect())
|
||||
}
|
||||
|
||||
fn stream_for_column_range(&self, column_name: &str) -> sstable::StreamerBuilder<RangeSSTable> {
|
||||
@@ -156,7 +172,7 @@ impl ColumnarReader {
|
||||
.stream_for_column_range(column_name)
|
||||
.into_stream_async()
|
||||
.await?;
|
||||
read_all_columns_in_stream(stream, &self.column_data)
|
||||
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
|
||||
}
|
||||
|
||||
/// Get all columns for the given column name.
|
||||
@@ -165,7 +181,7 @@ impl ColumnarReader {
|
||||
/// different types.
|
||||
pub fn read_columns(&self, column_name: &str) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
let stream = self.stream_for_column_range(column_name).into_stream()?;
|
||||
read_all_columns_in_stream(stream, &self.column_data)
|
||||
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
|
||||
}
|
||||
|
||||
/// Return the number of columns in the columnar.
|
||||
@@ -184,7 +200,7 @@ mod tests {
|
||||
columnar_writer.record_column_type("col1", ColumnType::Str, false);
|
||||
columnar_writer.record_column_type("col2", ColumnType::U64, false);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(1, None, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(1, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
let columns = columnar.list_columns().unwrap();
|
||||
assert_eq!(columns.len(), 2);
|
||||
@@ -200,7 +216,7 @@ mod tests {
|
||||
columnar_writer.record_column_type("count", ColumnType::U64, false);
|
||||
columnar_writer.record_numerical(1, "count", 1u64);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(2, None, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(2, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
let columns = columnar.list_columns().unwrap();
|
||||
assert_eq!(columns.len(), 1);
|
||||
|
||||
@@ -87,7 +87,7 @@ impl<V: SymbolValue> ColumnOperation<V> {
|
||||
minibuf
|
||||
}
|
||||
|
||||
/// Deserialize a colummn operation.
|
||||
/// Deserialize a column operation.
|
||||
/// Returns None if the buffer is empty.
|
||||
///
|
||||
/// Panics if the payload is invalid:
|
||||
|
||||
@@ -41,31 +41,10 @@ impl ColumnWriter {
|
||||
pub(super) fn operation_iterator<'a, V: SymbolValue>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
old_to_new_ids_opt: Option<&[RowId]>,
|
||||
buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
|
||||
buffer.clear();
|
||||
self.values.read_to_end(arena, buffer);
|
||||
if let Some(old_to_new_ids) = old_to_new_ids_opt {
|
||||
// TODO avoid the extra deserialization / serialization.
|
||||
let mut sorted_ops: Vec<(RowId, ColumnOperation<V>)> = Vec::new();
|
||||
let mut new_doc = 0u32;
|
||||
let mut cursor = &buffer[..];
|
||||
for op in std::iter::from_fn(|| ColumnOperation::<V>::deserialize(&mut cursor)) {
|
||||
if let ColumnOperation::NewDoc(doc) = &op {
|
||||
new_doc = old_to_new_ids[*doc as usize];
|
||||
sorted_ops.push((new_doc, ColumnOperation::NewDoc(new_doc)));
|
||||
} else {
|
||||
sorted_ops.push((new_doc, op));
|
||||
}
|
||||
}
|
||||
// stable sort is crucial here.
|
||||
sorted_ops.sort_by_key(|(new_doc_id, _)| *new_doc_id);
|
||||
buffer.clear();
|
||||
for (_, op) in sorted_ops {
|
||||
buffer.extend_from_slice(op.serialize().as_ref());
|
||||
}
|
||||
}
|
||||
let mut cursor: &[u8] = &buffer[..];
|
||||
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
|
||||
}
|
||||
@@ -231,11 +210,9 @@ impl NumericalColumnWriter {
|
||||
pub(super) fn operation_iterator<'a>(
|
||||
self,
|
||||
arena: &MemoryArena,
|
||||
old_to_new_ids: Option<&[RowId]>,
|
||||
buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
|
||||
self.column_writer
|
||||
.operation_iterator(arena, old_to_new_ids, buffer)
|
||||
self.column_writer.operation_iterator(arena, buffer)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,18 +246,17 @@ impl StrOrBytesColumnWriter {
|
||||
dictionaries: &mut [DictionaryBuilder],
|
||||
arena: &mut MemoryArena,
|
||||
) {
|
||||
let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
|
||||
let unordered_id =
|
||||
dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes, arena);
|
||||
self.column_writer.record(doc, unordered_id, arena);
|
||||
}
|
||||
|
||||
pub(super) fn operation_iterator<'a>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
old_to_new_ids: Option<&[RowId]>,
|
||||
byte_buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
|
||||
self.column_writer
|
||||
.operation_iterator(arena, old_to_new_ids, byte_buffer)
|
||||
self.column_writer.operation_iterator(arena, byte_buffer)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,14 +8,13 @@ use std::net::Ipv6Addr;
|
||||
|
||||
use column_operation::ColumnOperation;
|
||||
pub(crate) use column_writers::CompatibleNumericalTypes;
|
||||
use common::json_path_writer::JSON_END_OF_PATH;
|
||||
use common::CountingWriter;
|
||||
pub(crate) use serializer::ColumnarSerializer;
|
||||
use stacker::{Addr, ArenaHashMap, MemoryArena};
|
||||
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::column_values::{
|
||||
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
|
||||
};
|
||||
use crate::column_index::{SerializableColumnIndex, SerializableOptionalIndex};
|
||||
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
||||
use crate::columnar::column_type::ColumnType;
|
||||
use crate::columnar::writer::column_writers::{
|
||||
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
|
||||
@@ -45,7 +44,7 @@ struct SpareBuffers {
|
||||
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
|
||||
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh we ended up mixing integer and floats.
|
||||
/// let mut wrt: Vec<u8> = Vec::new();
|
||||
/// columnar_writer.serialize(2u32, None, &mut wrt).unwrap();
|
||||
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
|
||||
/// ```
|
||||
#[derive(Default)]
|
||||
pub struct ColumnarWriter {
|
||||
@@ -61,25 +60,8 @@ pub struct ColumnarWriter {
|
||||
buffers: SpareBuffers,
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn mutate_or_create_column<V, TMutator>(
|
||||
arena_hash_map: &mut ArenaHashMap,
|
||||
column_name: &str,
|
||||
updater: TMutator,
|
||||
) where
|
||||
V: Copy + 'static,
|
||||
TMutator: FnMut(Option<V>) -> V,
|
||||
{
|
||||
assert!(
|
||||
!column_name.as_bytes().contains(&0u8),
|
||||
"key may not contain the 0 byte"
|
||||
);
|
||||
arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
|
||||
}
|
||||
|
||||
impl ColumnarWriter {
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
// TODO add dictionary builders.
|
||||
self.arena.mem_usage()
|
||||
+ self.numerical_field_hash_map.mem_usage()
|
||||
+ self.bool_field_hash_map.mem_usage()
|
||||
@@ -87,57 +69,11 @@ impl ColumnarWriter {
|
||||
+ self.str_field_hash_map.mem_usage()
|
||||
+ self.ip_addr_field_hash_map.mem_usage()
|
||||
+ self.datetime_field_hash_map.mem_usage()
|
||||
}
|
||||
|
||||
/// Returns the list of doc ids from 0..num_docs sorted by the `sort_field`
|
||||
/// column.
|
||||
///
|
||||
/// If the column is multivalued, use the first value for scoring.
|
||||
/// If no value is associated to a specific row, the document is assigned
|
||||
/// the lowest possible score.
|
||||
///
|
||||
/// The sort applied is stable.
|
||||
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
|
||||
let Some(numerical_col_writer) =
|
||||
self.numerical_field_hash_map.get::<NumericalColumnWriter>(sort_field.as_bytes()) else {
|
||||
return Vec::new();
|
||||
};
|
||||
let mut symbols_buffer = Vec::new();
|
||||
let mut values = Vec::new();
|
||||
let mut start_doc_check_fill = 0;
|
||||
let mut current_doc_opt: Option<RowId> = None;
|
||||
// Assumption: NewDoc will never call the same doc twice and is strictly increasing between
|
||||
// calls
|
||||
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
|
||||
match op {
|
||||
ColumnOperation::NewDoc(doc) => {
|
||||
current_doc_opt = Some(doc);
|
||||
}
|
||||
ColumnOperation::Value(numerical_value) => {
|
||||
if let Some(current_doc) = current_doc_opt {
|
||||
// Fill up with 0.0 since last doc
|
||||
values.extend((start_doc_check_fill..current_doc).map(|doc| (0.0, doc)));
|
||||
start_doc_check_fill = current_doc + 1;
|
||||
// handle multi values
|
||||
current_doc_opt = None;
|
||||
|
||||
let score: f32 = f64::coerce(numerical_value) as f32;
|
||||
values.push((score, current_doc));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for doc in values.len() as u32..num_docs {
|
||||
values.push((0.0f32, doc));
|
||||
}
|
||||
values.sort_by(|(left_score, _), (right_score, _)| {
|
||||
if reversed {
|
||||
right_score.total_cmp(left_score)
|
||||
} else {
|
||||
left_score.total_cmp(right_score)
|
||||
}
|
||||
});
|
||||
values.into_iter().map(|(_score, doc)| doc).collect()
|
||||
+ self
|
||||
.dictionaries
|
||||
.iter()
|
||||
.map(|dict| dict.mem_usage())
|
||||
.sum::<usize>()
|
||||
}
|
||||
|
||||
/// Records a column type. This is useful to bypass the coercion process,
|
||||
@@ -167,9 +103,8 @@ impl ColumnarWriter {
|
||||
},
|
||||
&mut self.dictionaries,
|
||||
);
|
||||
mutate_or_create_column(
|
||||
hash_map,
|
||||
column_name,
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<StrOrBytesColumnWriter>| {
|
||||
let mut column_writer = if let Some(column_writer) = column_opt {
|
||||
column_writer
|
||||
@@ -184,24 +119,21 @@ impl ColumnarWriter {
|
||||
);
|
||||
}
|
||||
ColumnType::Bool => {
|
||||
mutate_or_create_column(
|
||||
&mut self.bool_field_hash_map,
|
||||
column_name,
|
||||
self.bool_field_hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
ColumnType::DateTime => {
|
||||
mutate_or_create_column(
|
||||
&mut self.datetime_field_hash_map,
|
||||
column_name,
|
||||
self.datetime_field_hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
|
||||
let numerical_type = column_type.numerical_type().unwrap();
|
||||
mutate_or_create_column(
|
||||
&mut self.numerical_field_hash_map,
|
||||
column_name,
|
||||
self.numerical_field_hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<NumericalColumnWriter>| {
|
||||
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
|
||||
column.force_numerical_type(numerical_type);
|
||||
@@ -209,9 +141,8 @@ impl ColumnarWriter {
|
||||
},
|
||||
);
|
||||
}
|
||||
ColumnType::IpAddr => mutate_or_create_column(
|
||||
&mut self.ip_addr_field_hash_map,
|
||||
column_name,
|
||||
ColumnType::IpAddr => self.ip_addr_field_hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
|
||||
),
|
||||
}
|
||||
@@ -224,9 +155,8 @@ impl ColumnarWriter {
|
||||
numerical_value: T,
|
||||
) {
|
||||
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
|
||||
mutate_or_create_column(
|
||||
hash_map,
|
||||
column_name,
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<NumericalColumnWriter>| {
|
||||
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record_numerical_value(doc, numerical_value.into(), arena);
|
||||
@@ -236,10 +166,6 @@ impl ColumnarWriter {
|
||||
}
|
||||
|
||||
pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
|
||||
assert!(
|
||||
!column_name.as_bytes().contains(&0u8),
|
||||
"key may not contain the 0 byte"
|
||||
);
|
||||
let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
@@ -253,24 +179,30 @@ impl ColumnarWriter {
|
||||
|
||||
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
|
||||
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
|
||||
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(doc, val, arena);
|
||||
column
|
||||
});
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(doc, val, arena);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
|
||||
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
|
||||
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(
|
||||
doc,
|
||||
NumericalValue::I64(datetime.into_timestamp_nanos()),
|
||||
arena,
|
||||
);
|
||||
column
|
||||
});
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(
|
||||
doc,
|
||||
NumericalValue::I64(datetime.into_timestamp_nanos()),
|
||||
arena,
|
||||
);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
|
||||
@@ -295,10 +227,6 @@ impl ColumnarWriter {
|
||||
}
|
||||
|
||||
pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
|
||||
assert!(
|
||||
!column_name.as_bytes().contains(&0u8),
|
||||
"key may not contain the 0 byte"
|
||||
);
|
||||
let (hash_map, arena, dictionaries) = (
|
||||
&mut self.bytes_field_hash_map,
|
||||
&mut self.arena,
|
||||
@@ -318,17 +246,13 @@ impl ColumnarWriter {
|
||||
},
|
||||
);
|
||||
}
|
||||
pub fn serialize(
|
||||
&mut self,
|
||||
num_docs: RowId,
|
||||
old_to_new_row_ids: Option<&[RowId]>,
|
||||
wrt: &mut dyn io::Write,
|
||||
) -> io::Result<()> {
|
||||
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
|
||||
let mut serializer = ColumnarSerializer::new(wrt);
|
||||
|
||||
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
|
||||
.numerical_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| {
|
||||
.map(|(column_name, addr)| {
|
||||
let numerical_column_writer: NumericalColumnWriter =
|
||||
self.numerical_field_hash_map.read(addr);
|
||||
let column_type = numerical_column_writer.numerical_type().into();
|
||||
@@ -338,33 +262,39 @@ impl ColumnarWriter {
|
||||
columns.extend(
|
||||
self.bytes_field_hash_map
|
||||
.iter()
|
||||
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Bytes, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.str_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Str, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.bool_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Bool, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.ip_addr_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::IpAddr, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.datetime_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::DateTime, addr)),
|
||||
);
|
||||
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
|
||||
|
||||
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
|
||||
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
|
||||
for (column_name, column_type, addr) in columns {
|
||||
if column_name.contains(&JSON_END_OF_PATH) {
|
||||
// Tantivy uses b'\0' as a separator for nested fields in JSON.
// Column names containing a b'\0' are not supported and are simply ignored by the columnar
// (and by the inverted index).
|
||||
continue;
|
||||
}
|
||||
match column_type {
|
||||
ColumnType::Bool => {
|
||||
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
|
||||
@@ -374,11 +304,7 @@ impl ColumnarWriter {
|
||||
serialize_bool_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
@@ -392,11 +318,7 @@ impl ColumnarWriter {
|
||||
serialize_ip_addr_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
@@ -421,12 +343,10 @@ impl ColumnarWriter {
|
||||
num_docs,
|
||||
str_or_bytes_column_writer.sort_values_within_row,
|
||||
dictionary_builder,
|
||||
str_or_bytes_column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
str_or_bytes_column_writer
|
||||
.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&self.arena,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
column_serializer.finalize()?;
|
||||
@@ -442,11 +362,7 @@ impl ColumnarWriter {
|
||||
cardinality,
|
||||
num_docs,
|
||||
numerical_type,
|
||||
numerical_column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
@@ -461,11 +377,7 @@ impl ColumnarWriter {
|
||||
cardinality,
|
||||
num_docs,
|
||||
NumericalType::I64,
|
||||
column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
@@ -480,6 +392,7 @@ impl ColumnarWriter {
|
||||
|
||||
// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
|
||||
// Column: [Column Index, Column Values, column index num bytes U32::LE]
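// Note on the layout above (an inference from the u32 footers, not stated elsewhere in this
// diff): each section is followed by its length as a little-endian u32, so a reader can
// presumably locate the dictionary and the column index by peeling those lengths off the end.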
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn serialize_bytes_or_str_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
@@ -487,6 +400,7 @@ fn serialize_bytes_or_str_column(
|
||||
dictionary_builder: &DictionaryBuilder,
|
||||
operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
arena: &MemoryArena,
|
||||
wrt: impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
@@ -495,7 +409,8 @@ fn serialize_bytes_or_str_column(
|
||||
..
|
||||
} = buffers;
|
||||
let mut counting_writer = CountingWriter::wrap(wrt);
|
||||
let term_id_mapping: TermIdMapping = dictionary_builder.serialize(&mut counting_writer)?;
|
||||
let term_id_mapping: TermIdMapping =
|
||||
dictionary_builder.serialize(arena, &mut counting_writer)?;
|
||||
let dictionary_num_bytes: u32 = counting_writer.written_bytes() as u32;
|
||||
let mut wrt = counting_writer.finish();
|
||||
let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
|
||||
@@ -631,10 +546,7 @@ fn send_to_serialize_column_mappable_to_u128<
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<T>,
|
||||
mut wrt: impl io::Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
for<'a> VecColumn<'a, T>: ColumnValues<T>,
|
||||
{
|
||||
) -> io::Result<()> {
|
||||
values.clear();
|
||||
// TODO: split index and values
|
||||
let serializable_column_index = match cardinality {
|
||||
@@ -650,16 +562,16 @@ where
|
||||
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
|
||||
consume_operation_iterator(op_iterator, optional_index_builder, values);
|
||||
let optional_index = optional_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Optional {
|
||||
SerializableColumnIndex::Optional(SerializableOptionalIndex {
|
||||
num_rows,
|
||||
non_null_row_ids: Box::new(optional_index),
|
||||
}
|
||||
})
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
|
||||
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
|
||||
let serializable_multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Multivalued(serializable_multivalued_index)
|
||||
}
|
||||
};
|
||||
crate::column::serialize_column_mappable_to_u128(
|
||||
@@ -670,15 +582,6 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut [u64]) {
|
||||
let mut start_index: usize = 0;
|
||||
for end_index in multivalued_index.iter().copied() {
|
||||
let end_index = end_index as usize;
|
||||
values[start_index..end_index].sort_unstable();
|
||||
start_index = end_index;
|
||||
}
|
||||
}
|
||||
|
||||
fn send_to_serialize_column_mappable_to_u64(
|
||||
op_iterator: impl Iterator<Item = ColumnOperation<u64>>,
|
||||
cardinality: Cardinality,
|
||||
@@ -687,10 +590,7 @@ fn send_to_serialize_column_mappable_to_u64(
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<u64>,
|
||||
mut wrt: impl io::Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
for<'a> VecColumn<'a, u64>: ColumnValues<u64>,
|
||||
{
|
||||
) -> io::Result<()> {
|
||||
values.clear();
|
||||
let serializable_column_index = match cardinality {
|
||||
Cardinality::Full => {
|
||||
@@ -705,19 +605,22 @@ where
|
||||
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
|
||||
consume_operation_iterator(op_iterator, optional_index_builder, values);
|
||||
let optional_index = optional_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Optional {
|
||||
SerializableColumnIndex::Optional(SerializableOptionalIndex {
|
||||
non_null_row_ids: Box::new(optional_index),
|
||||
num_rows,
|
||||
}
|
||||
})
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
|
||||
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
let serializable_multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
if sort_values_within_row {
|
||||
sort_values_within_row_in_place(multivalued_index, values);
|
||||
sort_values_within_row_in_place(
|
||||
serializable_multivalued_index.start_offsets.boxed_iter(),
|
||||
values,
|
||||
);
|
||||
}
|
||||
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
|
||||
SerializableColumnIndex::Multivalued(serializable_multivalued_index)
|
||||
}
|
||||
};
|
||||
crate::column::serialize_column_mappable_to_u64(
|
||||
@@ -728,6 +631,18 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn sort_values_within_row_in_place(
|
||||
multivalued_index: impl Iterator<Item = RowId>,
|
||||
values: &mut [u64],
|
||||
) {
|
||||
let mut start_index: usize = 0;
|
||||
for end_index in multivalued_index {
|
||||
let end_index = end_index as usize;
|
||||
values[start_index..end_index].sort_unstable();
|
||||
start_index = end_index;
|
||||
}
|
||||
}
|
||||
|
||||
fn coerce_numerical_symbol<T>(
|
||||
operation_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
|
||||
) -> impl Iterator<Item = ColumnOperation<u64>>
|
||||
@@ -775,7 +690,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&arena, None, &mut buffer)
|
||||
.operation_iterator(&arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 6);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
@@ -804,7 +719,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&arena, None, &mut buffer)
|
||||
.operation_iterator(&arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 4);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
|
||||
@@ -827,7 +742,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&arena, None, &mut buffer)
|
||||
.operation_iterator(&arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 2);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
@@ -846,7 +761,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&arena, None, &mut buffer)
|
||||
.operation_iterator(&arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 3);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use common::json_path_writer::JSON_END_OF_PATH;
|
||||
use common::{BinarySerializable, CountingWriter};
|
||||
use sstable::value::RangeValueWriter;
|
||||
use sstable::RangeSSTable;
|
||||
@@ -19,7 +20,7 @@ pub struct ColumnarSerializer<W: io::Write> {
|
||||
fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
|
||||
buffer.clear();
|
||||
buffer.extend_from_slice(key);
|
||||
buffer.push(0u8);
|
||||
buffer.push(JSON_END_OF_PATH);
|
||||
buffer.push(column_type.to_code());
|
||||
}
|
||||
|
||||
@@ -92,19 +93,3 @@ impl<'a, W: io::Write> io::Write for ColumnSerializer<'a, W> {
|
||||
self.columnar_serializer.wrt.write_all(buf)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::columnar::column_type::ColumnType;
|
||||
|
||||
#[test]
|
||||
fn test_prepare_key_bytes() {
|
||||
let mut buffer: Vec<u8> = b"somegarbage".to_vec();
|
||||
prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
|
||||
assert_eq!(buffer.len(), 12);
|
||||
assert_eq!(&buffer[..10], b"root\0child");
|
||||
assert_eq!(buffer[10], 0u8);
|
||||
assert_eq!(buffer[11], ColumnType::Str.to_code());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use crate::column_index::{SerializableMultivalueIndex, SerializableOptionalIndex};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::RowId;
|
||||
|
||||
@@ -59,31 +60,47 @@ impl IndexBuilder for OptionalIndexBuilder {
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MultivaluedIndexBuilder {
|
||||
start_offsets: Vec<RowId>,
|
||||
doc_with_values: Vec<RowId>,
|
||||
start_offsets: Vec<u32>,
|
||||
total_num_vals_seen: u32,
|
||||
current_row: RowId,
|
||||
current_row_has_value: bool,
|
||||
}
|
||||
|
||||
impl MultivaluedIndexBuilder {
|
||||
pub fn finish(&mut self, num_docs: RowId) -> &[u32] {
|
||||
self.start_offsets
|
||||
.resize(num_docs as usize + 1, self.total_num_vals_seen);
|
||||
&self.start_offsets[..]
|
||||
pub fn finish(&mut self, num_docs: RowId) -> SerializableMultivalueIndex<'_> {
|
||||
self.start_offsets.push(self.total_num_vals_seen);
|
||||
let non_null_row_ids: Box<dyn Iterable<RowId>> = Box::new(&self.doc_with_values[..]);
|
||||
SerializableMultivalueIndex {
|
||||
doc_ids_with_values: SerializableOptionalIndex {
|
||||
non_null_row_ids,
|
||||
num_rows: num_docs,
|
||||
},
|
||||
start_offsets: Box::new(&self.start_offsets[..]),
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.doc_with_values.clear();
|
||||
self.start_offsets.clear();
|
||||
self.start_offsets.push(0u32);
|
||||
self.total_num_vals_seen = 0;
|
||||
self.current_row = 0;
|
||||
self.current_row_has_value = false;
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexBuilder for MultivaluedIndexBuilder {
|
||||
fn record_row(&mut self, row_id: RowId) {
|
||||
self.start_offsets
|
||||
.resize(row_id as usize + 1, self.total_num_vals_seen);
|
||||
self.current_row = row_id;
|
||||
self.current_row_has_value = false;
|
||||
}
|
||||
|
||||
fn record_value(&mut self) {
|
||||
if !self.current_row_has_value {
|
||||
self.current_row_has_value = true;
|
||||
self.doc_with_values.push(self.current_row);
|
||||
self.start_offsets.push(self.total_num_vals_seen);
|
||||
}
|
||||
self.total_num_vals_seen += 1;
|
||||
}
|
||||
}
|
||||
@@ -141,6 +158,32 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multivalued_value_index_builder_simple() {
|
||||
let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
|
||||
{
|
||||
multivalued_value_index_builder.record_row(0u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_value();
|
||||
let serialized_multivalue_index = multivalued_value_index_builder.finish(1u32);
|
||||
let start_offsets: Vec<u32> = serialized_multivalue_index
|
||||
.start_offsets
|
||||
.boxed_iter()
|
||||
.collect();
|
||||
assert_eq!(&start_offsets, &[0, 2]);
|
||||
}
|
||||
multivalued_value_index_builder.reset();
|
||||
multivalued_value_index_builder.record_row(0u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_value();
|
||||
let serialized_multivalue_index = multivalued_value_index_builder.finish(1u32);
|
||||
let start_offsets: Vec<u32> = serialized_multivalue_index
|
||||
.start_offsets
|
||||
.boxed_iter()
|
||||
.collect();
|
||||
assert_eq!(&start_offsets, &[0, 2]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multivalued_value_index_builder() {
|
||||
let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
|
||||
@@ -149,17 +192,15 @@ mod tests {
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_row(2u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
assert_eq!(
|
||||
multivalued_value_index_builder.finish(4u32).to_vec(),
|
||||
vec![0, 0, 2, 3, 3]
|
||||
);
|
||||
multivalued_value_index_builder.reset();
|
||||
multivalued_value_index_builder.record_row(2u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_value();
|
||||
assert_eq!(
|
||||
multivalued_value_index_builder.finish(4u32).to_vec(),
|
||||
vec![0, 0, 0, 2, 2]
|
||||
);
|
||||
let SerializableMultivalueIndex {
|
||||
doc_ids_with_values,
|
||||
start_offsets,
|
||||
} = multivalued_value_index_builder.finish(4u32);
|
||||
assert_eq!(doc_ids_with_values.num_rows, 4u32);
|
||||
let doc_ids_with_values: Vec<u32> =
|
||||
doc_ids_with_values.non_null_row_ids.boxed_iter().collect();
|
||||
assert_eq!(&doc_ids_with_values, &[1u32, 2u32]);
|
||||
let start_offsets: Vec<u32> = start_offsets.boxed_iter().collect();
|
||||
assert_eq!(&start_offsets[..], &[0, 2, 3]);
|
||||
}
|
||||
}
|
||||
|
||||
columnar/src/compat_tests.rs (new file, 183 lines)
@@ -0,0 +1,183 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
use crate::{
|
||||
merge_columnar, Cardinality, Column, ColumnarReader, DynamicColumn, StackMergeOrder,
|
||||
CURRENT_VERSION,
|
||||
};
|
||||
|
||||
const NUM_DOCS: u32 = u16::MAX as u32;
|
||||
|
||||
fn generate_columnar(num_docs: u32, value_offset: u64) -> Vec<u8> {
|
||||
use crate::ColumnarWriter;
|
||||
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
|
||||
for i in 0..num_docs {
|
||||
if i % 100 == 0 {
|
||||
columnar_writer.record_numerical(i, "sparse", value_offset + i as u64);
|
||||
}
|
||||
if i % 5 == 0 {
|
||||
columnar_writer.record_numerical(i, "dense", value_offset + i as u64);
|
||||
}
|
||||
columnar_writer.record_numerical(i, "full", value_offset + i as u64);
|
||||
columnar_writer.record_numerical(i, "multi", value_offset + i as u64);
|
||||
columnar_writer.record_numerical(i, "multi", value_offset + i as u64);
|
||||
}
|
||||
|
||||
let mut wrt: Vec<u8> = Vec::new();
|
||||
columnar_writer.serialize(num_docs, &mut wrt).unwrap();
|
||||
|
||||
wrt
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Writes a columnar for the CURRENT_VERSION to disk.
|
||||
fn create_format() {
|
||||
let version = CURRENT_VERSION.to_string();
|
||||
let file_path = path_for_version(&version);
|
||||
if PathBuf::from(file_path.clone()).exists() {
|
||||
return;
|
||||
}
|
||||
let columnar = generate_columnar(NUM_DOCS, 0);
|
||||
std::fs::write(file_path, columnar).unwrap();
|
||||
}
|
||||
|
||||
fn path_for_version(version: &str) -> String {
|
||||
format!("./compat_tests_data/{}.columnar", version)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_v1() {
|
||||
let path = path_for_version("v1");
|
||||
test_format(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_v2() {
|
||||
let path = path_for_version("v2");
|
||||
test_format(&path);
|
||||
}
|
||||
|
||||
fn test_format(path: &str) {
|
||||
let file_content = std::fs::read(path).unwrap();
|
||||
let reader = ColumnarReader::open(file_content).unwrap();
|
||||
|
||||
check_columns(&reader);
|
||||
|
||||
// Test merge
|
||||
let reader2 = ColumnarReader::open(generate_columnar(NUM_DOCS, NUM_DOCS as u64)).unwrap();
|
||||
let columnar_readers = vec![&reader, &reader2];
|
||||
let merge_row_order = StackMergeOrder::stack(&columnar_readers[..]);
|
||||
let mut out = Vec::new();
|
||||
merge_columnar(&columnar_readers, &[], merge_row_order.into(), &mut out).unwrap();
|
||||
let reader = ColumnarReader::open(out).unwrap();
|
||||
check_columns(&reader);
|
||||
}
|
||||
|
||||
fn check_columns(reader: &ColumnarReader) {
|
||||
let column = open_column(reader, "full");
|
||||
check_column(&column, |doc_id| vec![(doc_id, doc_id as u64).into()]);
|
||||
assert_eq!(column.get_cardinality(), Cardinality::Full);
|
||||
|
||||
let column = open_column(reader, "multi");
|
||||
check_column(&column, |doc_id| {
|
||||
vec![
|
||||
(doc_id * 2, doc_id as u64).into(),
|
||||
(doc_id * 2 + 1, doc_id as u64).into(),
|
||||
]
|
||||
});
|
||||
assert_eq!(column.get_cardinality(), Cardinality::Multivalued);
|
||||
|
||||
let column = open_column(reader, "sparse");
|
||||
check_column(&column, |doc_id| {
|
||||
if doc_id % 100 == 0 {
|
||||
vec![(doc_id / 100, doc_id as u64).into()]
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
});
|
||||
assert_eq!(column.get_cardinality(), Cardinality::Optional);
|
||||
|
||||
let column = open_column(reader, "dense");
|
||||
check_column(&column, |doc_id| {
|
||||
if doc_id % 5 == 0 {
|
||||
vec![(doc_id / 5, doc_id as u64).into()]
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
});
|
||||
assert_eq!(column.get_cardinality(), Cardinality::Optional);
|
||||
}
|
||||
|
||||
struct RowIdAndValue {
|
||||
row_id: u32,
|
||||
value: u64,
|
||||
}
|
||||
impl From<(u32, u64)> for RowIdAndValue {
|
||||
fn from((row_id, value): (u32, u64)) -> Self {
|
||||
Self { row_id, value }
|
||||
}
|
||||
}
|
||||
|
||||
fn check_column<F: Fn(u32) -> Vec<RowIdAndValue>>(column: &Column<u64>, expected: F) {
|
||||
let num_docs = column.num_docs();
|
||||
let test_doc = |doc: u32| {
|
||||
if expected(doc).is_empty() {
|
||||
assert_eq!(column.first(doc), None);
|
||||
} else {
|
||||
assert_eq!(column.first(doc), Some(expected(doc)[0].value));
|
||||
}
|
||||
let values = column.values_for_doc(doc).collect_vec();
|
||||
assert_eq!(values, expected(doc).iter().map(|x| x.value).collect_vec());
|
||||
let mut row_ids = Vec::new();
|
||||
column.row_ids_for_docs(&[doc], &mut vec![], &mut row_ids);
|
||||
assert_eq!(
|
||||
row_ids,
|
||||
expected(doc).iter().map(|x| x.row_id).collect_vec()
|
||||
);
|
||||
let values = column.values_for_doc(doc).collect_vec();
|
||||
assert_eq!(values, expected(doc).iter().map(|x| x.value).collect_vec());
|
||||
|
||||
// Docid rowid conversion
|
||||
let mut row_ids = Vec::new();
|
||||
let safe_next_doc = |doc: u32| (doc + 1).min(num_docs - 1);
|
||||
column
|
||||
.index
|
||||
.docids_to_rowids(&[doc, safe_next_doc(doc)], &mut vec![], &mut row_ids);
|
||||
let expected_rowids = expected(doc)
|
||||
.iter()
|
||||
.map(|x| x.row_id)
|
||||
.chain(expected(safe_next_doc(doc)).iter().map(|x| x.row_id))
|
||||
.collect_vec();
|
||||
assert_eq!(row_ids, expected_rowids);
|
||||
let rowid_range = column
|
||||
.index
|
||||
.docid_range_to_rowids(doc..safe_next_doc(doc) + 1);
|
||||
if expected_rowids.is_empty() {
|
||||
assert!(rowid_range.is_empty());
|
||||
} else {
|
||||
assert_eq!(
|
||||
rowid_range,
|
||||
expected_rowids[0]..expected_rowids.last().unwrap() + 1
|
||||
);
|
||||
}
|
||||
};
|
||||
test_doc(0);
|
||||
test_doc(num_docs - 1);
|
||||
test_doc(num_docs - 2);
|
||||
test_doc(65000);
|
||||
}
|
||||
|
||||
fn open_column(reader: &ColumnarReader, name: &str) -> Column<u64> {
|
||||
let column = reader.read_columns(name).unwrap()[0]
|
||||
.open()
|
||||
.unwrap()
|
||||
.coerce_numerical(crate::NumericalType::U64)
|
||||
.unwrap();
|
||||
let DynamicColumn::U64(column) = column else {
|
||||
panic!();
|
||||
};
|
||||
column
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::io;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use sstable::SSTable;
|
||||
use stacker::{MemoryArena, SharedArenaHashMap};
|
||||
|
||||
pub(crate) struct TermIdMapping {
|
||||
unordered_to_ord: Vec<OrderedId>,
|
||||
@@ -31,26 +31,38 @@ pub struct OrderedId(pub u32);
|
||||
/// mapping.
|
||||
#[derive(Default)]
|
||||
pub(crate) struct DictionaryBuilder {
|
||||
dict: FnvHashMap<Vec<u8>, UnorderedId>,
|
||||
dict: SharedArenaHashMap,
|
||||
}
|
||||
|
||||
impl DictionaryBuilder {
|
||||
/// Get or allocate an unordered id.
|
||||
/// (This ID is simply an auto-incremented id.)
|
||||
pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
|
||||
if let Some(term_id) = self.dict.get(term) {
|
||||
return *term_id;
|
||||
}
|
||||
let new_id = UnorderedId(self.dict.len() as u32);
|
||||
self.dict.insert(term.to_vec(), new_id);
|
||||
new_id
|
||||
pub fn get_or_allocate_id(&mut self, term: &[u8], arena: &mut MemoryArena) -> UnorderedId {
|
||||
let next_id = self.dict.len() as u32;
|
||||
let unordered_id = self
|
||||
.dict
|
||||
.mutate_or_create(term, arena, |unordered_id: Option<u32>| {
|
||||
if let Some(unordered_id) = unordered_id {
|
||||
unordered_id
|
||||
} else {
|
||||
next_id
|
||||
}
|
||||
});
|
||||
UnorderedId(unordered_id)
|
||||
}
|
||||
|
||||
/// Serialize the dictionary into an sstable, and return the
/// `UnorderedId -> TermOrdinal` map.
|
||||
pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> =
|
||||
self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
|
||||
pub fn serialize<'a, W: io::Write + 'a>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
wrt: &mut W,
|
||||
) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> = self
|
||||
.dict
|
||||
.iter(arena)
|
||||
.map(|(k, v)| (k, arena.read(v)))
|
||||
.collect();
|
||||
terms.sort_unstable_by_key(|(key, _)| *key);
|
||||
// TODO Remove the allocation.
|
||||
let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
|
||||
@@ -63,6 +75,10 @@ impl DictionaryBuilder {
|
||||
sstable_builder.finish()?;
|
||||
Ok(TermIdMapping { unordered_to_ord })
|
||||
}
|
||||
|
||||
pub(crate) fn mem_usage(&self) -> usize {
|
||||
self.dict.mem_usage()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -71,12 +87,13 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_builder() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut dictionary_builder = DictionaryBuilder::default();
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello", &mut arena);
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy", &mut arena);
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax", &mut arena);
|
||||
let mut buffer = Vec::new();
|
||||
let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
|
||||
let id_mapping = dictionary_builder.serialize(&arena, &mut buffer).unwrap();
|
||||
assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
|
||||
assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
|
||||
assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));
|
||||
|
||||
@@ -8,7 +8,7 @@ use common::{ByteCount, DateTime, HasLen, OwnedBytes};
|
||||
use crate::column::{BytesColumn, Column, StrColumn};
|
||||
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::{Cardinality, ColumnIndex, NumericalType};
|
||||
use crate::{Cardinality, ColumnIndex, ColumnValues, NumericalType, Version};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DynamicColumn {
|
||||
@@ -228,10 +228,11 @@ static_dynamic_conversions!(StrColumn, Str);
|
||||
static_dynamic_conversions!(BytesColumn, Bytes);
|
||||
static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DynamicColumnHandle {
|
||||
pub(crate) file_slice: FileSlice,
|
||||
pub(crate) column_type: ColumnType,
|
||||
pub(crate) format_version: Version,
|
||||
}
|
||||
|
||||
impl DynamicColumnHandle {
|
||||
@@ -247,7 +248,12 @@ impl DynamicColumnHandle {
|
||||
}
|
||||
|
||||
/// Returns the `u64` fast field reader associated with `fields` of types
/// Str, u64, i64, f64, or datetime.
/// Str, u64, i64, f64, bool, ip, or datetime.
///
/// Notice that for IpAddr, the fastfield reader will return the u64 representation of the
/// IpAddr.
/// To convert back to u128, cast to `CompactSpaceU64Accessor` and call
/// `compact_to_u128`.
///
/// Otherwise, the fastfield reader will return the u64 value associated with the original
/// FastValue.
|
||||
@@ -255,13 +261,24 @@ impl DynamicColumnHandle {
|
||||
let column_bytes = self.file_slice.read_bytes()?;
|
||||
match self.column_type {
|
||||
ColumnType::Str | ColumnType::Bytes => {
|
||||
let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
|
||||
let column: BytesColumn =
|
||||
crate::column::open_column_bytes(column_bytes, self.format_version)?;
|
||||
Ok(Some(column.term_ord_column))
|
||||
}
|
||||
ColumnType::Bool => Ok(None),
|
||||
ColumnType::IpAddr => Ok(None),
|
||||
ColumnType::I64 | ColumnType::U64 | ColumnType::F64 | ColumnType::DateTime => {
|
||||
let column = crate::column::open_column_u64::<u64>(column_bytes)?;
|
||||
ColumnType::IpAddr => {
|
||||
let column = crate::column::open_column_u128_as_compact_u64(
|
||||
column_bytes,
|
||||
self.format_version,
|
||||
)?;
|
||||
Ok(Some(column))
|
||||
}
|
||||
ColumnType::Bool
|
||||
| ColumnType::I64
|
||||
| ColumnType::U64
|
||||
| ColumnType::F64
|
||||
| ColumnType::DateTime => {
|
||||
let column =
|
||||
crate::column::open_column_u64::<u64>(column_bytes, self.format_version)?;
|
||||
Ok(Some(column))
|
||||
}
|
||||
}
|
||||
@@ -269,15 +286,31 @@ impl DynamicColumnHandle {
|
||||
|
||||
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
|
||||
let dynamic_column: DynamicColumn = match self.column_type {
|
||||
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(),
|
||||
ColumnType::Str => crate::column::open_column_str(column_bytes)?.into(),
|
||||
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
|
||||
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
|
||||
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
|
||||
ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
|
||||
ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
|
||||
ColumnType::Bytes => {
|
||||
crate::column::open_column_bytes(column_bytes, self.format_version)?.into()
|
||||
}
|
||||
ColumnType::Str => {
|
||||
crate::column::open_column_str(column_bytes, self.format_version)?.into()
|
||||
}
|
||||
ColumnType::I64 => {
|
||||
crate::column::open_column_u64::<i64>(column_bytes, self.format_version)?.into()
|
||||
}
|
||||
ColumnType::U64 => {
|
||||
crate::column::open_column_u64::<u64>(column_bytes, self.format_version)?.into()
|
||||
}
|
||||
ColumnType::F64 => {
|
||||
crate::column::open_column_u64::<f64>(column_bytes, self.format_version)?.into()
|
||||
}
|
||||
ColumnType::Bool => {
|
||||
crate::column::open_column_u64::<bool>(column_bytes, self.format_version)?.into()
|
||||
}
|
||||
ColumnType::IpAddr => {
|
||||
crate::column::open_column_u128::<Ipv6Addr>(column_bytes, self.format_version)?
|
||||
.into()
|
||||
}
|
||||
ColumnType::DateTime => {
|
||||
crate::column::open_column_u64::<DateTime>(column_bytes)?.into()
|
||||
crate::column::open_column_u64::<DateTime>(column_bytes, self.format_version)?
|
||||
.into()
|
||||
}
|
||||
};
|
||||
Ok(dynamic_column)
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::{ColumnValues, RowId};
|
||||
|
||||
pub trait Iterable<T = u64> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_>;
|
||||
@@ -17,3 +20,9 @@ where Range<T>: Iterator<Item = T>
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterable for Arc<dyn crate::ColumnValues<RowId>> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(self.iter().map(|row_id| row_id as u64))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,22 @@
|
||||
//! # Tantivy-Columnar
|
||||
//!
|
||||
//! `tantivy-columnar` provides columnar storage for tantivy.
//! The crate allows for efficient read operations on specific columns rather than entire records
//! (a usage sketch follows this overview).
//!
//! ## Overview
//!
//! - **columnar**: Reading, writing, and merging multiple columns:
//!   - **[ColumnarWriter]**: Makes it possible to create a new columnar.
//!   - **[ColumnarReader]**: Makes it possible to access a set of columns
//!     associated with field names.
//!   - **[merge_columnar]**: Merges multiple ColumnarReaders or
//!     segments into a single one.
//!
//! - **column**: A single column, which contains
//!   - [column_index]: Resolves the rows for a document id. Manages the cardinality of the
//!     column.
//!   - [column_values]: Stores the values of a column in a dense format.
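A minimal sketch of the write/read round trip described above. It only uses calls that appear in the tests further down in this diff (ColumnarWriter::record_numerical / serialize, ColumnarReader::open / read_columns, DynamicColumn); the column name "price" and the document ids are illustrative, not taken from the source.

use tantivy_columnar::{ColumnarReader, ColumnarWriter, DynamicColumn};

fn round_trip() -> std::io::Result<()> {
    let mut writer = ColumnarWriter::default();
    // Docs 0 and 2 get a value; doc 1 has none, so the column ends up Optional.
    writer.record_numerical(0u32, "price", 10u64);
    writer.record_numerical(2u32, "price", 30u64);
    let mut buffer: Vec<u8> = Vec::new();
    writer.serialize(3, &mut buffer)?;

    let reader = ColumnarReader::open(buffer)?;
    let handles = reader.read_columns("price")?;
    // Integer values are coerced, so the column opens as an i64 column.
    let DynamicColumn::I64(price) = handles[0].open()? else { panic!() };
    assert_eq!(price.first(0), Some(10i64));
    assert_eq!(price.first(1), None);
    Ok(())
}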
|
||||
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -12,7 +31,7 @@ use std::io;
|
||||
|
||||
mod block_accessor;
|
||||
mod column;
|
||||
mod column_index;
|
||||
pub mod column_index;
|
||||
pub mod column_values;
|
||||
mod columnar;
|
||||
mod dictionary;
|
||||
@@ -29,7 +48,7 @@ pub use column_values::{
|
||||
};
|
||||
pub use columnar::{
|
||||
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
|
||||
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder,
|
||||
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder, Version, CURRENT_VERSION,
|
||||
};
|
||||
use sstable::VoidSSTable;
|
||||
pub use value::{NumericalType, NumericalValue};
|
||||
@@ -94,6 +113,9 @@ impl Cardinality {
|
||||
pub fn is_multivalue(&self) -> bool {
|
||||
matches!(self, Cardinality::Multivalued)
|
||||
}
|
||||
pub fn is_full(&self) -> bool {
|
||||
matches!(self, Cardinality::Full)
|
||||
}
|
||||
pub(crate) fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
@@ -109,3 +131,6 @@ impl Cardinality {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod compat_tests;
|
||||
|
||||
@@ -21,12 +21,12 @@ fn test_dataframe_writer_str() {
|
||||
dataframe_writer.record_str(1u32, "my_string", "hello");
|
||||
dataframe_writer.record_str(3u32, "my_string", "helloeee");
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 87);
|
||||
assert_eq!(cols[0].num_bytes(), 73);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -35,12 +35,12 @@ fn test_dataframe_writer_bytes() {
|
||||
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
|
||||
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 87);
|
||||
assert_eq!(cols[0].num_bytes(), 73);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -49,7 +49,7 @@ fn test_dataframe_writer_bool() {
|
||||
dataframe_writer.record_bool(1u32, "bool.value", false);
|
||||
dataframe_writer.record_bool(3u32, "bool.value", true);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
|
||||
@@ -57,7 +57,9 @@ fn test_dataframe_writer_bool() {
|
||||
assert_eq!(cols[0].num_bytes(), 22);
|
||||
assert_eq!(cols[0].column_type(), ColumnType::Bool);
|
||||
let dyn_bool_col = cols[0].open().unwrap();
|
||||
let DynamicColumn::Bool(bool_col) = dyn_bool_col else { panic!(); };
|
||||
let DynamicColumn::Bool(bool_col) = dyn_bool_col else {
|
||||
panic!();
|
||||
};
|
||||
let vals: Vec<Option<bool>> = (0..5).map(|row_id| bool_col.first(row_id)).collect();
|
||||
assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]);
|
||||
}
|
||||
@@ -72,14 +74,16 @@ fn test_dataframe_writer_u64_multivalued() {
|
||||
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
|
||||
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(7, None, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(7, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 29);
|
||||
assert_eq!(cols[0].num_bytes(), 50);
|
||||
let dyn_i64_col = cols[0].open().unwrap();
|
||||
let DynamicColumn::I64(divisor_col) = dyn_i64_col else { panic!(); };
|
||||
let DynamicColumn::I64(divisor_col) = dyn_i64_col else {
|
||||
panic!();
|
||||
};
|
||||
assert_eq!(
|
||||
divisor_col.get_cardinality(),
|
||||
crate::Cardinality::Multivalued
|
||||
@@ -93,7 +97,7 @@ fn test_dataframe_writer_ip_addr() {
|
||||
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
|
||||
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
|
||||
@@ -101,7 +105,9 @@ fn test_dataframe_writer_ip_addr() {
|
||||
assert_eq!(cols[0].num_bytes(), 42);
|
||||
assert_eq!(cols[0].column_type(), ColumnType::IpAddr);
|
||||
let dyn_bool_col = cols[0].open().unwrap();
|
||||
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else { panic!(); };
|
||||
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else {
|
||||
panic!();
|
||||
};
|
||||
let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect();
|
||||
assert_eq!(
|
||||
&vals,
|
||||
@@ -122,7 +128,7 @@ fn test_dataframe_writer_numerical() {
|
||||
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
|
||||
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(6, None, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(6, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap();
|
||||
@@ -134,7 +140,9 @@ fn test_dataframe_writer_numerical() {
|
||||
// - null footer 6 bytes
|
||||
assert_eq!(cols[0].num_bytes(), 33);
|
||||
let column = cols[0].open().unwrap();
|
||||
let DynamicColumn::I64(column_i64) = column else { panic!(); };
|
||||
let DynamicColumn::I64(column_i64) = column else {
|
||||
panic!();
|
||||
};
|
||||
assert_eq!(column_i64.index.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(column_i64.first(0), None);
|
||||
assert_eq!(column_i64.first(1), Some(12i64));
|
||||
@@ -145,46 +153,6 @@ fn test_dataframe_writer_numerical() {
|
||||
assert_eq!(column_i64.first(6), None); //< we can change the spec for that one.
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_sort_by_full() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(0u32, "value", NumericalValue::U64(1));
|
||||
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
|
||||
let data = dataframe_writer.sort_order("value", 2, false);
|
||||
assert_eq!(data, vec![0, 1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_sort_by_opt() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(3));
|
||||
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(2));
|
||||
let data = dataframe_writer.sort_order("value", 5, false);
|
||||
// rows 0, 2 and 4 have no value and therefore sort with the default 0.0
|
||||
assert_eq!(data, vec![0, 2, 4, 3, 1]);
|
||||
let data = dataframe_writer.sort_order("value", 5, true);
|
||||
assert_eq!(
|
||||
data,
|
||||
vec![4, 2, 0, 3, 1].into_iter().rev().collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_sort_by_multi() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
// valid for sort
|
||||
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
|
||||
// those are ignored for sort
|
||||
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
|
||||
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
|
||||
// valid for sort
|
||||
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(3));
|
||||
// ignored, would change sort order
|
||||
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(1));
|
||||
let data = dataframe_writer.sort_order("value", 4, false);
|
||||
assert_eq!(data, vec![0, 2, 1, 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_encoded_str() {
|
||||
let mut buffer = Vec::new();
|
||||
@@ -193,12 +161,14 @@ fn test_dictionary_encoded_str() {
|
||||
columnar_writer.record_str(3, "my.column", "c");
|
||||
columnar_writer.record_str(3, "my.column2", "different_column!");
|
||||
columnar_writer.record_str(4, "my.column", "b");
|
||||
columnar_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let col_handles = columnar_reader.read_columns("my.column").unwrap();
|
||||
assert_eq!(col_handles.len(), 1);
|
||||
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { panic!(); };
|
||||
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else {
|
||||
panic!();
|
||||
};
|
||||
let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect();
|
||||
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
|
||||
assert_eq!(str_col.num_rows(), 5);
|
||||
@@ -225,12 +195,14 @@ fn test_dictionary_encoded_bytes() {
|
||||
columnar_writer.record_bytes(3, "my.column", b"c");
|
||||
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
|
||||
columnar_writer.record_bytes(4, "my.column", b"b");
|
||||
columnar_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let col_handles = columnar_reader.read_columns("my.column").unwrap();
|
||||
assert_eq!(col_handles.len(), 1);
|
||||
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { panic!(); };
|
||||
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else {
|
||||
panic!();
|
||||
};
|
||||
let index: Vec<Option<u64>> = (0..5)
|
||||
.map(|row_id| bytes_col.ords().first(row_id))
|
||||
.collect();
|
||||
@@ -318,9 +290,9 @@ fn bytes_strategy() -> impl Strategy<Value = &'static [u8]> {
|
||||
// A random column value
|
||||
fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
|
||||
prop_oneof![
|
||||
10 => string_strategy().prop_map(|s| ColumnValue::Str(s)),
|
||||
1 => bytes_strategy().prop_map(|b| ColumnValue::Bytes(b)),
|
||||
40 => num_strategy().prop_map(|n| ColumnValue::Numerical(n)),
|
||||
10 => string_strategy().prop_map(ColumnValue::Str),
|
||||
1 => bytes_strategy().prop_map(ColumnValue::Bytes),
|
||||
40 => num_strategy().prop_map(ColumnValue::Numerical),
|
||||
1 => (1u16..3u16).prop_map(|ip_addr_byte| ColumnValue::IpAddr(Ipv6Addr::new(
|
||||
127,
|
||||
0,
|
||||
@@ -331,8 +303,8 @@ fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
|
||||
0,
|
||||
ip_addr_byte
|
||||
))),
|
||||
1 => any::<bool>().prop_map(|b| ColumnValue::Bool(b)),
|
||||
1 => (0_679_723_993i64..1_679_723_995i64)
|
||||
1 => any::<bool>().prop_map(ColumnValue::Bool),
|
||||
1 => (679_723_993i64..1_679_723_995i64)
|
||||
.prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
|
||||
]
|
||||
}
|
||||
@@ -357,26 +329,12 @@ fn columnar_docs_strategy() -> impl Strategy<Value = Vec<Vec<(&'static str, Colu
|
||||
.prop_flat_map(|num_docs| proptest::collection::vec(doc_strategy(), num_docs))
|
||||
}
|
||||
|
||||
fn columnar_docs_and_mapping_strategy(
|
||||
) -> impl Strategy<Value = (Vec<Vec<(&'static str, ColumnValue)>>, Vec<RowId>)> {
|
||||
columnar_docs_strategy().prop_flat_map(|docs| {
|
||||
permutation_strategy(docs.len()).prop_map(move |permutation| (docs.clone(), permutation))
|
||||
})
|
||||
}
|
||||
|
||||
fn permutation_strategy(n: usize) -> impl Strategy<Value = Vec<RowId>> {
|
||||
Just((0u32..n as RowId).collect()).prop_shuffle()
|
||||
}
|
||||
|
||||
fn permutation_and_subset_strategy(n: usize) -> impl Strategy<Value = Vec<usize>> {
|
||||
let vals: Vec<usize> = (0..n).collect();
|
||||
subsequence(vals, 0..=n).prop_shuffle()
|
||||
}
|
||||
|
||||
fn build_columnar_with_mapping(
|
||||
docs: &[Vec<(&'static str, ColumnValue)>],
|
||||
old_to_new_row_ids_opt: Option<&[RowId]>,
|
||||
) -> ColumnarReader {
|
||||
fn build_columnar_with_mapping(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
|
||||
let num_docs = docs.len() as u32;
|
||||
let mut buffer = Vec::new();
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
@@ -404,15 +362,13 @@ fn build_columnar_with_mapping(
|
||||
}
|
||||
}
|
||||
}
|
||||
columnar_writer
|
||||
.serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
columnar_reader
|
||||
columnar_writer.serialize(num_docs, &mut buffer).unwrap();
|
||||
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
|
||||
build_columnar_with_mapping(docs, None)
|
||||
build_columnar_with_mapping(docs)
|
||||
}
|
||||
|
||||
fn assert_columnar_eq_strict(left: &ColumnarReader, right: &ColumnarReader) {
|
||||
@@ -436,6 +392,7 @@ fn assert_columnar_eq(
|
||||
}
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn assert_column_eq<T: Copy + PartialOrd + Debug + Send + Sync + 'static>(
|
||||
left: &Column<T>,
|
||||
right: &Column<T>,
|
||||
@@ -533,28 +490,36 @@ trait AssertEqualToColumnValue {
|
||||
|
||||
impl AssertEqualToColumnValue for bool {
|
||||
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
|
||||
let ColumnValue::Bool(val) = column_value else { panic!() };
|
||||
let ColumnValue::Bool(val) = column_value else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(self, val);
|
||||
}
|
||||
}
|
||||
|
||||
impl AssertEqualToColumnValue for Ipv6Addr {
|
||||
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
|
||||
let ColumnValue::IpAddr(val) = column_value else { panic!() };
|
||||
let ColumnValue::IpAddr(val) = column_value else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(self, val);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Coerce + PartialEq + Debug + Into<NumericalValue>> AssertEqualToColumnValue for T {
|
||||
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
|
||||
let ColumnValue::Numerical(num) = column_value else { panic!() };
|
||||
let ColumnValue::Numerical(num) = column_value else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(self, &T::coerce(*num));
|
||||
}
|
||||
}
|
||||
|
||||
impl AssertEqualToColumnValue for DateTime {
|
||||
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
|
||||
let ColumnValue::DateTime(dt) = column_value else { panic!() };
|
||||
let ColumnValue::DateTime(dt) = column_value else {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(self, dt);
|
||||
}
|
||||
}
|
||||
@@ -663,54 +628,6 @@ proptest! {
|
||||
}
|
||||
}
|
||||
|
||||
// Same as `test_single_columnar_builder_proptest` but with a shuffling mapping.
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(500))]
|
||||
#[test]
|
||||
fn test_single_columnar_builder_with_shuffle_proptest((docs, mapping) in columnar_docs_and_mapping_strategy()) {
|
||||
let columnar = build_columnar_with_mapping(&docs[..], Some(&mapping));
|
||||
assert_eq!(columnar.num_rows() as usize, docs.len());
|
||||
let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>> > = Default::default();
|
||||
for (doc_id, doc_vals) in docs.iter().enumerate() {
|
||||
for (col_name, col_val) in doc_vals {
|
||||
expected_columns
|
||||
.entry((col_name, col_val.column_type_category()))
|
||||
.or_default()
|
||||
.entry(mapping[doc_id])
|
||||
.or_default()
|
||||
.push(col_val);
|
||||
}
|
||||
}
|
||||
let column_list = columnar.list_columns().unwrap();
|
||||
assert_eq!(expected_columns.len(), column_list.len());
|
||||
for (column_name, column) in column_list {
|
||||
let dynamic_column = column.open().unwrap();
|
||||
let col_category: ColumnTypeCategory = dynamic_column.column_type().into();
|
||||
let expected_col_values: &HashMap<u32, Vec<&ColumnValue>> = expected_columns.get(&(column_name.as_str(), col_category)).unwrap();
|
||||
for _doc_id in 0..columnar.num_rows() {
|
||||
match &dynamic_column {
|
||||
DynamicColumn::Bool(col) =>
|
||||
assert_column_values(col, expected_col_values),
|
||||
DynamicColumn::I64(col) =>
|
||||
assert_column_values(col, expected_col_values),
|
||||
DynamicColumn::U64(col) =>
|
||||
assert_column_values(col, expected_col_values),
|
||||
DynamicColumn::F64(col) =>
|
||||
assert_column_values(col, expected_col_values),
|
||||
DynamicColumn::IpAddr(col) =>
|
||||
assert_column_values(col, expected_col_values),
|
||||
DynamicColumn::DateTime(col) =>
|
||||
assert_column_values(col, expected_col_values),
|
||||
DynamicColumn::Bytes(col) =>
|
||||
assert_bytes_column_values(col, expected_col_values, false),
|
||||
DynamicColumn::Str(col) =>
|
||||
assert_bytes_column_values(col, expected_col_values, true),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This test creates 2 or 3 random small columnars and attempts to merge them.
|
||||
// It compares the resulting merged dataframe with what would have been obtained by building the
|
||||
// dataframe from the concatenated rows to begin with.
|
||||
@@ -726,7 +643,7 @@ proptest! {
|
||||
let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
|
||||
crate::merge_columnar(&columnar_readers_arr[..], &[], stack_merge_order, &mut output).unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().cloned().flatten().collect();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().flatten().cloned().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -752,7 +669,7 @@ fn test_columnar_merging_empty_columnar() {
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
|
||||
columnar_docs.iter().cloned().flatten().collect();
|
||||
columnar_docs.iter().flatten().cloned().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -789,7 +706,7 @@ fn test_columnar_merging_number_columns() {
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
|
||||
columnar_docs.iter().cloned().flatten().collect();
|
||||
columnar_docs.iter().flatten().cloned().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -824,24 +741,68 @@ fn columnar_docs_and_remap(
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(1000))]
|
||||
#[test]
|
||||
fn test_columnar_merge_and_remap_proptest((columnar_docs, shuffle_merge_order) in columnar_docs_and_remap()) {
|
||||
let shuffled_rows: Vec<Vec<(&'static str, ColumnValue)>> = shuffle_merge_order.iter()
|
||||
.map(|row_addr| columnar_docs[row_addr.segment_ord as usize][row_addr.row_id as usize].clone())
|
||||
.collect();
|
||||
let expected_merged_columnar = build_columnar(&shuffled_rows[..]);
|
||||
let columnar_readers: Vec<ColumnarReader> = columnar_docs.iter()
|
||||
.map(|docs| build_columnar(&docs[..]))
|
||||
.collect::<Vec<_>>();
|
||||
let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let segment_num_rows: Vec<RowId> = columnar_docs.iter().map(|docs| docs.len() as RowId).collect();
|
||||
let shuffle_merge_order = ShuffleMergeOrder::for_test(&segment_num_rows, shuffle_merge_order);
|
||||
crate::merge_columnar(&columnar_readers_arr[..], &[], shuffle_merge_order.into(), &mut output).unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
assert_columnar_eq(&merged_columnar, &expected_merged_columnar, true);
|
||||
fn test_columnar_merge_and_remap_proptest((columnar_docs, shuffle_merge_order) in
|
||||
columnar_docs_and_remap()) {
|
||||
test_columnar_merge_and_remap(columnar_docs, shuffle_merge_order);
|
||||
}
|
||||
}
|
||||
|
||||
fn test_columnar_merge_and_remap(
|
||||
columnar_docs: Vec<Vec<Vec<(&'static str, ColumnValue)>>>,
|
||||
shuffle_merge_order: Vec<RowAddr>,
|
||||
) {
|
||||
let shuffled_rows: Vec<Vec<(&'static str, ColumnValue)>> = shuffle_merge_order
|
||||
.iter()
|
||||
.map(|row_addr| {
|
||||
columnar_docs[row_addr.segment_ord as usize][row_addr.row_id as usize].clone()
|
||||
})
|
||||
.collect();
|
||||
let expected_merged_columnar = build_columnar(&shuffled_rows[..]);
|
||||
let columnar_readers: Vec<ColumnarReader> = columnar_docs
|
||||
.iter()
|
||||
.map(|docs| build_columnar(&docs[..]))
|
||||
.collect::<Vec<_>>();
|
||||
let columnar_readers_ref: Vec<&ColumnarReader> = columnar_readers.iter().collect();
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let segment_num_rows: Vec<RowId> = columnar_docs
|
||||
.iter()
|
||||
.map(|docs| docs.len() as RowId)
|
||||
.collect();
|
||||
let shuffle_merge_order = ShuffleMergeOrder::for_test(&segment_num_rows, shuffle_merge_order);
|
||||
crate::merge_columnar(
|
||||
&columnar_readers_ref[..],
|
||||
&[],
|
||||
shuffle_merge_order.into(),
|
||||
&mut output,
|
||||
)
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
assert_columnar_eq(&merged_columnar, &expected_merged_columnar, true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_columnar_merge_and_remap_bug_1() {
|
||||
let columnar_docs = vec![vec![
|
||||
vec![
|
||||
("c1", ColumnValue::Numerical(NumericalValue::U64(0))),
|
||||
("c1", ColumnValue::Numerical(NumericalValue::U64(0))),
|
||||
],
|
||||
vec![],
|
||||
]];
|
||||
let shuffle_merge_order: Vec<RowAddr> = vec![
|
||||
RowAddr {
|
||||
segment_ord: 0,
|
||||
row_id: 1,
|
||||
},
|
||||
RowAddr {
|
||||
segment_ord: 0,
|
||||
row_id: 0,
|
||||
},
|
||||
];
|
||||
|
||||
test_columnar_merge_and_remap(columnar_docs, shuffle_merge_order);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_columnar_merge_empty() {
|
||||
let columnar_reader_1 = build_columnar(&[]);
|
||||
|
||||
@@ -17,6 +17,31 @@ impl NumericalValue {
|
||||
NumericalValue::F64(_) => NumericalType::F64,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Tries to normalize the numerical value in the following priorities:
    /// i64, u64, f64
|
||||
pub fn normalize(self) -> Self {
|
||||
match self {
|
||||
NumericalValue::U64(val) => {
|
||||
if val <= i64::MAX as u64 {
|
||||
NumericalValue::I64(val as i64)
|
||||
} else {
|
||||
NumericalValue::F64(val as f64)
|
||||
}
|
||||
}
|
||||
NumericalValue::I64(val) => NumericalValue::I64(val),
|
||||
NumericalValue::F64(val) => {
|
||||
let fract = val.fract();
|
||||
if fract == 0.0 && val >= i64::MIN as f64 && val <= i64::MAX as f64 {
|
||||
NumericalValue::I64(val as i64)
|
||||
} else if fract == 0.0 && val >= u64::MIN as f64 && val <= u64::MAX as f64 {
|
||||
NumericalValue::U64(val as u64)
|
||||
} else {
|
||||
NumericalValue::F64(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for NumericalValue {
|
||||
|
||||
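A minimal sketch (not part of the diff) of the normalization behaviour implemented above. It assumes `NumericalValue` is in scope via the columnar crate (import path assumed):

use tantivy_columnar::NumericalValue; // import path assumed

fn main() {
    // u64 values that fit into i64 are represented as i64; huge ones fall back to f64.
    assert!(matches!(NumericalValue::U64(5).normalize(), NumericalValue::I64(5)));
    assert!(matches!(NumericalValue::U64(u64::MAX).normalize(), NumericalValue::F64(_)));
    // Integral f64 values in i64 range collapse to i64; everything else stays f64.
    assert!(matches!(NumericalValue::F64(3.0).normalize(), NumericalValue::I64(3)));
    assert!(matches!(NumericalValue::F64(3.5).normalize(), NumericalValue::F64(_)));
}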
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.5.0"
|
||||
version = "0.7.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2021"
|
||||
@@ -9,16 +9,17 @@ documentation = "https://docs.rs/tantivy_common/"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
ownedbytes = { version= "0.5", path="../ownedbytes" }
|
||||
ownedbytes = { version= "0.7", path="../ownedbytes" }
|
||||
async-trait = "0.1"
|
||||
time = { version = "0.3.10", features = ["serde-well-known"] }
|
||||
serde = { version = "1.0.136", features = ["derive"] }
|
||||
|
||||
[dev-dependencies]
|
||||
binggan = "0.12.0"
|
||||
proptest = "1.0.0"
|
||||
rand = "0.8.4"
|
||||
|
||||
|
||||
@@ -1,39 +1,70 @@
|
||||
#![feature(test)]
|
||||
use binggan::{black_box, BenchRunner};
|
||||
use rand::seq::IteratorRandom;
|
||||
use rand::thread_rng;
|
||||
use tantivy_common::{serialize_vint_u32, BitSet, TinySet};
|
||||
|
||||
extern crate test;
|
||||
fn bench_vint() {
|
||||
let mut runner = BenchRunner::new();
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use rand::seq::IteratorRandom;
|
||||
use rand::thread_rng;
|
||||
use tantivy_common::serialize_vint_u32;
|
||||
use test::Bencher;
|
||||
let vals: Vec<u32> = (0..20_000).collect();
|
||||
runner.bench_function("bench_vint", move |_| {
|
||||
let mut out = 0u64;
|
||||
for val in vals.iter().cloned() {
|
||||
let mut buf = [0u8; 8];
|
||||
serialize_vint_u32(val, &mut buf);
|
||||
out += u64::from(buf[0]);
|
||||
}
|
||||
black_box(out);
|
||||
None
|
||||
});
|
||||
|
||||
#[bench]
|
||||
fn bench_vint(b: &mut Bencher) {
|
||||
let vals: Vec<u32> = (0..20_000).collect();
|
||||
b.iter(|| {
|
||||
let mut out = 0u64;
|
||||
for val in vals.iter().cloned() {
|
||||
let mut buf = [0u8; 8];
|
||||
serialize_vint_u32(val, &mut buf);
|
||||
out += u64::from(buf[0]);
|
||||
}
|
||||
out
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_vint_rand(b: &mut Bencher) {
|
||||
let vals: Vec<u32> = (0..20_000).choose_multiple(&mut thread_rng(), 100_000);
|
||||
b.iter(|| {
|
||||
let mut out = 0u64;
|
||||
for val in vals.iter().cloned() {
|
||||
let mut buf = [0u8; 8];
|
||||
serialize_vint_u32(val, &mut buf);
|
||||
out += u64::from(buf[0]);
|
||||
}
|
||||
out
|
||||
});
|
||||
}
|
||||
let vals: Vec<u32> = (0..20_000).choose_multiple(&mut thread_rng(), 100_000);
|
||||
runner.bench_function("bench_vint_rand", move |_| {
|
||||
let mut out = 0u64;
|
||||
for val in vals.iter().cloned() {
|
||||
let mut buf = [0u8; 8];
|
||||
serialize_vint_u32(val, &mut buf);
|
||||
out += u64::from(buf[0]);
|
||||
}
|
||||
black_box(out);
|
||||
None
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_bitset() {
|
||||
let mut runner = BenchRunner::new();
|
||||
|
||||
runner.bench_function("bench_tinyset_pop", move |_| {
|
||||
let mut tinyset = TinySet::singleton(black_box(31u32));
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
black_box(tinyset);
|
||||
None
|
||||
});
|
||||
|
||||
let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
|
||||
runner.bench_function("bench_tinyset_sum", move |_| {
|
||||
assert_eq!(black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
|
||||
None
|
||||
});
|
||||
|
||||
let v = [10u32, 14u32, 21u32];
|
||||
runner.bench_function("bench_tinyarr_sum", move |_| {
|
||||
black_box(v.iter().cloned().sum::<u32>());
|
||||
None
|
||||
});
|
||||
|
||||
runner.bench_function("bench_bitset_initialize", move |_| {
|
||||
black_box(BitSet::with_max_value(1_000_000));
|
||||
None
|
||||
});
|
||||
}
|
||||
|
||||
fn main() {
|
||||
bench_vint();
|
||||
bench_bitset();
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io::Write;
|
||||
use std::{fmt, io, u64};
|
||||
use std::{fmt, io};
|
||||
|
||||
use ownedbytes::OwnedBytes;
|
||||
|
||||
@@ -697,43 +696,3 @@ mod tests {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use test;
|
||||
|
||||
use super::{BitSet, TinySet};
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyset_pop(b: &mut test::Bencher) {
|
||||
b.iter(|| {
|
||||
let mut tinyset = TinySet::singleton(test::black_box(31u32));
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyset_sum(b: &mut test::Bencher) {
|
||||
let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
|
||||
b.iter(|| {
|
||||
assert_eq!(test::black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyarr_sum(b: &mut test::Bencher) {
|
||||
let v = [10u32, 14u32, 21u32];
|
||||
b.iter(|| test::black_box(v).iter().cloned().sum::<u32>());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_bitset_initialize(b: &mut test::Bencher) {
|
||||
b.iter(|| BitSet::with_max_value(1_000_000));
|
||||
}
|
||||
}
|
||||
|
||||
common/src/bounds.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
use std::io;
|
||||
use std::ops::Bound;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BoundsRange<T> {
|
||||
pub lower_bound: Bound<T>,
|
||||
pub upper_bound: Bound<T>,
|
||||
}
|
||||
impl<T> BoundsRange<T> {
|
||||
pub fn new(lower_bound: Bound<T>, upper_bound: Bound<T>) -> Self {
|
||||
BoundsRange {
|
||||
lower_bound,
|
||||
upper_bound,
|
||||
}
|
||||
}
|
||||
pub fn is_unbounded(&self) -> bool {
|
||||
matches!(self.lower_bound, Bound::Unbounded) && matches!(self.upper_bound, Bound::Unbounded)
|
||||
}
|
||||
pub fn map_bound<TTo>(&self, transform: impl Fn(&T) -> TTo) -> BoundsRange<TTo> {
|
||||
BoundsRange {
|
||||
lower_bound: map_bound(&self.lower_bound, &transform),
|
||||
upper_bound: map_bound(&self.upper_bound, &transform),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_bound_res<TTo, Err>(
|
||||
&self,
|
||||
transform: impl Fn(&T) -> Result<TTo, Err>,
|
||||
) -> Result<BoundsRange<TTo>, Err> {
|
||||
Ok(BoundsRange {
|
||||
lower_bound: map_bound_res(&self.lower_bound, &transform)?,
|
||||
upper_bound: map_bound_res(&self.upper_bound, &transform)?,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn transform_inner<TTo>(
|
||||
&self,
|
||||
transform_lower: impl Fn(&T) -> TransformBound<TTo>,
|
||||
transform_upper: impl Fn(&T) -> TransformBound<TTo>,
|
||||
) -> BoundsRange<TTo> {
|
||||
BoundsRange {
|
||||
lower_bound: transform_bound_inner(&self.lower_bound, &transform_lower),
|
||||
upper_bound: transform_bound_inner(&self.upper_bound, &transform_upper),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the first set inner value
|
||||
pub fn get_inner(&self) -> Option<&T> {
|
||||
inner_bound(&self.lower_bound).or(inner_bound(&self.upper_bound))
|
||||
}
|
||||
}
|
||||
|
||||
pub enum TransformBound<T> {
|
||||
/// Overwrite the bounds
|
||||
NewBound(Bound<T>),
|
||||
/// Use Existing bounds with new value
|
||||
Existing(T),
|
||||
}
|
||||
|
||||
/// Takes a bound and transforms the inner value into a new bound via a closure.
/// The bound variant may change depending on the value returned by the closure.
|
||||
pub fn transform_bound_inner_res<TFrom, TTo>(
|
||||
bound: &Bound<TFrom>,
|
||||
transform: impl Fn(&TFrom) -> io::Result<TransformBound<TTo>>,
|
||||
) -> io::Result<Bound<TTo>> {
|
||||
use self::Bound::*;
|
||||
Ok(match bound {
|
||||
Excluded(ref from_val) => match transform(from_val)? {
|
||||
TransformBound::NewBound(new_val) => new_val,
|
||||
TransformBound::Existing(new_val) => Excluded(new_val),
|
||||
},
|
||||
Included(ref from_val) => match transform(from_val)? {
|
||||
TransformBound::NewBound(new_val) => new_val,
|
||||
TransformBound::Existing(new_val) => Included(new_val),
|
||||
},
|
||||
Unbounded => Unbounded,
|
||||
})
|
||||
}
|
||||
|
||||
/// Takes a bound and transforms the inner value into a new bound via a closure.
/// The bound variant may change depending on the value returned by the closure.
|
||||
pub fn transform_bound_inner<TFrom, TTo>(
|
||||
bound: &Bound<TFrom>,
|
||||
transform: impl Fn(&TFrom) -> TransformBound<TTo>,
|
||||
) -> Bound<TTo> {
|
||||
use self::Bound::*;
|
||||
match bound {
|
||||
Excluded(ref from_val) => match transform(from_val) {
|
||||
TransformBound::NewBound(new_val) => new_val,
|
||||
TransformBound::Existing(new_val) => Excluded(new_val),
|
||||
},
|
||||
Included(ref from_val) => match transform(from_val) {
|
||||
TransformBound::NewBound(new_val) => new_val,
|
||||
TransformBound::Existing(new_val) => Included(new_val),
|
||||
},
|
||||
Unbounded => Unbounded,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the inner value of a `Bound`
|
||||
pub fn inner_bound<T>(val: &Bound<T>) -> Option<&T> {
|
||||
match val {
|
||||
Bound::Included(term) | Bound::Excluded(term) => Some(term),
|
||||
Bound::Unbounded => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_bound<TFrom, TTo>(
|
||||
bound: &Bound<TFrom>,
|
||||
transform: impl Fn(&TFrom) -> TTo,
|
||||
) -> Bound<TTo> {
|
||||
use self::Bound::*;
|
||||
match bound {
|
||||
Excluded(ref from_val) => Bound::Excluded(transform(from_val)),
|
||||
Included(ref from_val) => Bound::Included(transform(from_val)),
|
||||
Unbounded => Unbounded,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_bound_res<TFrom, TTo, Err>(
|
||||
bound: &Bound<TFrom>,
|
||||
transform: impl Fn(&TFrom) -> Result<TTo, Err>,
|
||||
) -> Result<Bound<TTo>, Err> {
|
||||
use self::Bound::*;
|
||||
Ok(match bound {
|
||||
Excluded(ref from_val) => Excluded(transform(from_val)?),
|
||||
Included(ref from_val) => Included(transform(from_val)?),
|
||||
Unbounded => Unbounded,
|
||||
})
|
||||
}
|
||||
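A small usage sketch (not part of the diff) for the `BoundsRange` helper added above, assuming it is reachable as `tantivy_common::bounds::BoundsRange` (the `pub mod bounds;` re-export is added further down in this diff):

use std::ops::Bound;

use tantivy_common::bounds::BoundsRange;

fn main() {
    // A half-open range [10, 20) over u64.
    let bounds = BoundsRange::new(Bound::Included(10u64), Bound::Excluded(20u64));
    assert!(!bounds.is_unbounded());
    // get_inner returns the first bound value that is actually set.
    assert_eq!(bounds.get_inner(), Some(&10u64));
    // map_bound converts the inner values while keeping the bound variants.
    let as_strings = bounds.map_bound(|v| v.to_string());
    assert!(matches!(&as_strings.lower_bound, Bound::Included(s) if s == "10"));
}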
@@ -1,11 +1,12 @@
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::fmt;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use time::format_description::well_known::Rfc3339;
|
||||
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
|
||||
|
||||
use crate::BinarySerializable;
|
||||
|
||||
/// Precision with which datetimes are truncated when stored in fast fields. This setting is only
|
||||
/// relevant for fast fields. In the docstore, datetimes are always saved with nanosecond precision.
|
||||
#[derive(
|
||||
@@ -24,9 +25,6 @@ pub enum DateTimePrecision {
|
||||
Nanoseconds,
|
||||
}
|
||||
|
||||
#[deprecated(since = "0.20.0", note = "Use `DateTimePrecision` instead")]
|
||||
pub type DatePrecision = DateTimePrecision;
|
||||
|
||||
/// A date/time value with nanoseconds precision.
|
||||
///
|
||||
/// This timestamp does not carry any explicit time zone information.
|
||||
@@ -37,7 +35,7 @@ pub type DatePrecision = DateTimePrecision;
|
||||
/// All constructors and conversions are provided as explicit
|
||||
/// functions and not by implementing any `From`/`Into` traits
|
||||
/// to prevent unintended usage.
|
||||
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub struct DateTime {
|
||||
// Timestamp in nanoseconds.
|
||||
pub(crate) timestamp_nanos: i64,
|
||||
@@ -164,3 +162,15 @@ impl fmt::Debug for DateTime {
|
||||
f.write_str(&utc_rfc3339)
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for DateTime {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
let timestamp_micros = self.into_timestamp_micros();
|
||||
<i64 as BinarySerializable>::serialize(×tamp_micros, writer)
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> std::io::Result<Self> {
|
||||
let timestamp_micros = <i64 as BinarySerializable>::deserialize(reader)?;
|
||||
Ok(Self::from_timestamp_micros(timestamp_micros))
|
||||
}
|
||||
}
|
||||
|
||||
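A round-trip sketch (not part of the diff) for the new `BinarySerializable` impl above. Serialization goes through `into_timestamp_micros`, so a value expressed in whole microseconds round-trips exactly, while sub-microsecond nanoseconds would be truncated:

use tantivy_common::{BinarySerializable, DateTime};

fn main() {
    // A timestamp expressed in microseconds round-trips exactly.
    let dt = DateTime::from_timestamp_micros(1_655_900_030_123_456);
    let mut buf: Vec<u8> = Vec::new();
    dt.serialize(&mut buf).unwrap();
    let decoded = DateTime::deserialize(&mut &buf[..]).unwrap();
    assert_eq!(decoded, dt);
}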
@@ -1,3 +1,4 @@
|
||||
use std::fs::File;
|
||||
use std::ops::{Deref, Range, RangeBounds};
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
@@ -32,6 +33,62 @@ pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// A `File` with its length included.
|
||||
pub struct WrapFile {
|
||||
file: File,
|
||||
len: usize,
|
||||
}
|
||||
impl WrapFile {
|
||||
/// Creates a new WrapFile and stores its length.
|
||||
pub fn new(file: File) -> io::Result<Self> {
|
||||
let len = file.metadata()?.len() as usize;
|
||||
Ok(WrapFile { file, len })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileHandle for WrapFile {
|
||||
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
let file_len = self.len();
|
||||
|
||||
// Calculate the actual range to read, ensuring it stays within file boundaries
|
||||
let start = range.start;
|
||||
let end = range.end.min(file_len);
|
||||
|
||||
// Ensure the start is before the end of the range
|
||||
if start >= end {
|
||||
return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid range"));
|
||||
}
|
||||
|
||||
let mut buffer = vec![0; end - start];
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::prelude::FileExt;
|
||||
self.file.read_exact_at(&mut buffer, start as u64)?;
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
use std::io::{Read, Seek};
|
||||
let mut file = self.file.try_clone()?; // Clone the file to read from it separately
|
||||
// Seek to the start position in the file
|
||||
file.seek(io::SeekFrom::Start(start as u64))?;
|
||||
// Read the data into the buffer
|
||||
file.read_exact(&mut buffer)?;
|
||||
}
|
||||
|
||||
Ok(OwnedBytes::new(buffer))
|
||||
}
|
||||
// todo implement async
|
||||
}
|
||||
impl HasLen for WrapFile {
|
||||
fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileHandle for &'static [u8] {
|
||||
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
@@ -67,6 +124,30 @@ impl fmt::Debug for FileSlice {
|
||||
}
|
||||
}
|
||||
|
||||
impl FileSlice {
|
||||
pub fn stream_file_chunks(&self) -> impl Iterator<Item = io::Result<OwnedBytes>> + '_ {
|
||||
let len = self.range.end;
|
||||
let mut start = self.range.start;
|
||||
std::iter::from_fn(move || {
|
||||
            // The FileHandle is read in chunks of 1MB.
            const CHUNK_SIZE: usize = 1024 * 1024; // 1MB
|
||||
|
||||
if start < len {
|
||||
let end = (start + CHUNK_SIZE).min(len);
|
||||
let range = start..end;
|
||||
let chunk = self.data.read_bytes(range);
|
||||
start += CHUNK_SIZE;
|
||||
match chunk {
|
||||
Ok(chunk) => Some(Ok(chunk)),
|
||||
Err(e) => Some(Err(e)),
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes a range, a `RangeBounds` object, and returns
|
||||
/// a `Range` that corresponds to the relative application of the
|
||||
/// `RangeBounds` object to the original `Range`.
|
||||
|
||||
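A usage sketch (not part of the diff) for the new `WrapFile` handle above, assuming the module path `tantivy_common::file_slice` (declared `pub mod file_slice;` in this diff); the scratch file name is illustrative:

use std::fs::File;
use std::io;

use tantivy_common::file_slice::{FileHandle, WrapFile};

fn main() -> io::Result<()> {
    // Write a small scratch file so the example is self-contained.
    std::fs::write("wrapfile_example.bin", vec![42u8; 4096])?;
    let file = File::open("wrapfile_example.bin")?;
    // WrapFile records the file length up front.
    let handle = WrapFile::new(file)?;
    // read_bytes clamps the end of the range to the file length.
    let bytes = handle.read_bytes(0..1024)?;
    assert_eq!(bytes.len(), 1024);
    Ok(())
}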
@@ -27,15 +27,15 @@ pub trait GroupByIteratorExtended: Iterator {
|
||||
where
|
||||
Self: Sized,
|
||||
F: FnMut(&Self::Item) -> K,
|
||||
K: PartialEq + Copy,
|
||||
Self::Item: Copy,
|
||||
K: PartialEq + Clone,
|
||||
Self::Item: Clone,
|
||||
{
|
||||
GroupByIterator::new(self, key)
|
||||
}
|
||||
}
|
||||
impl<I: Iterator> GroupByIteratorExtended for I {}
|
||||
|
||||
pub struct GroupByIterator<I, F, K: Copy>
|
||||
pub struct GroupByIterator<I, F, K: Clone>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
@@ -50,7 +50,7 @@ where
|
||||
inner: Rc<RefCell<GroupByShared<I, F, K>>>,
|
||||
}
|
||||
|
||||
struct GroupByShared<I, F, K: Copy>
|
||||
struct GroupByShared<I, F, K: Clone>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
@@ -63,7 +63,7 @@ impl<I, F, K> GroupByIterator<I, F, K>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
K: Copy,
|
||||
K: Clone,
|
||||
{
|
||||
fn new(inner: I, group_by_fn: F) -> Self {
|
||||
let inner = GroupByShared {
|
||||
@@ -80,28 +80,28 @@ where
|
||||
impl<I, F, K> Iterator for GroupByIterator<I, F, K>
|
||||
where
|
||||
I: Iterator,
|
||||
I::Item: Copy,
|
||||
I::Item: Clone,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
K: Copy,
|
||||
K: Clone,
|
||||
{
|
||||
type Item = (K, GroupIterator<I, F, K>);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
let value = *inner.iter.peek()?;
|
||||
let value = inner.iter.peek()?.clone();
|
||||
let key = (inner.group_by_fn)(&value);
|
||||
|
||||
let inner = self.inner.clone();
|
||||
|
||||
let group_iter = GroupIterator {
|
||||
inner,
|
||||
group_key: key,
|
||||
group_key: key.clone(),
|
||||
};
|
||||
Some((key, group_iter))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GroupIterator<I, F, K: Copy>
|
||||
pub struct GroupIterator<I, F, K: Clone>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
@@ -110,10 +110,10 @@ where
|
||||
group_key: K,
|
||||
}
|
||||
|
||||
impl<I, F, K: PartialEq + Copy> Iterator for GroupIterator<I, F, K>
|
||||
impl<I, F, K: PartialEq + Clone> Iterator for GroupIterator<I, F, K>
|
||||
where
|
||||
I: Iterator,
|
||||
I::Item: Copy,
|
||||
I::Item: Clone,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
{
|
||||
type Item = I::Item;
|
||||
@@ -121,7 +121,7 @@ where
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
// peek if next value is in group
|
||||
let peek_val = *inner.iter.peek()?;
|
||||
let peek_val = inner.iter.peek()?.clone();
|
||||
if (inner.group_by_fn)(&peek_val) == self.group_key {
|
||||
inner.iter.next()
|
||||
} else {
|
||||
|
||||
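A sketch (not part of the diff) of what the relaxed `Copy` to `Clone` bounds above allow: grouping items that are `Clone` but not `Copy`, such as `String`. It assumes the extension method on `GroupByIteratorExtended` is named `group_by`:

use tantivy_common::GroupByIteratorExtended;

fn main() {
    let words = vec!["a".to_string(), "ab".to_string(), "b".to_string()];
    // Group consecutive items by their first character; each group is consumed eagerly.
    let groups: Vec<(char, Vec<String>)> = words
        .into_iter()
        .group_by(|s| s.chars().next().unwrap())
        .map(|(key, group)| (key, group.collect()))
        .collect();
    assert_eq!(
        groups,
        vec![
            ('a', vec!["a".to_string(), "ab".to_string()]),
            ('b', vec!["b".to_string()]),
        ]
    );
}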
common/src/json_path_writer.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
use crate::replace_in_place;
|
||||
|
||||
/// Separates the different segments of a json path.
|
||||
pub const JSON_PATH_SEGMENT_SEP: u8 = 1u8;
|
||||
pub const JSON_PATH_SEGMENT_SEP_STR: &str =
|
||||
unsafe { std::str::from_utf8_unchecked(&[JSON_PATH_SEGMENT_SEP]) };
|
||||
|
||||
/// Separates the json path and the value in
|
||||
/// a JSON term binary representation.
|
||||
pub const JSON_END_OF_PATH: u8 = 0u8;
|
||||
pub const JSON_END_OF_PATH_STR: &str =
|
||||
unsafe { std::str::from_utf8_unchecked(&[JSON_END_OF_PATH]) };
|
||||
|
||||
/// Create a new JsonPathWriter, that creates flattened json paths for tantivy.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct JsonPathWriter {
|
||||
path: String,
|
||||
indices: Vec<usize>,
|
||||
expand_dots: bool,
|
||||
}
|
||||
|
||||
impl JsonPathWriter {
|
||||
pub fn with_expand_dots(expand_dots: bool) -> Self {
|
||||
JsonPathWriter {
|
||||
path: String::new(),
|
||||
indices: Vec::new(),
|
||||
expand_dots,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
JsonPathWriter {
|
||||
path: String::new(),
|
||||
indices: Vec::new(),
|
||||
expand_dots: false,
|
||||
}
|
||||
}
|
||||
|
||||
    /// When expand_dots is enabled, a JSON object like
    /// `{"k8s.node.id": 5}` is processed as if it were
    /// `{"k8s": {"node": {"id": 5}}}`.
    /// This option has the merit of allowing users to
    /// write queries like `k8s.node.id:5`.
    /// On the other hand, enabling that feature can lead to
    /// ambiguity.
|
||||
#[inline]
|
||||
pub fn set_expand_dots(&mut self, expand_dots: bool) {
|
||||
self.expand_dots = expand_dots;
|
||||
}
|
||||
|
||||
/// Push a new segment to the path.
|
||||
#[inline]
|
||||
pub fn push(&mut self, segment: &str) {
|
||||
let len_path = self.path.len();
|
||||
self.indices.push(len_path);
|
||||
if self.indices.len() > 1 {
|
||||
self.path.push(JSON_PATH_SEGMENT_SEP as char);
|
||||
}
|
||||
self.path.push_str(segment);
|
||||
if self.expand_dots {
|
||||
// This might include the separation byte, which is ok because it is not a dot.
|
||||
let appended_segment = &mut self.path[len_path..];
|
||||
            // The unsafe below is safe as long as b'.' and JSON_PATH_SEGMENT_SEP are
            // valid single-byte UTF-8 strings.
            // By UTF-8 design, they cannot be part of another codepoint.
|
||||
unsafe {
|
||||
replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, appended_segment.as_bytes_mut())
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the end of JSON path marker.
|
||||
#[inline]
|
||||
pub fn set_end(&mut self) {
|
||||
self.path.push_str(JSON_END_OF_PATH_STR);
|
||||
}
|
||||
|
||||
/// Remove the last segment. Does nothing if the path is empty.
|
||||
#[inline]
|
||||
pub fn pop(&mut self) {
|
||||
if let Some(last_idx) = self.indices.pop() {
|
||||
self.path.truncate(last_idx);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear the path.
|
||||
#[inline]
|
||||
pub fn clear(&mut self) {
|
||||
self.path.clear();
|
||||
self.indices.clear();
|
||||
}
|
||||
|
||||
/// Get the current path.
|
||||
#[inline]
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.path
|
||||
}
|
||||
}
|
||||
|
||||
impl From<JsonPathWriter> for String {
|
||||
#[inline]
|
||||
fn from(value: JsonPathWriter) -> Self {
|
||||
value.path
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn json_path_writer_test() {
|
||||
let mut writer = JsonPathWriter::new();
|
||||
writer.set_expand_dots(false);
|
||||
|
||||
writer.push("root");
|
||||
assert_eq!(writer.as_str(), "root");
|
||||
|
||||
writer.push("child");
|
||||
assert_eq!(writer.as_str(), "root\u{1}child");
|
||||
|
||||
writer.pop();
|
||||
assert_eq!(writer.as_str(), "root");
|
||||
|
||||
writer.push("k8s.node.id");
|
||||
assert_eq!(writer.as_str(), "root\u{1}k8s.node.id");
|
||||
|
||||
writer.set_expand_dots(true);
|
||||
writer.pop();
|
||||
writer.push("k8s.node.id");
|
||||
assert_eq!(writer.as_str(), "root\u{1}k8s\u{1}node\u{1}id");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_path_expand_dots_enabled_pop_segment() {
|
||||
let mut json_writer = JsonPathWriter::with_expand_dots(true);
|
||||
json_writer.push("hello");
|
||||
assert_eq!(json_writer.as_str(), "hello");
|
||||
json_writer.push("color.hue");
|
||||
assert_eq!(json_writer.as_str(), "hello\x01color\x01hue");
|
||||
json_writer.pop();
|
||||
assert_eq!(json_writer.as_str(), "hello");
|
||||
}
|
||||
}
|
||||
@@ -5,19 +5,20 @@ use std::ops::Deref;
|
||||
pub use byteorder::LittleEndian as Endianness;
|
||||
|
||||
mod bitset;
|
||||
pub mod bounds;
|
||||
mod byte_count;
|
||||
mod datetime;
|
||||
pub mod file_slice;
|
||||
mod group_by;
|
||||
pub mod json_path_writer;
|
||||
mod serialize;
|
||||
mod vint;
|
||||
mod writer;
|
||||
pub use bitset::*;
|
||||
pub use byte_count::ByteCount;
|
||||
#[allow(deprecated)]
|
||||
pub use datetime::DatePrecision;
|
||||
pub use datetime::{DateTime, DateTimePrecision};
|
||||
pub use group_by::GroupByIteratorExtended;
|
||||
pub use json_path_writer::JsonPathWriter;
|
||||
pub use ownedbytes::{OwnedBytes, StableDeref};
|
||||
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
|
||||
pub use vint::{
|
||||
@@ -116,6 +117,7 @@ pub fn u64_to_f64(val: u64) -> f64 {
|
||||
///
|
||||
/// This function assumes that the needle is rarely contained in the byte string
|
||||
/// and offers a fast path if the needle is not present.
|
||||
#[inline]
|
||||
pub fn replace_in_place(needle: u8, replacement: u8, bytes: &mut [u8]) {
|
||||
if !bytes.contains(&needle) {
|
||||
return;
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::borrow::Cow;
|
||||
use std::io::{Read, Write};
|
||||
use std::{fmt, io};
|
||||
|
||||
@@ -249,11 +250,47 @@ impl BinarySerializable for String {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BinarySerializable for Cow<'a, str> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let data: &[u8] = self.as_bytes();
|
||||
VInt(data.len() as u64).serialize(writer)?;
|
||||
writer.write_all(data)
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, str>> {
|
||||
let string_length = VInt::deserialize(reader)?.val() as usize;
|
||||
let mut result = String::with_capacity(string_length);
|
||||
reader
|
||||
.take(string_length as u64)
|
||||
.read_to_string(&mut result)?;
|
||||
Ok(Cow::Owned(result))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BinarySerializable for Cow<'a, [u8]> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.len() as u64).serialize(writer)?;
|
||||
for it in self.iter() {
|
||||
it.serialize(writer)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, [u8]>> {
|
||||
let num_items = VInt::deserialize(reader)?.val();
|
||||
let mut items: Vec<u8> = Vec::with_capacity(num_items as usize);
|
||||
for _ in 0..num_items {
|
||||
let item = u8::deserialize(reader)?;
|
||||
items.push(item);
|
||||
}
|
||||
Ok(Cow::Owned(items))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
|
||||
use super::{VInt, *};
|
||||
use crate::serialize::BinarySerializable;
|
||||
use super::*;
|
||||
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
|
||||
let mut buffer = Vec::new();
|
||||
O::default().serialize(&mut buffer).unwrap();
|
||||
|
||||
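A round-trip sketch (not part of the diff) for the new `Cow<'_, str>` impl above: a VInt length prefix followed by the UTF-8 bytes, deserialized back into an owned value:

use std::borrow::Cow;

use tantivy_common::BinarySerializable;

fn main() {
    let original: Cow<str> = Cow::Borrowed("hello world");
    let mut buffer: Vec<u8> = Vec::new();
    original.serialize(&mut buffer).unwrap();
    let decoded = <Cow<str>>::deserialize(&mut &buffer[..]).unwrap();
    assert_eq!(decoded, original);
    // Deserialization always yields an owned value.
    assert!(matches!(decoded, Cow::Owned(_)));
}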
@@ -151,7 +151,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
|
||||
(result, vlen)
|
||||
}
|
||||
/// Write a `u32` as a vint payload.
|
||||
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
|
||||
pub fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
|
||||
let mut buf = [0u8; 8];
|
||||
let data = serialize_vint_u32(val, &mut buf);
|
||||
writer.write_all(data)
|
||||
|
||||
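A sketch (not part of the diff) of what the `?Sized` bound above enables: passing the writer as an unsized `dyn Write` trait object. It assumes `write_u32_vint` is re-exported at the crate root (the `pub use vint::{...}` list is truncated in this diff):

use std::io::{self, Write};

use tantivy_common::write_u32_vint;

// With `?Sized`, the writer parameter can now be an unsized trait object.
fn write_all_vints(values: &[u32], writer: &mut dyn Write) -> io::Result<()> {
    for &val in values {
        // Explicit reborrow so the `&mut dyn Write` is not moved by the call.
        write_u32_vint(val, &mut *writer)?;
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();
    write_all_vints(&[1, 300, 70_000], &mut buf)?;
    assert!(!buf.is_empty());
    Ok(())
}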
BIN doc/assets/images/paradedb.png (new file, 30 KiB): binary file not shown.
@@ -7,6 +7,11 @@
|
||||
- [Other](#other)
|
||||
- [Usage](#usage)
|
||||
|
||||
# Index Sorting has been removed!
|
||||
More info here:
|
||||
|
||||
https://github.com/quickwit-oss/tantivy/issues/2352
|
||||
|
||||
# Index Sorting
|
||||
|
||||
Tantivy allows you to sort the index according to a property.
|
||||
|
||||
@@ -12,7 +12,7 @@ use tantivy::aggregation::agg_result::AggregationResults;
|
||||
use tantivy::aggregation::AggregationCollector;
|
||||
use tantivy::query::AllQuery;
|
||||
use tantivy::schema::{self, IndexRecordOption, Schema, TextFieldIndexing, FAST};
|
||||
use tantivy::Index;
|
||||
use tantivy::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Create Schema
|
||||
@@ -132,10 +132,10 @@ fn main() -> tantivy::Result<()> {
|
||||
|
||||
let stream = Deserializer::from_str(data).into_iter::<Value>();
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
let mut num_indexed = 0;
|
||||
for value in stream {
|
||||
let doc = schema.parse_document(&serde_json::to_string(&value.unwrap())?)?;
|
||||
let doc = TantivyDocument::parse_json(&schema, &serde_json::to_string(&value.unwrap())?)?;
|
||||
index_writer.add_document(doc)?;
|
||||
num_indexed += 1;
|
||||
if num_indexed > 4 {
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, ReloadPolicy};
|
||||
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
@@ -75,7 +75,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Here we give tantivy a budget of `50MB`.
|
||||
// Using a bigger memory_arena for the indexer may increase
|
||||
// throughput, but 50 MB is already plenty.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's index our documents!
|
||||
// We first need a handle on the title and the body field.
|
||||
@@ -87,7 +87,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let title = schema.get_field("title").unwrap();
|
||||
let body = schema.get_field("body").unwrap();
|
||||
|
||||
let mut old_man_doc = Document::default();
|
||||
let mut old_man_doc = TantivyDocument::default();
|
||||
old_man_doc.add_text(title, "The Old Man and the Sea");
|
||||
old_man_doc.add_text(
|
||||
body,
|
||||
@@ -164,7 +164,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// will reload the index automatically after each commit.
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.try_into()?;
|
||||
|
||||
// We now need to acquire a searcher.
|
||||
@@ -217,9 +217,23 @@ fn main() -> tantivy::Result<()> {
|
||||
// the document returned will only contain
|
||||
// a title.
|
||||
for (_score, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("{}", retrieved_doc.to_json(&schema));
|
||||
}
|
||||
|
||||
// We can also get an explanation to understand
|
||||
// how a found document got its score.
|
||||
let query = query_parser.parse_query("title:sea^20 body:whale^70")?;
|
||||
|
||||
let (_score, doc_address) = searcher
|
||||
.search(&query, &TopDocs::with_limit(1))?
|
||||
.into_iter()
|
||||
.next()
|
||||
.unwrap();
|
||||
|
||||
let explanation = query.explain(&searcher, doc_address)?;
|
||||
|
||||
println!("{}", explanation.to_pretty_json());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -11,9 +11,10 @@ use columnar::Column;
|
||||
// ---
|
||||
// Importing tantivy...
|
||||
use tantivy::collector::{Collector, SegmentCollector};
|
||||
use tantivy::index::SegmentReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader};
|
||||
use tantivy::{doc, Index, IndexWriter, Score};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Stats {
|
||||
@@ -142,7 +143,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// this example.
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
index_writer.add_document(doc!(
|
||||
product_name => "Super Broom 2000",
|
||||
product_description => "While it is ok for short distance travel, this broom \
|
||||
|
||||
@@ -6,7 +6,7 @@ use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::tokenizer::NgramTokenizer;
|
||||
use tantivy::{doc, Index};
|
||||
use tantivy::{doc, Index, IndexWriter};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
@@ -53,7 +53,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// this will store tokens of 3 characters each
|
||||
index
|
||||
.tokenizers()
|
||||
.register("ngram3", NgramTokenizer::new(3, 3, false));
|
||||
.register("ngram3", NgramTokenizer::new(3, 3, false).unwrap());
|
||||
|
||||
// To insert document we need an index writer.
|
||||
// There must be only one writer at a time.
|
||||
@@ -62,7 +62,7 @@ fn main() -> tantivy::Result<()> {
|
||||
//
|
||||
// Here we use a buffer of 50MB per thread. Using a bigger
|
||||
// memory arena for the indexer can increase its throughput.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
index_writer.add_document(doc!(
|
||||
title => "The Old Man and the Sea",
|
||||
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
|
||||
@@ -103,8 +103,8 @@ fn main() -> tantivy::Result<()> {
|
||||
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
|
||||
|
||||
for (_, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("{}", retrieved_doc.to_json(&schema));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{DateOptions, Schema, Value, INDEXED, STORED, STRING};
|
||||
use tantivy::Index;
|
||||
use tantivy::schema::{DateOptions, Document, Schema, Value, INDEXED, STORED, STRING};
|
||||
use tantivy::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
@@ -13,7 +13,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let opts = DateOptions::from(INDEXED)
|
||||
.set_stored()
|
||||
.set_fast()
|
||||
.set_precision(tantivy::DateTimePrecision::Seconds);
|
||||
.set_precision(tantivy::schema::DateTimePrecision::Seconds);
|
||||
// Add `occurred_at` date field type
|
||||
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
|
||||
let event_type = schema_builder.add_text_field("event", STRING | STORED);
|
||||
@@ -22,16 +22,18 @@ fn main() -> tantivy::Result<()> {
|
||||
// # Indexing documents
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
// The dates are passed as string in the RFC3339 format
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T12:53:50.53Z",
|
||||
"event": "pull-request"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T13:00:00.22Z",
|
||||
"event": "comment"
|
||||
@@ -58,13 +60,15 @@ fn main() -> tantivy::Result<()> {
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
|
||||
assert_eq!(count_docs.len(), 1);
|
||||
for (_score, doc_address) in count_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
assert!(matches!(
|
||||
retrieved_doc.get_first(occurred_at),
|
||||
Some(Value::Date(_))
|
||||
));
|
||||
let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
|
||||
assert!(retrieved_doc
|
||||
.get_first(occurred_at)
|
||||
.unwrap()
|
||||
.as_value()
|
||||
.as_datetime()
|
||||
.is_some(),);
|
||||
assert_eq!(
|
||||
schema.to_json(&retrieved_doc),
|
||||
retrieved_doc.to_json(&schema),
|
||||
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
|
||||
);
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::TermQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, IndexReader};
|
||||
use tantivy::{doc, Index, IndexReader, IndexWriter};
|
||||
|
||||
// A simple helper function to fetch a single document
|
||||
// given its id from our index.
|
||||
@@ -19,7 +19,7 @@ use tantivy::{doc, Index, IndexReader};
|
||||
fn extract_doc_given_isbn(
|
||||
reader: &IndexReader,
|
||||
isbn_term: &Term,
|
||||
) -> tantivy::Result<Option<Document>> {
|
||||
) -> tantivy::Result<Option<TantivyDocument>> {
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// This is the simplest query you can think of.
|
||||
@@ -69,10 +69,10 @@ fn main() -> tantivy::Result<()> {
|
||||
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's add a couple of documents, for the sake of the example.
|
||||
let mut old_man_doc = Document::default();
|
||||
let mut old_man_doc = TantivyDocument::default();
|
||||
old_man_doc.add_text(title, "The Old Man and the Sea");
|
||||
index_writer.add_document(doc!(
|
||||
isbn => "978-0099908401",
|
||||
@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Oops our frankenstein doc seems misspelled
|
||||
let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
|
||||
assert_eq!(
|
||||
schema.to_json(&frankenstein_doc_misspelled),
|
||||
frankenstein_doc_misspelled.to_json(&schema),
|
||||
r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
|
||||
);
|
||||
|
||||
@@ -136,7 +136,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// No more typo!
|
||||
let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
|
||||
assert_eq!(
|
||||
schema.to_json(&frankenstein_new_doc),
|
||||
frankenstein_new_doc.to_json(&schema),
|
||||
r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
|
||||
);
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
use tantivy::collector::FacetCollector;
|
||||
use tantivy::query::{AllQuery, TermQuery};
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index};
|
||||
use tantivy::{doc, Index, IndexWriter};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// Let's create a temporary directory for the sake of this example
|
||||
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(30_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(30_000_000)?;
|
||||
|
||||
// For convenience, tantivy also comes with a macro to
|
||||
// reduce the boilerplate above.
|
||||
|
||||
@@ -12,7 +12,7 @@ use std::collections::HashSet;
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::BooleanQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, DocId, Index, Score, SegmentReader};
|
||||
use tantivy::{doc, DocId, Index, IndexWriter, Score, SegmentReader};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -23,7 +23,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(30_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(30_000_000)?;
|
||||
|
||||
index_writer.add_document(doc!(
|
||||
title => "Fried egg",
|
||||
@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
{
|
||||
let facets = vec![
|
||||
let facets = [
|
||||
Facet::from("/ingredient/egg"),
|
||||
Facet::from("/ingredient/oil"),
|
||||
Facet::from("/ingredient/garlic"),
|
||||
@@ -91,13 +91,11 @@ fn main() -> tantivy::Result<()> {
|
||||
.iter()
|
||||
.map(|(_, doc_id)| {
|
||||
searcher
|
||||
.doc(*doc_id)
|
||||
.doc::<TantivyDocument>(*doc_id)
|
||||
.unwrap()
|
||||
.get_first(title)
|
||||
.and_then(|v| v.as_str().map(|el| el.to_string()))
|
||||
.unwrap()
|
||||
.as_text()
|
||||
.unwrap()
|
||||
.to_owned()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
use tantivy::collector::{Count, TopDocs};
|
||||
use tantivy::query::FuzzyTermQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, ReloadPolicy};
|
||||
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
@@ -66,7 +66,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Here we give tantivy a budget of `50MB`.
|
||||
// Using a bigger memory_arena for the indexer may increase
|
||||
// throughput, but 50 MB is already plenty.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's index our documents!
|
||||
// We first need a handle on the title and the body field.
|
||||
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// will reload the index automatically after each commit.
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.try_into()?;
|
||||
|
||||
// We now need to acquire a searcher.
|
||||
@@ -151,10 +151,10 @@ fn main() -> tantivy::Result<()> {
|
||||
assert_eq!(count, 3);
|
||||
assert_eq!(top_docs.len(), 3);
|
||||
for (score, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
// Note that the score is not lower for the fuzzy hit.
|
||||
// There's an issue open for that: https://github.com/quickwit-oss/tantivy/issues/563
|
||||
println!("score {score:?} doc {}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("score {score:?} doc {}", retrieved_doc.to_json(&schema));
|
||||
// score 1.0 doc {"title":["The Diary of Muadib"]}
|
||||
//
|
||||
// score 1.0 doc {"title":["The Diary of a Young Girl"]}
|
||||
|
||||
@@ -61,7 +61,7 @@ fn main() -> tantivy::Result<()> {
|
||||
debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
|
||||
limbs and branches that arch over the pool"
|
||||
))?;
|
||||
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
|
||||
println!("add doc {i} from thread 1 - opstamp {opstamp}");
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
}
|
||||
Result::<(), TantivyError>::Ok(())
|
||||
@@ -82,7 +82,7 @@ fn main() -> tantivy::Result<()> {
|
||||
body => "Some great book description..."
|
||||
))?
|
||||
};
|
||||
println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
|
||||
println!("add doc {i} from thread 2 - opstamp {opstamp}");
|
||||
thread::sleep(Duration::from_millis(10));
|
||||
}
|
||||
Result::<(), TantivyError>::Ok(())
|
||||
|
||||
@@ -21,7 +21,7 @@ fn main() -> tantivy::Result<()> {
|
||||
}"#;
|
||||
|
||||
// We can parse our document
|
||||
let _mice_and_men_doc = schema.parse_document(mice_and_men_doc_json)?;
|
||||
let _mice_and_men_doc = TantivyDocument::parse_json(&schema, mice_and_men_doc_json)?;
|
||||
|
||||
// Multi-valued field are allowed, they are
|
||||
// expressed in JSON by an array.
|
||||
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
|
||||
"title": ["Frankenstein", "The Modern Prometheus"],
|
||||
"year": 1818
|
||||
}"#;
|
||||
let _frankenstein_doc = schema.parse_document(frankenstein_json)?;
|
||||
let _frankenstein_doc = TantivyDocument::parse_json(&schema, frankenstein_json)?;
|
||||
|
||||
// Note that the schema is saved in your index directory.
|
||||
//
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.