Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-02 23:32:54 +00:00)

Compare commits: column-tra...commit-cha (301 commits)
Commits in this comparison (SHA1):

1e50f96fb0 a05a0035f8 976128a412 f27b3e312d 56dea6f08d 789d29cf45 a36b50d825 09f65e5467 2c2f5c3877 96c93a6ba3
11b01e4141 3e8852c606 725f1ecb80 afa27afe7d 495824361a 485a8f507e 1119e59eae ee1f2c1f28 600548fd26 9929c0c221
f53e65648b 0281b22b77 a05c184830 0b40a7fe43 e758080465 2a39289a1b ca6231170e eda6e5a10a 8641155cbb 9a090ed994
b7d0dd154a ce10fab20f e034328a8b f811d1616b c665b16ff0 3b5f810051 5765c261aa fb9f03118d 55a9d808d4 32166682b3
e6acf8f76d 9e8a0c2cca 3edf0a2724 8ca12a5683 a4b759d2fe 3e9c806890 c69a873dd3 666afcf641 38ad46e580 e948889f4c
6e636c9cea 5a610efbc1 500a0d5e48 509a265659 5b2cea1b97 a5a80ffaea 0f98d91a39 2af6b01c17 c32ab66bbd 3f3a6f9990
83325d8f3f 4e46f4f8c4 43df356010 6647362464 279b1b28d3 7a80851e36 cd952429d2 d777c964da bbb058d976 5f7d027a52
dfab201191 0c2bd36fe3 af839753e0 fec2b63571 6213ea476a 5e159c26bf a5e59ab598 e772d3170d 8c2ba7bd55 02328b0151
7cc775256c 07b40f8b8b 9b6b6be5b9 6bb73a527f 03885d0f3c f2e5135870 c24157f28b 873382cdcb 791350091c 483b1d13d4
8de7fa9d95 94313b62f8 f2b2628feb 449f595832 c9235df059 a4485f7611 1082ff60f9 491854155c 96c3d54ac7 6800fdec9d
c9cf9c952a 024e53a99c 8d75e451bd fcfd76ec55 6b7b1cc4fa 129f7422f5 f39cce2c8b d2478fac8a 952b048341 80f9596ec8
84f9e77e1d a602c248fb 4b9d1fe828 63bc390b02 07393c2fa0 77a415cbe4 4b4c231bba 11d3409286 9cb8cfbea8 8b69aab0fc
3650d1f36a 2efebdb1bb e443ca63aa 5c9cbee29d b2ca83a93c 3b189080d4 00a6586efe b9b913510e 534b1d33c3 f465173872
96315df20d 9a1609d364 39f4e58450 a8a36b62cd 226a49338f 2864bf7123 5171ff611b e50e74acf8 0b86658389 5d6602a8d9
4d29ff4d01 cdc8e3a8be 67f453b534 787a37bacf f5039f1846 eeb1f19093 087beaf328 309449dba3 5a76e6c5d3 c8713a01ed
6113e0408c 400a20b7af 5f565e77de 516e60900d 36e1c79f37 c2f1c250f9 c694bc039a 2063f1717f d742275048 b9f06bc287
8b42c4c126 7905965800 f60a551890 7baa6e3ec5 2100ec5d26 b3bf9a5716 0dc8c458e0 e5043d78d2 6d0bb82bd2 5945dbf0bd
4cf911d56a 0f5cff762f 6d9a123cf2 0f4a47816a b062ab2196 a9d2f3db23 44e03791f9 2d23763e9f a24ae8d924 927dff5262
a695edcc95 b4b4f3fa73 b50e4b7c20 f8686ab1ec 2fe42719d8 fadd784a25 0e94213af0 0da2a2e70d 0bcdf3cbbf 8f647b817f
a86b0df6f4 f842da758c 97ccd6d712 cb252a42af d9609dd6b6 f03667d967 10f10a322f f757471077 21e0adefda ea8e6d7b1d
dac7da780e 20c87903b2 f9c3947803 e9a384bb15 d231671fe2 fa3d786a2f 75aafeeb9b 6f066c7f65 22e56aaee3 d641979127
1998111521 acb2e2e282 1ff5da5eb4 c3b25710ad 8492010d43 cf02e32578 8cca1014c9 938f884e32 ed68afb698 8a7962dc22
a06039dea8 68b6254b09 6a88ac3fe3 191b934650 1a2ba7025a 02599ebeb7 a16b466460 b8d8fdeb6e 12856d80fa e75472ec9a
e2e6c94ba8 9f610b25af 237b64025e 592caeefa0 570009b5b1 61b5110db7 58af1235e4 d3e7c41a1f 11275854ca 3ca48cd826
47dc511733 cae6b28a8f 9aa9efe2a4 57570b38a2 584394db1e 3aeb026970 df32ee2df2 762e662bfd 63b2420058 ced21b8791
bc85947105 64f08a1a5c e029fdfca7 817225edfb 1eab12396d 8006f63426 0a907d0319 45924711fd 14cb817a52 edd9155b88
9497794d40 29d56111de 4d634d61ff 1f3d8ca7e2 54696da771 21c2205de9 9436049d85 21c9a26182 56c68f5869 f5e66042d8
bf3327acd3 2a6479b66d 9c2ef81198 c5d30a54bc c632fc014e 085e63ae43 f6f23ba684 ea72cf34d6 00657d9e99 26876d41d7
8e775b6c3d e1f9af4384 4e350c5f1b 84e0c75598 08c4412d73 70e58adff9 0d1cd119e9 d3dd620048 e89c220b56 a451f6d60d
f740ddeee3 7a26cc9022 54972caa7c 5d436759b0 6f563b1606 095fb68fda 6316eaefc6 5331be800b c73b425bc1 54cfd0d154
3984cafccc
.gitattributes (vendored, 1 changed line)

@@ -1 +0,0 @@
-cpp/* linguist-vendored
.github/workflows/test.yml (vendored, 2 changed lines)

@@ -48,7 +48,7 @@ jobs:
     strategy:
       matrix:
         features: [
-          { label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
+          { label: "all", flags: "mmap,stopwords,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
          { label: "quickwit", flags: "mmap,quickwit,failpoints" }
        ]

.gitignore (vendored, 1 changed line)

@@ -9,7 +9,6 @@ target/release
 Cargo.lock
 benchmark
 .DS_Store
-cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
@@ -95,7 +95,7 @@ called [`Directory`](src/directory/directory.rs).
 Contrary to Lucene however, "files" are quite different from some kind of `io::Read` object.
 Check out [`src/directory/directory.rs`](src/directory/directory.rs) trait for more details.

-Tantivy ships two main directory implementation: the `MMapDirectory` and the `RAMDirectory`,
+Tantivy ships two main directory implementation: the `MmapDirectory` and the `RamDirectory`,
 but users can extend tantivy with their own implementation.

 ## [schema/](src/schema): What are documents?
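For reference, a minimal sketch of the two directory flavors renamed above; the index path is illustrative, and the mmap variant expects the directory to already exist:

```rust
use tantivy::directory::MmapDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // RamDirectory-backed index: handy for tests, nothing touches disk.
    let _ram_index = Index::create_in_ram(schema.clone());

    // MmapDirectory-backed index: the usual choice for persistent indexes.
    // The directory must already exist; the path is illustrative.
    let mmap_dir = MmapDirectory::open("/tmp/tantivy-index").expect("existing directory");
    let _disk_index = Index::open_or_create(mmap_dir, schema).expect("open or create index");
}
```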
CHANGELOG.md (36 changed lines)

@@ -1,10 +1,38 @@
 Tantivy 0.19
 ================================
+#### Bugfixes
+- Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
+- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480) (@PSeitz @fulmicoton)
+
+#### Features/Improvements
+- Add support for `IN` in queryparser, e.g. `field: IN [val1 val2 val3]` [#1683](https://github.com/quickwit-oss/tantivy/pull/1683) (@trinity-1686a)
+- Skip score calculation when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz)
+- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
 - Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
   The `DateTime` type has been updated to hold timestamps with microseconds precision.
-  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing).
+  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing). (@evanxg852000)
-- Remove Searcher pool and make `Searcher` cloneable.
+- Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
+- Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
+- Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
+- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570) (@PSeitz)
+- Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
+- Doc store
+  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510) (@PSeitz @fulmicoton)
+  - Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
+  - Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
+  - Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
+- Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
+- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
+- Aggregation
+  - Add aggregation support for date type [#1693](https://github.com/quickwit-oss/tantivy/pull/1693) (@PSeitz)
+  - Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
+  - Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
+- Faster indexing
+  - [#1610](https://github.com/quickwit-oss/tantivy/pull/1610) (@PSeitz)
+  - [#1594](https://github.com/quickwit-oss/tantivy/pull/1594) (@PSeitz)
+  - [#1582](https://github.com/quickwit-oss/tantivy/pull/1582) (@PSeitz)
+  - [#1611](https://github.com/quickwit-oss/tantivy/pull/1611) (@PSeitz)
+- Added a pre-configured stop word filter for various languages [#1666](https://github.com/quickwit-oss/tantivy/pull/1666) (@adamreichold)
+
 Tantivy 0.18
 ================================
@@ -22,6 +50,10 @@ Tantivy 0.18
 - Add terms aggregation (@PSeitz)
 - Add support for zstd compression (@kryesh)

+Tantivy 0.18.1
+================================
+- Hotfix: positions computation. #1629 (@fmassot, @fulmicoton, @PSeitz)
+
 Tantivy 0.17
 ================================

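As a quick illustration of the new `IN` query-parser syntax from the 0.19 changelog above (#1683), a hedged sketch; the `category` field and the values are invented for the example:

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STRING, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let category = schema_builder.add_text_field("category", STRING);
    let _body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let query_parser = QueryParser::for_index(&index, vec![category]);
    // Per the changelog syntax `field: IN [val1 val2 val3]`, this should
    // expand to a disjunction of term queries over `category`.
    let _query = query_parser
        .parse_query("category: IN [electronics books]")
        .expect("the 0.19 parser accepts the IN [...] set syntax");
}
```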
Cargo.toml (40 changed lines)

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -11,40 +11,37 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
+rust-version = "1.62"

 [dependencies]
-oneshot = "0.1.3"
+oneshot = "0.1.5"
 base64 = "0.13.0"
 byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
 regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
-tantivy-fst = "0.3.0"
+aho-corasick = "0.7"
+tantivy-fst = "0.4.0"
 memmap2 = { version = "0.5.3", optional = true }
 lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
-zstd = { version = "0.11", optional = true }
+zstd = { version = "0.12", optional = true, default-features = false }
 snap = { version = "1.0.5", optional = true }
 tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"
 serde = { version = "1.0.136", features = ["derive"] }
 serde_json = "1.0.79"
 num_cpus = "1.13.1"
-fs2={ version = "0.4.3", optional = true }
+fs2 = { version = "0.4.3", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
-tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
-tantivy-bitpacker = { version="0.2", path="./bitpacker" }
-common = { version = "0.3", path = "./common/", package = "tantivy-common" }
-fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
-ownedbytes = { version="0.3", path="./ownedbytes" }
 stable_deref_trait = "1.2.0"
 rust-stemmers = "1.2.0"
 downcast-rs = "1.2.0"
 bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
 census = "0.4.0"
-fnv = "1.0.7"
+rustc-hash = "1.1.0"
 thiserror = "1.0.30"
 htmlescape = "0.3.1"
 fail = "0.5.0"
@@ -56,11 +53,16 @@ lru = "0.7.5"
 fastdivide = "0.4.0"
 itertools = "0.10.3"
 measure_time = "0.8.2"
-pretty_assertions = "1.2.1"
-serde_cbor = { version = "0.11.2", optional = true }
+ciborium = { version = "0.2", optional = true}
 async-trait = "0.1.53"
 arc-swap = "1.5.0"

+tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
+tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
+common = { version= "0.4", path = "./common/", package = "tantivy-common" }
+fastfield_codecs = { version= "0.3", path="./fastfield_codecs", default-features = false }
+ownedbytes = { version= "0.4", path="./ownedbytes" }

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"

@@ -68,11 +70,12 @@ winapi = "0.3.9"
 rand = "0.8.5"
 maplit = "1.0.2"
 matches = "0.1.9"
+pretty_assertions = "1.2.1"
 proptest = "1.0.0"
-criterion = "0.3.5"
+criterion = "0.4"
 test-log = "0.2.10"
-env_logger = "0.9.0"
+env_logger = "0.10.0"
-pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
+pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
 futures = "0.3.21"

 [dev-dependencies.fail]
@@ -89,8 +92,9 @@ debug-assertions = true
 overflow-checks = true

 [features]
-default = ["mmap", "lz4-compression" ]
+default = ["mmap", "stopwords", "lz4-compression"]
 mmap = ["fs2", "tempfile", "memmap2"]
+stopwords = []

 brotli-compression = ["brotli"]
 lz4-compression = ["lz4_flex"]
@@ -100,7 +104,7 @@ zstd-compression = ["zstd"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.

-quickwit = ["serde_cbor"]
+quickwit = ["ciborium"]

 [workspace]
 members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
@@ -58,7 +58,7 @@ Distributed search is out of the scope of Tantivy, but if you are looking for th

 # Getting started

-Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
+Tantivy works on stable Rust and supports Linux, macOS, and Windows.

 - [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
 - [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
@@ -81,9 +81,13 @@ There are many ways to support this project.

 We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.

+## Minimum supported Rust version
+
+Tantivy currently requires at least Rust 1.62 or later to compile.
+
 ## Clone and build locally

-Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
+Tantivy compiles on stable Rust.
 To check out and run tests, you can simply run:

 ```bash
@@ -127,6 +131,7 @@ $ gdb run
 # Companies Using Tantivy

 <p align="left">
+<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
 <img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" />
 <img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
 <img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-bitpacker"
-version = "0.2.0"
+version = "0.3.0"
 edition = "2021"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
@@ -8,6 +8,8 @@ categories = []
 description = """Tantivy-sub crate: bitpacking"""
 repository = "https://github.com/quickwit-oss/tantivy"
 keywords = []
+documentation = "https://docs.rs/tantivy-bitpacker/latest/tantivy_bitpacker"
+homepage = "https://github.com/quickwit-oss/tantivy"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -87,15 +87,15 @@ impl BitUnpacker {
     }

     #[inline]
-    pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
+    pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
         if self.num_bits == 0 {
             return 0u64;
         }
-        let addr_in_bits = idx * self.num_bits;
+        let addr_in_bits = idx * self.num_bits as u32;
         let addr = addr_in_bits >> 3;
         let bit_shift = addr_in_bits & 7;
         debug_assert!(
-            addr + 8 <= data.len() as u64,
+            addr + 8 <= data.len() as u32,
             "The fast field field should have been padded with 7 bytes."
         );
         let bytes: [u8; 8] = (&data[(addr as usize)..(addr as usize) + 8])
@@ -130,7 +130,7 @@ mod test {
     fn test_bitpacker_util(len: usize, num_bits: u8) {
         let (bitunpacker, vals, data) = create_fastfield_bitpacker(len, num_bits);
         for (i, val) in vals.iter().enumerate() {
-            assert_eq!(bitunpacker.get(i as u64, &data), *val);
+            assert_eq!(bitunpacker.get(i as u32, &data), *val);
         }
     }
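The address arithmetic in `get` above is easier to see with concrete numbers; a small standalone check (values chosen for illustration):

```rust
// Worked example of BitUnpacker::get's addressing: values are packed back to
// back, num_bits bits each, so value `idx` starts at bit idx * num_bits.
fn main() {
    let (idx, num_bits): (u32, u32) = (10, 5);
    let addr_in_bits = idx * num_bits; // 50
    let addr = addr_in_bits >> 3;      // byte offset: 50 / 8 = 6
    let bit_shift = addr_in_bits & 7;  // 2 bits into the 8-byte window
    assert_eq!((addr, bit_shift), (6, 2));
}
```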
@@ -130,7 +130,7 @@ impl BlockedBitpacker {
         let pos_in_block = idx % BLOCK_SIZE as usize;
         if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
             let unpacked = BitUnpacker::new(metadata.num_bits()).get(
-                pos_in_block as u64,
+                pos_in_block as u32,
                 &self.compressed_blocks[metadata.offset() as usize..],
             );
             unpacked + metadata.base_value()
@@ -1,16 +1,20 @@
 [package]
 name = "tantivy-common"
-version = "0.3.0"
+version = "0.4.0"
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 license = "MIT"
 edition = "2021"
 description = "common traits and utility functions used by multiple tantivy subcrates"
+documentation = "https://docs.rs/tantivy_common/"
+homepage = "https://github.com/quickwit-oss/tantivy"
+repository = "https://github.com/quickwit-oss/tantivy"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
 byteorder = "1.4.3"
-ownedbytes = { version="0.3", path="../ownedbytes" }
+ownedbytes = { version= "0.4", path="../ownedbytes" }

 [dev-dependencies]
 proptest = "1.0.0"
@@ -259,11 +259,7 @@ impl BitSet {
         // we do not check saturated els.
         let higher = el / 64u32;
         let lower = el % 64u32;
-        self.len += if self.tinysets[higher as usize].insert_mut(lower) {
-            1
-        } else {
-            0
-        };
+        self.len += u64::from(self.tinysets[higher as usize].insert_mut(lower));
     }

     /// Inserts an element in the `BitSet`
@@ -272,11 +268,7 @@ impl BitSet {
         // we do not check saturated els.
         let higher = el / 64u32;
         let lower = el % 64u32;
-        self.len -= if self.tinysets[higher as usize].remove_mut(lower) {
-            1
-        } else {
-            0
-        };
+        self.len -= u64::from(self.tinysets[higher as usize].remove_mut(lower));
     }

     /// Returns true iff the elements is in the `BitSet`.
@@ -285,7 +277,7 @@ impl BitSet {
         self.tinyset(el / 64u32).contains(el % 64)
     }

-    /// Returns the first non-empty `TinySet` associated to a bucket lower
+    /// Returns the first non-empty `TinySet` associated with a bucket lower
     /// or greater than bucket.
     ///
     /// Reminder: the tiny set with the bucket `bucket`, represents the
@@ -11,7 +11,10 @@ mod writer;

 pub use bitset::*;
 pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
-pub use vint::{read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt};
+pub use vint::{
+    deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,
+    serialize_vint_u32, write_u32_vint, VInt, VIntU128,
+};
 pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};

 /// Has length trait
@@ -52,13 +55,13 @@ const HIGHEST_BIT: u64 = 1 << 63;
 /// to values over 2^63, and all values end up requiring 64 bits.
 ///
 /// # See also
-/// The [reverse mapping is `u64_to_i64`](./fn.u64_to_i64.html).
+/// The reverse mapping is [`u64_to_i64()`].
 #[inline]
 pub fn i64_to_u64(val: i64) -> u64 {
     (val as u64) ^ HIGHEST_BIT
 }

-/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
+/// Reverse the mapping given by [`i64_to_u64()`].
 #[inline]
 pub fn u64_to_i64(val: u64) -> i64 {
     (val ^ HIGHEST_BIT) as i64
@@ -80,7 +83,7 @@ pub fn u64_to_i64(val: u64) -> i64 {
 /// explains the mapping in a clear manner.
 ///
 /// # See also
-/// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
+/// The reverse mapping is [`u64_to_f64()`].
 #[inline]
 pub fn f64_to_u64(val: f64) -> u64 {
     let bits = val.to_bits();
@@ -91,7 +94,7 @@ pub fn f64_to_u64(val: f64) -> u64 {
     }
 }

-/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
+/// Reverse the mapping given by [`f64_to_u64()`].
 #[inline]
 pub fn u64_to_f64(val: u64) -> f64 {
     f64::from_bits(if val & HIGHEST_BIT != 0 {
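A standalone check of the order-preserving `i64`/`u64` mapping documented above, with the two functions reimplemented locally so the snippet runs on its own:

```rust
// The i64 <-> u64 mapping from the hunk above: flipping the sign bit shifts
// the whole i64 range into u64 while preserving order, which is what the
// fast-field encoding relies on.
const HIGHEST_BIT: u64 = 1 << 63;

fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ HIGHEST_BIT
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ HIGHEST_BIT) as i64
}

fn main() {
    assert_eq!(i64_to_u64(i64::MIN), 0); // smallest i64 maps to smallest u64
    assert_eq!(i64_to_u64(-1), HIGHEST_BIT - 1);
    assert_eq!(i64_to_u64(0), HIGHEST_BIT);
    assert_eq!(u64_to_i64(i64_to_u64(42)), 42); // round-trips exactly
}
```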
@@ -94,6 +94,20 @@ impl FixedSize for u32 {
     const SIZE_IN_BYTES: usize = 4;
 }

+impl BinarySerializable for u16 {
+    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+        writer.write_u16::<Endianness>(*self)
+    }
+
+    fn deserialize<R: Read>(reader: &mut R) -> io::Result<u16> {
+        reader.read_u16::<Endianness>()
+    }
+}
+
+impl FixedSize for u16 {
+    const SIZE_IN_BYTES: usize = 2;
+}
+
 impl BinarySerializable for u64 {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
         writer.write_u64::<Endianness>(*self)
@@ -107,6 +121,19 @@ impl FixedSize for u64 {
     const SIZE_IN_BYTES: usize = 8;
 }

+impl BinarySerializable for u128 {
+    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+        writer.write_u128::<Endianness>(*self)
+    }
+    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
+        reader.read_u128::<Endianness>()
+    }
+}
+
+impl FixedSize for u128 {
+    const SIZE_IN_BYTES: usize = 16;
+}
+
 impl BinarySerializable for f32 {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
         writer.write_f32::<Endianness>(*self)
@@ -161,8 +188,7 @@ impl FixedSize for u8 {

 impl BinarySerializable for bool {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-        let val = if *self { 1 } else { 0 };
-        writer.write_u8(val)
+        writer.write_u8(u8::from(*self))
     }
     fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
         let val = reader.read_u8()?;
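A hypothetical round-trip through the `u128` impl added above, assuming a dependency on the `tantivy-common` crate published from this workspace (imported as `tantivy_common`):

```rust
// Serialize a u128 to a buffer and read it back; the fixed-size impl above
// writes exactly 16 little/big-endian bytes per the crate's Endianness alias.
use tantivy_common::{BinarySerializable, FixedSize};

fn main() -> std::io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();
    42u128.serialize(&mut buf)?;
    assert_eq!(buf.len(), <u128 as FixedSize>::SIZE_IN_BYTES); // 16 bytes
    let back = u128::deserialize(&mut &buf[..])?;
    assert_eq!(back, 42u128);
    Ok(())
}
```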
@@ -5,6 +5,75 @@ use byteorder::{ByteOrder, LittleEndian};

 use super::BinarySerializable;

+/// Variable int serializes a u128 number
+pub fn serialize_vint_u128(mut val: u128, output: &mut Vec<u8>) {
+    loop {
+        let next_byte: u8 = (val % 128u128) as u8;
+        val /= 128u128;
+        if val == 0 {
+            output.push(next_byte | STOP_BIT);
+            return;
+        } else {
+            output.push(next_byte);
+        }
+    }
+}
+
+/// Deserializes a u128 number
+///
+/// Returns the number and the slice after the vint
+pub fn deserialize_vint_u128(data: &[u8]) -> io::Result<(u128, &[u8])> {
+    let mut result = 0u128;
+    let mut shift = 0u64;
+    for i in 0..19 {
+        let b = data[i];
+        result |= u128::from(b % 128u8) << shift;
+        if b >= STOP_BIT {
+            return Ok((result, &data[i + 1..]));
+        }
+        shift += 7;
+    }
+    Err(io::Error::new(
+        io::ErrorKind::InvalidData,
+        "Failed to deserialize u128 vint",
+    ))
+}
+
+/// Wrapper over a `u128` that serializes as a variable int.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct VIntU128(pub u128);
+
+impl BinarySerializable for VIntU128 {
+    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+        let mut buffer = vec![];
+        serialize_vint_u128(self.0, &mut buffer);
+        writer.write_all(&buffer)
+    }
+
+    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
+        let mut bytes = reader.bytes();
+        let mut result = 0u128;
+        let mut shift = 0u64;
+        loop {
+            match bytes.next() {
+                Some(Ok(b)) => {
+                    result |= u128::from(b % 128u8) << shift;
+                    if b >= STOP_BIT {
+                        return Ok(VIntU128(result));
+                    }
+                    shift += 7;
+                }
+                _ => {
+                    return Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        "Reach end of buffer while reading VInt",
+                    ));
+                }
+            }
+        }
+    }
+}
+
 /// Wrapper over a `u64` that serializes as a variable int.
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
 pub struct VInt(pub u64);
@@ -88,7 +157,7 @@ fn vint_len(data: &[u8]) -> usize {
 /// If the buffer does not start by a valid
 /// vint payload
 pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
-    let (result, vlen) = read_u32_vint_no_advance(*data);
+    let (result, vlen) = read_u32_vint_no_advance(data);
     *data = &data[vlen..];
     result
 }
@@ -176,6 +245,7 @@ impl BinarySerializable for VInt {
 mod tests {

     use super::{serialize_vint_u32, BinarySerializable, VInt};
+    use crate::vint::{deserialize_vint_u128, serialize_vint_u128, VIntU128};

     fn aux_test_vint(val: u64) {
         let mut v = [14u8; 10];
@@ -217,6 +287,26 @@ mod tests {
         assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
     }

+    fn aux_test_vint_u128(val: u128) {
+        let mut data = vec![];
+        serialize_vint_u128(val, &mut data);
+        let (deser_val, _data) = deserialize_vint_u128(&data).unwrap();
+        assert_eq!(val, deser_val);
+
+        let mut out = vec![];
+        VIntU128(val).serialize(&mut out).unwrap();
+        let deser_val = VIntU128::deserialize(&mut &out[..]).unwrap();
+        assert_eq!(val, deser_val.0);
+    }
+
+    #[test]
+    fn test_vint_u128() {
+        aux_test_vint_u128(0);
+        aux_test_vint_u128(1);
+        aux_test_vint_u128(u128::MAX / 3);
+        aux_test_vint_u128(u128::MAX);
+    }
+
     #[test]
     fn test_vint_u32() {
         aux_test_serialize_vint_u32(0);
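The stop-bit layout behind `serialize_vint_u128` above, worked on a small value; the function body is copied from the hunk so the byte-level claim can be checked:

```rust
// Each byte carries 7 payload bits; the high bit (STOP_BIT) marks the final
// byte. Logic mirrors serialize_vint_u128 from the diff above.
const STOP_BIT: u8 = 128;

fn serialize_vint_u128(mut val: u128, output: &mut Vec<u8>) {
    loop {
        let next_byte = (val % 128u128) as u8;
        val /= 128u128;
        if val == 0 {
            output.push(next_byte | STOP_BIT);
            return;
        }
        output.push(next_byte);
    }
}

fn main() {
    let mut buf = Vec::new();
    serialize_vint_u128(300, &mut buf);
    // 300 = 0b1_0010_1100: the low 7 bits are 44, the remaining 2 carries the stop bit.
    assert_eq!(buf, vec![44, 2 | STOP_BIT]);
}
```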
@@ -55,14 +55,14 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
 }

 /// Struct used to prevent from calling
-/// [`terminate_ref`](trait.TerminatingWrite.html#tymethod.terminate_ref) directly
+/// [`terminate_ref`](TerminatingWrite::terminate_ref) directly
 ///
 /// The point is that while the type is public, it cannot be built by anyone
 /// outside of this module.
 pub struct AntiCallToken(());

 /// Trait used to indicate when no more write need to be done on a writer
-pub trait TerminatingWrite: Write + Send {
+pub trait TerminatingWrite: Write + Send + Sync {
     /// Indicate that the writer will no longer be used. Internally call terminate_ref.
     fn terminate(mut self) -> io::Result<()>
     where Self: Sized {
BIN doc/assets/images/etsy.png (new binary file, 85 KiB; not shown)
@@ -50,7 +50,7 @@ to get tantivy to fit your use case:

 *Example 1* You could for instance use hadoop to build a very large search index in a timely manner, copy all of the resulting segment files in the same directory and edit the `meta.json` to get a functional index.[^2]

-*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
+*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated with segment `D-7`.

 ## Merging

@@ -118,7 +118,7 @@ fn main() -> tantivy::Result<()> {
         .into_iter()
         .collect();

-    let collector = AggregationCollector::from_aggs(agg_req_1, None);
+    let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

     let searcher = reader.searcher();
     let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -7,10 +7,12 @@
 // Of course, you can have a look at the tantivy's built-in collectors
 // such as the `CountCollector` for more examples.

+use std::sync::Arc;
+
+use fastfield_codecs::Column;
 // ---
 // Importing tantivy...
 use tantivy::collector::{Collector, SegmentCollector};
-use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
 use tantivy::query::QueryParser;
 use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
 use tantivy::{doc, Index, Score, SegmentReader};
@@ -95,7 +97,7 @@ impl Collector for StatsCollector {
 }

 struct StatsSegmentCollector {
-    fast_field_reader: DynamicFastFieldReader<u64>,
+    fast_field_reader: Arc<dyn Column<u64>>,
     stats: Stats,
 }

@@ -103,7 +105,7 @@ impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;

     fn collect(&mut self, doc: u32, _score: Score) {
-        let value = self.fast_field_reader.get(doc) as f64;
+        let value = self.fast_field_reader.get_val(doc) as f64;
         self.stats.count += 1;
         self.stats.sum += value;
         self.stats.squared_sum += value * value;
@@ -36,8 +36,7 @@ fn main() -> tantivy::Result<()> {
     // need to be able to be able to retrieve it
     // for our application.
     //
-    // We can make our index lighter and
-    // by omitting `STORED` flag.
+    // We can make our index lighter by omitting the `STORED` flag.
     let body = schema_builder.add_text_field("body", TEXT);

     let schema = schema_builder.build();
@@ -113,7 +113,7 @@ fn main() -> tantivy::Result<()> {
     // on its id.
     //
     // Note that `tantivy` does nothing to enforce the idea that
-    // there is only one document associated to this id.
+    // there is only one document associated with this id.
     //
     // Also you might have noticed that we apply the delete before
     // having committed. This does not matter really...
@@ -44,7 +44,7 @@ fn main() -> tantivy::Result<()> {
     // A segment contains different data structure.
     // Inverted index stands for the combination of
     // - the term dictionary
-    // - the inverted lists associated to each terms and their positions
+    // - the inverted lists associated with each terms and their positions
     let inverted_index = segment_reader.inverted_index(title)?;

     // A `Term` is a text token associated with a field.
@@ -105,7 +105,7 @@ fn main() -> tantivy::Result<()> {
     // A segment contains different data structure.
     // Inverted index stands for the combination of
     // - the term dictionary
-    // - the inverted lists associated to each terms and their positions
+    // - the inverted lists associated with each terms and their positions
     let inverted_index = segment_reader.inverted_index(title)?;

     // This segment posting object is like a cursor over the documents matching the term.
@@ -3,7 +3,6 @@ use std::collections::{HashMap, HashSet};
 use std::sync::{Arc, RwLock, Weak};

 use tantivy::collector::TopDocs;
-use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::{Field, Schema, FAST, TEXT};
 use tantivy::{
@@ -52,7 +51,7 @@ impl Warmer for DynamicPriceColumn {
         let product_id_reader = segment.fast_fields().u64(self.field)?;
         let product_ids: Vec<ProductId> = segment
             .doc_ids_alive()
-            .map(|doc| product_id_reader.get(doc))
+            .map(|doc| product_id_reader.get_val(doc))
             .collect();
         let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
         let mut price_vals: Vec<Price> = Vec::new();
@@ -1,19 +1,26 @@
 [package]
 name = "fastfield_codecs"
-version = "0.2.0"
+version = "0.3.0"
 authors = ["Pascal Seitz <pascal@quickwit.io>"]
 license = "MIT"
 edition = "2021"
 description = "Fast field codecs used by tantivy"
+documentation = "https://docs.rs/fastfield_codecs/"
+homepage = "https://github.com/quickwit-oss/tantivy"
+repository = "https://github.com/quickwit-oss/tantivy"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-common = { version = "0.3", path = "../common/", package = "tantivy-common" }
+common = { version = "0.4", path = "../common/", package = "tantivy-common" }
-tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
+tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
-ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
+ownedbytes = { version = "0.4.0", path = "../ownedbytes" }
 prettytable-rs = {version="0.9.0", optional= true}
 rand = {version="0.8.3", optional= true}
+fastdivide = "0.4"
+log = "0.4"
+itertools = { version = "0.10.3" }
+measure_time = { version="0.8.2", optional=true}

 [dev-dependencies]
 more-asserts = "0.3.0"
@@ -21,6 +28,7 @@ proptest = "1.0.0"
 rand = "0.8.3"

 [features]
-bin = ["prettytable-rs", "rand"]
+bin = ["prettytable-rs", "rand", "measure_time"]
 default = ["bin"]
+unstable = []
@@ -4,93 +4,243 @@ extern crate test;

 #[cfg(test)]
 mod tests {
-    use fastfield_codecs::bitpacked::{BitpackedReader, BitpackedSerializer};
-    use fastfield_codecs::blockwise_linear::{BlockwiseLinearReader, BlockwiseLinearSerializer};
-    use fastfield_codecs::linear::{LinearReader, LinearSerializer};
-    use fastfield_codecs::*;
+    use std::iter;
+    use std::sync::Arc;

-    fn get_data() -> Vec<u64> {
-        let mut data: Vec<_> = (100..55000_u64)
-            .map(|num| num + rand::random::<u8>() as u64)
+    use fastfield_codecs::*;
+    use ownedbytes::OwnedBytes;
+    use rand::prelude::*;
+    use test::Bencher;
+
+    use super::*;
+
+    // Warning: this generates the same permutation at each call
+    fn generate_permutation() -> Vec<u64> {
+        let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
+        permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
+        permutation
+    }
+
+    fn generate_random() -> Vec<u64> {
+        let mut permutation: Vec<u64> = (0u64..100_000u64)
+            .map(|el| el + random::<u16>() as u64)
             .collect();
-        data.push(99_000);
-        data.insert(1000, 2000);
-        data.insert(2000, 100);
-        data.insert(3000, 4100);
-        data.insert(4000, 100);
-        data.insert(5000, 800);
+        permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
+        permutation
+    }
+
+    // Warning: this generates the same permutation at each call
+    fn generate_permutation_gcd() -> Vec<u64> {
+        let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
+        permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
+        permutation
+    }
+
+    pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
+        column: &[T],
+    ) -> Arc<dyn Column<T>> {
+        let mut buffer = Vec::new();
+        serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
+        open(OwnedBytes::new(buffer)).unwrap()
+    }
+
+    #[bench]
+    fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        let n = permutation.len();
+        b.iter(|| {
+            let mut a = 0u64;
+            for _ in 0..n {
+                a = permutation[a as usize];
+            }
+            a
+        });
+    }
+
+    #[bench]
+    fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        let n = permutation.len();
+        let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
+        b.iter(|| {
+            let mut a = 0u64;
+            for _ in 0..n {
+                a = column.get_val(a as u32);
+            }
+            a
+        });
+    }
+
+    fn get_exp_data() -> Vec<u64> {
+        let mut data = vec![];
+        for i in 0..100 {
+            let num = i * i;
+            data.extend(iter::repeat(i as u64).take(num));
+        }
+        data.shuffle(&mut StdRng::from_seed([1u8; 32]));
+
+        // lengt = 328350
         data
     }

-    fn value_iter() -> impl Iterator<Item = u64> {
-        0..20_000
+    fn get_data_50percent_item() -> (u128, u128, Vec<u128>) {
+        let mut permutation = get_exp_data();
+        let major_item = 20;
+        let minor_item = 10;
+        permutation.extend(iter::repeat(major_item).take(permutation.len()));
+        permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
+        let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
+        (major_item as u128, minor_item as u128, permutation)
     }
-    fn bench_get<
-        S: FastFieldCodecSerializer,
-        R: FastFieldCodecDeserializer + FastFieldDataAccess,
-    >(
-        b: &mut Bencher,
-        data: &[u64],
-    ) {
-        let mut bytes = vec![];
-        S::serialize(&mut bytes, &data).unwrap();
-        let reader = R::open_from_bytes(OwnedBytes::new(bytes)).unwrap();
-        b.iter(|| {
-            let mut sum = 0u64;
-            for pos in value_iter() {
-                let val = reader.get_val(pos as u64);
-                debug_assert_eq!(data[pos as usize], val);
-                sum = sum.wrapping_add(val);
-            }
-            sum
-        });
+    fn get_u128_column_random() -> Arc<dyn Column<u128>> {
+        let permutation = generate_random();
+        let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
+        get_u128_column_from_data(&permutation)
     }
-    fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
-        let mut bytes = vec![];
+    fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn Column<u128>> {
+        let mut out = vec![];
+        let iter_gen = || data.iter().cloned();
+        serialize_u128(iter_gen, data.len() as u32, &mut out).unwrap();
+        let out = OwnedBytes::new(out);
+        open_u128::<u128>(out).unwrap()
+    }
+
+    #[bench]
+    fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
+        let (major_item, _minor_item, data) = get_data_50percent_item();
+        let column = get_u128_column_from_data(&data);
+
         b.iter(|| {
-            S::serialize(&mut bytes, &data).unwrap();
+            let mut positions = Vec::new();
+            column.get_docids_for_value_range(
+                major_item..=major_item,
+                0..data.len() as u32,
+                &mut positions,
+            );
+            positions
         });
     }

-    use ownedbytes::OwnedBytes;
-    use test::Bencher;
     #[bench]
-    fn bench_fastfield_bitpack_create(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_create::<BitpackedSerializer>(b, &data);
+    fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
+        let (_major_item, minor_item, data) = get_data_50percent_item();
+        let column = get_u128_column_from_data(&data);
+
+        b.iter(|| {
+            let mut positions = Vec::new();
+            column.get_docids_for_value_range(
+                minor_item..=minor_item,
+                0..data.len() as u32,
+                &mut positions,
+            );
+            positions
+        });
     }

     #[bench]
-    fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_create::<LinearSerializer>(b, &data);
+    fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
+        let (_major_item, _minor_item, data) = get_data_50percent_item();
+        let column = get_u128_column_from_data(&data);
+
+        b.iter(|| {
+            let mut positions = Vec::new();
+            column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
+            positions
+        });
     }

     #[bench]
-    fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_create::<BlockwiseLinearSerializer>(b, &data);
+    fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
+        let column = get_u128_column_random();
+
+        b.iter(|| {
+            let mut a = 0u128;
+            for i in 0u64..column.num_vals() as u64 {
+                a += column.get_val(i as u32);
+            }
+            a
+        });
     }

     #[bench]
-    fn bench_fastfield_bitpack_get(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get::<BitpackedSerializer, BitpackedReader>(b, &data);
+    fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
+        let column = get_u128_column_random();
+
+        b.iter(|| {
+            let n = column.num_vals();
+            let mut a = 0u128;
+            for i in (0..n / 5).map(|val| val * 5) {
+                a += column.get_val(i);
+            }
+            a
+        });
     }

     #[bench]
-    fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get::<LinearSerializer, LinearReader>(b, &data);
+    fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        let n = permutation.len();
+        b.iter(|| {
+            let mut a = 0u64;
+            for i in (0..n / 7).map(|val| val * 7) {
+                a += permutation[i as usize];
+            }
+            a
+        });
     }

     #[bench]
-    fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get::<BlockwiseLinearSerializer, BlockwiseLinearReader>(b, &data);
+    fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        let n = permutation.len();
+        let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
+        b.iter(|| {
+            let mut a = 0;
+            for i in (0..n / 7).map(|val| val * 7) {
+                a += column.get_val(i as u32);
+            }
+            a
+        });
     }
-    pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
-        let min_value = data.iter().cloned().min().unwrap_or(0);
-        let max_value = data.iter().cloned().max().unwrap_or(0);
-        FastFieldStats {
-            min_value,
-            max_value,
-            num_vals: data.len() as u64,
-        }
+
+    #[bench]
+    fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        let n = permutation.len();
+        let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
+        b.iter(|| {
+            let mut a = 0u64;
+            for i in 0u32..n as u32 {
+                a += column.get_val(i);
+            }
+            a
+        });
+    }
+
+    #[bench]
+    fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
+        let permutation = generate_permutation_gcd();
+        let n = permutation.len();
+        let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
+        b.iter(|| {
+            let mut a = 0u64;
+            for i in 0..n {
+                a += column.get_val(i as u32);
|
||||||
|
}
|
||||||
|
a
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
#[bench]
|
||||||
|
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||||
|
let permutation = generate_permutation();
|
||||||
|
b.iter(|| {
|
||||||
|
let mut a = 0u64;
|
||||||
|
for i in 0..permutation.len() {
|
||||||
|
a += permutation[i as usize] as u64;
|
||||||
|
}
|
||||||
|
a
|
||||||
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
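The benchmarks above revolve around `Column::get_docids_for_value_range`, which collects the positions whose value falls inside a value range. A minimal sketch of that access pattern, assuming `Column` and `VecColumn` are re-exported at the crate root of `fastfield_codecs`, as the `use crate::…` lines elsewhere in this diff suggest:

    use fastfield_codecs::{Column, VecColumn};

    fn main() {
        let vals: Vec<u64> = vec![10, 20, 10, 30, 20];
        let column = VecColumn::from(&vals);
        let mut positions = Vec::new();
        // Position == doc id for single-value fast fields, so this is a range query.
        column.get_docids_for_value_range(20..=20, 0..column.num_vals(), &mut positions);
        assert_eq!(positions, vec![1, 4]);
    }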
@@ -1,12 +1,10 @@
 use std::io::{self, Write};

-use common::BinarySerializable;
 use ownedbytes::OwnedBytes;
 use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

-use crate::{
-    FastFieldCodecDeserializer, FastFieldCodecSerializer, FastFieldCodecType, FastFieldDataAccess,
-};
+use crate::serialize::NormalizedHeader;
+use crate::{Column, FastFieldCodec, FastFieldCodecType};

 /// Depending on the field type, a different
 /// fast field is required.
@@ -14,157 +12,88 @@ use crate::{
 pub struct BitpackedReader {
     data: OwnedBytes,
     bit_unpacker: BitUnpacker,
-    pub min_value_u64: u64,
-    pub max_value_u64: u64,
-    pub num_vals: u64,
+    normalized_header: NormalizedHeader,
 }

-impl FastFieldCodecDeserializer for BitpackedReader {
+impl Column for BitpackedReader {
+    #[inline]
+    fn get_val(&self, doc: u32) -> u64 {
+        self.bit_unpacker.get(doc, &self.data)
+    }
+    #[inline]
+    fn min_value(&self) -> u64 {
+        // The BitpackedReader assumes a normalized vector.
+        0
+    }
+    #[inline]
+    fn max_value(&self) -> u64 {
+        self.normalized_header.max_value
+    }
+    #[inline]
+    fn num_vals(&self) -> u32 {
+        self.normalized_header.num_vals
+    }
+}
+
+pub struct BitpackedCodec;
+
+impl FastFieldCodec for BitpackedCodec {
+    /// The CODEC_TYPE is an enum value used for serialization.
+    const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
+
+    type Reader = BitpackedReader;
+
     /// Opens a fast field given a file.
-    fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self> {
-        let footer_offset = bytes.len() - 24;
-        let (data, mut footer) = bytes.split(footer_offset);
-        let min_value = u64::deserialize(&mut footer)?;
-        let amplitude = u64::deserialize(&mut footer)?;
-        let num_vals = u64::deserialize(&mut footer)?;
-        let max_value = min_value + amplitude;
-        let num_bits = compute_num_bits(amplitude);
+    fn open_from_bytes(
+        data: OwnedBytes,
+        normalized_header: NormalizedHeader,
+    ) -> io::Result<Self::Reader> {
+        let num_bits = compute_num_bits(normalized_header.max_value);
         let bit_unpacker = BitUnpacker::new(num_bits);
         Ok(BitpackedReader {
             data,
             bit_unpacker,
-            min_value_u64: min_value,
-            max_value_u64: max_value,
-            num_vals,
+            normalized_header,
         })
     }
-}
-
-impl FastFieldDataAccess for BitpackedReader {
-    #[inline]
-    fn get_val(&self, doc: u64) -> u64 {
-        self.min_value_u64 + self.bit_unpacker.get(doc, &self.data)
-    }
-    #[inline]
-    fn min_value(&self) -> u64 {
-        self.min_value_u64
-    }
-    #[inline]
-    fn max_value(&self) -> u64 {
-        self.max_value_u64
-    }
-    #[inline]
-    fn num_vals(&self) -> u64 {
-        self.num_vals
-    }
-}
-
-pub struct BitpackedSerializerLegacy<'a, W: 'a + Write> {
-    bit_packer: BitPacker,
-    write: &'a mut W,
-    min_value: u64,
-    num_vals: u64,
-    amplitude: u64,
-    num_bits: u8,
-}
-
-impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
-    /// Creates a new fast field serializer.
-    ///
-    /// The serializer in fact encode the values by bitpacking
-    /// `(val - min_value)`.
-    ///
-    /// It requires a `min_value` and a `max_value` to compute
-    /// compute the minimum number of bits required to encode
-    /// values.
-    pub fn open(
-        write: &'a mut W,
-        min_value: u64,
-        max_value: u64,
-    ) -> io::Result<BitpackedSerializerLegacy<'a, W>> {
-        assert!(min_value <= max_value);
-        let amplitude = max_value - min_value;
-        let num_bits = compute_num_bits(amplitude);
-        let bit_packer = BitPacker::new();
-        Ok(BitpackedSerializerLegacy {
-            bit_packer,
-            write,
-            min_value,
-            num_vals: 0,
-            amplitude,
-            num_bits,
-        })
-    }
-    /// Pushes a new value to the currently open u64 fast field.
-    #[inline]
-    pub fn add_val(&mut self, val: u64) -> io::Result<()> {
-        let val_to_write: u64 = val - self.min_value;
-        self.bit_packer
-            .write(val_to_write, self.num_bits, &mut self.write)?;
-        self.num_vals += 1;
-        Ok(())
-    }
-    pub fn close_field(mut self) -> io::Result<()> {
-        self.bit_packer.close(&mut self.write)?;
-        self.min_value.serialize(&mut self.write)?;
-        self.amplitude.serialize(&mut self.write)?;
-        self.num_vals.serialize(&mut self.write)?;
-        Ok(())
-    }
-}
-
-pub struct BitpackedSerializer {}
-
-impl FastFieldCodecSerializer for BitpackedSerializer {
-    /// The CODEC_TYPE is an enum value used for serialization.
-    const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;

     /// Serializes data with the BitpackedFastFieldSerializer.
     ///
-    /// The serializer in fact encode the values by bitpacking
-    /// `(val - min_value)`.
+    /// The bitpacker assumes that the column has been normalized.
+    /// i.e. It has already been shifted by its minimum value, so that its
+    /// current minimum value is 0.
     ///
-    /// It requires a `min_value` and a `max_value` to compute
-    /// compute the minimum number of bits required to encode
-    /// values.
-    fn serialize(
-        write: &mut impl Write,
-        fastfield_accessor: &dyn FastFieldDataAccess,
-    ) -> io::Result<()> {
-        let mut serializer = BitpackedSerializerLegacy::open(
-            write,
-            fastfield_accessor.min_value(),
-            fastfield_accessor.max_value(),
-        )?;
-
-        for val in fastfield_accessor.iter() {
-            serializer.add_val(val)?;
+    /// Ideally, we made a shift upstream on the column so that `col.min_value() == 0`.
+    fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()> {
+        assert_eq!(column.min_value(), 0u64);
+        let num_bits = compute_num_bits(column.max_value());
+        let mut bit_packer = BitPacker::new();
+        for val in column.iter() {
+            bit_packer.write(val, num_bits, write)?;
         }
-        serializer.close_field()?;
+        bit_packer.close(write)?;

         Ok(())
     }
-    fn is_applicable(_fastfield_accessor: &impl FastFieldDataAccess) -> bool {
-        true
-    }
-    fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32 {
-        let amplitude = fastfield_accessor.max_value() - fastfield_accessor.min_value();
-        let num_bits = compute_num_bits(amplitude);
+
+    fn estimate(column: &dyn Column) -> Option<f32> {
+        let num_bits = compute_num_bits(column.max_value());
         let num_bits_uncompressed = 64;
-        num_bits as f32 / num_bits_uncompressed as f32
+        Some(num_bits as f32 / num_bits_uncompressed as f32)
     }
 }

 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::tests::get_codec_test_data_sets;
+    use crate::tests::get_codec_test_datasets;

     fn create_and_validate(data: &[u64], name: &str) {
-        crate::tests::create_and_validate::<BitpackedSerializer, BitpackedReader>(data, name);
+        crate::tests::create_and_validate::<BitpackedCodec>(data, name);
     }

     #[test]
     fn test_with_codec_data_sets() {
-        let data_sets = get_codec_test_data_sets();
+        let data_sets = get_codec_test_datasets();
         for (mut data, name) in data_sets {
             create_and_validate(&data, name);
             data.reverse();
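The rewritten codec above bitpacks the already-normalized values directly, so the whole scheme is just `compute_num_bits` plus a `BitPacker`/`BitUnpacker` pair of the same width. A minimal round-trip sketch under that assumption; the `BitUnpacker::get(idx, bytes)` and `BitPacker::write(val, num_bits, writer)` signatures are taken from this diff:

    use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

    fn main() -> std::io::Result<()> {
        let vals = [3u64, 7, 5, 0, 6];
        // A normalized column starts at 0, so max_value alone fixes the bit width.
        let num_bits = compute_num_bits(*vals.iter().max().unwrap()); // 3 bits for 0..=7
        let mut buffer: Vec<u8> = Vec::new();
        let mut bit_packer = BitPacker::new();
        for &val in &vals {
            bit_packer.write(val, num_bits, &mut buffer)?;
        }
        bit_packer.close(&mut buffer)?; // flushes and pads the trailing bits

        let bit_unpacker = BitUnpacker::new(num_bits);
        for (idx, &expected) in vals.iter().enumerate() {
            assert_eq!(bit_unpacker.get(idx as u32, &buffer), expected);
        }
        Ok(())
    }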
@@ -1,442 +1,186 @@
-//! The BlockwiseLinear codec uses linear interpolation to guess a values and stores the
-//! offset, but in blocks of 512.
-//!
-//! With a CHUNK_SIZE of 512 and 29 byte metadata per block, we get a overhead for metadata of 232 /
-//! 512 = 0,45 bits per element. The additional space required per element in a block is the the
-//! maximum deviation of the linear interpolation estimation function.
-//!
-//! E.g. if the maximum deviation of an element is 12, all elements cost 4bits.
-//!
-//! Size per block:
-//! Num Elements * Maximum Deviation from Interpolation + 29 Byte Metadata
-
-use std::io::{self, Read, Write};
-use std::ops::Sub;
-
+use std::sync::Arc;
+use std::{io, iter};
+
 use common::{BinarySerializable, CountingWriter, DeserializeFrom};
 use ownedbytes::OwnedBytes;
 use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

-use crate::linear::{get_calculated_value, get_slope};
-use crate::{
-    FastFieldCodecDeserializer, FastFieldCodecSerializer, FastFieldCodecType, FastFieldDataAccess,
-};
+use crate::line::Line;
+use crate::serialize::NormalizedHeader;
+use crate::{Column, FastFieldCodec, FastFieldCodecType, VecColumn};

-const CHUNK_SIZE: u64 = 512;
+const CHUNK_SIZE: usize = 512;

-/// Depending on the field type, a different
-/// fast field is required.
-#[derive(Clone)]
-pub struct BlockwiseLinearReader {
-    data: OwnedBytes,
-    pub footer: BlockwiseLinearFooter,
-}
-
-#[derive(Clone, Debug, Default)]
-struct Function {
-    // The offset in the data is required, because we have different bit_widths per block
-    data_start_offset: u64,
-    // start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
-    start_pos: u64,
-    // only used during serialization, 0 after deserialization
-    end_pos: u64,
-    // only used during serialization, 0 after deserialization
-    value_start_pos: u64,
-    // only used during serialization, 0 after deserialization
-    value_end_pos: u64,
-    slope: f32,
-    // The offset so that all values are positive when writing them
-    positive_val_offset: u64,
-    num_bits: u8,
+#[derive(Debug, Default)]
+struct Block {
+    line: Line,
     bit_unpacker: BitUnpacker,
+    data_start_offset: usize,
 }

-impl Function {
-    fn calc_slope(&mut self) {
-        let num_vals = self.end_pos - self.start_pos;
-        self.slope = get_slope(self.value_start_pos, self.value_end_pos, num_vals);
-    }
-    // split the interpolation into two function, change self and return the second split
-    fn split(&mut self, split_pos: u64, split_pos_value: u64) -> Function {
-        let mut new_function = Function {
-            start_pos: split_pos,
-            end_pos: self.end_pos,
-            value_start_pos: split_pos_value,
-            value_end_pos: self.value_end_pos,
-            ..Default::default()
-        };
-        new_function.calc_slope();
-        self.end_pos = split_pos;
-        self.value_end_pos = split_pos_value;
-        self.calc_slope();
-        new_function
-    }
-}
-
-impl BinarySerializable for Function {
-    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
-        self.data_start_offset.serialize(write)?;
-        self.value_start_pos.serialize(write)?;
-        self.positive_val_offset.serialize(write)?;
-        self.slope.serialize(write)?;
-        self.num_bits.serialize(write)?;
+impl BinarySerializable for Block {
+    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+        self.line.serialize(writer)?;
+        self.bit_unpacker.bit_width().serialize(writer)?;
         Ok(())
     }

-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Function> {
-        let data_start_offset = u64::deserialize(reader)?;
-        let value_start_pos = u64::deserialize(reader)?;
-        let offset = u64::deserialize(reader)?;
-        let slope = f32::deserialize(reader)?;
-        let num_bits = u8::deserialize(reader)?;
-        let interpolation = Function {
-            data_start_offset,
-            value_start_pos,
-            positive_val_offset: offset,
-            num_bits,
-            bit_unpacker: BitUnpacker::new(num_bits),
-            slope,
-            ..Default::default()
-        };
-
-        Ok(interpolation)
+    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+        let line = Line::deserialize(reader)?;
+        let bit_width = u8::deserialize(reader)?;
+        Ok(Block {
+            line,
+            bit_unpacker: BitUnpacker::new(bit_width),
+            data_start_offset: 0,
+        })
     }
 }

-#[derive(Clone, Debug)]
-pub struct BlockwiseLinearFooter {
-    pub num_vals: u64,
-    pub min_value: u64,
-    pub max_value: u64,
-    interpolations: Vec<Function>,
+fn compute_num_blocks(num_vals: u32) -> usize {
+    (num_vals as usize + CHUNK_SIZE - 1) / CHUNK_SIZE
 }

-impl BinarySerializable for BlockwiseLinearFooter {
-    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
-        let mut out = vec![];
-        self.num_vals.serialize(&mut out)?;
-        self.min_value.serialize(&mut out)?;
-        self.max_value.serialize(&mut out)?;
-        self.interpolations.serialize(&mut out)?;
-        write.write_all(&out)?;
-        (out.len() as u32).serialize(write)?;
-        Ok(())
-    }
-
-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<BlockwiseLinearFooter> {
-        let mut footer = BlockwiseLinearFooter {
-            num_vals: u64::deserialize(reader)?,
-            min_value: u64::deserialize(reader)?,
-            max_value: u64::deserialize(reader)?,
-            interpolations: Vec::<Function>::deserialize(reader)?,
-        };
-        for (num, interpol) in footer.interpolations.iter_mut().enumerate() {
-            interpol.start_pos = CHUNK_SIZE * num as u64;
-        }
-        Ok(footer)
-    }
-}
-
-#[inline]
-fn get_interpolation_position(doc: u64) -> usize {
-    let index = doc / CHUNK_SIZE;
-    index as usize
-}
-
-#[inline]
-fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Function {
-    &interpolations[get_interpolation_position(doc)]
-}
-
-impl FastFieldCodecDeserializer for BlockwiseLinearReader {
-    /// Opens a fast field given a file.
-    fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self> {
-        let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
-        let footer_offset = bytes.len() - 4 - footer_len as usize;
-        let (data, mut footer) = bytes.split(footer_offset);
-        let footer = BlockwiseLinearFooter::deserialize(&mut footer)?;
-        Ok(BlockwiseLinearReader { data, footer })
-    }
-}
-
-impl FastFieldDataAccess for BlockwiseLinearReader {
-    #[inline]
-    fn get_val(&self, idx: u64) -> u64 {
-        let interpolation = get_interpolation_function(idx, &self.footer.interpolations);
-        let in_block_idx = idx - interpolation.start_pos;
-        let calculated_value = get_calculated_value(
-            interpolation.value_start_pos,
-            in_block_idx,
-            interpolation.slope,
-        );
-        let diff = interpolation.bit_unpacker.get(
-            in_block_idx,
-            &self.data[interpolation.data_start_offset as usize..],
-        );
-        (calculated_value + diff) - interpolation.positive_val_offset
-    }
-
-    #[inline]
-    fn min_value(&self) -> u64 {
-        self.footer.min_value
-    }
-    #[inline]
-    fn max_value(&self) -> u64 {
-        self.footer.max_value
-    }
-    #[inline]
-    fn num_vals(&self) -> u64 {
-        self.footer.num_vals
-    }
-}
-
-/// Same as LinearSerializer, but working on chunks of CHUNK_SIZE elements.
-pub struct BlockwiseLinearSerializer {}
+pub struct BlockwiseLinearCodec;

-impl FastFieldCodecSerializer for BlockwiseLinearSerializer {
-    const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
-    /// Creates a new fast field serializer.
-    fn serialize(
-        write: &mut impl Write,
-        fastfield_accessor: &dyn FastFieldDataAccess,
-    ) -> io::Result<()> {
-        assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
-
-        let first_val = fastfield_accessor.get_val(0);
-        let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
-
-        let mut first_function = Function {
-            end_pos: fastfield_accessor.num_vals(),
-            value_start_pos: first_val,
-            value_end_pos: last_val,
-            ..Default::default()
-        };
-        first_function.calc_slope();
-        let mut interpolations = vec![first_function];
-
-        // Since we potentially apply multiple passes over the data, the data is cached.
-        // Multiple iteration can be expensive (merge with index sorting can add lot of overhead per
-        // iteration)
-        let data = fastfield_accessor.iter().collect::<Vec<_>>();
-
-        //// let's split this into chunks of CHUNK_SIZE
-        for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
-            let new_fun = {
-                let current_interpolation = interpolations.last_mut().unwrap();
-                current_interpolation.split(data_pos, data[data_pos as usize])
-            };
-            interpolations.push(new_fun);
-        }
-        // calculate offset and max (-> numbits) for each function
-        for interpolation in &mut interpolations {
-            let mut offset = 0;
-            let mut rel_positive_max = 0;
-            for (pos, actual_value) in data
-                [interpolation.start_pos as usize..interpolation.end_pos as usize]
-                .iter()
-                .cloned()
-                .enumerate()
-            {
-                let calculated_value = get_calculated_value(
-                    interpolation.value_start_pos,
-                    pos as u64,
-                    interpolation.slope,
-                );
-                if calculated_value > actual_value {
-                    // negative value we need to apply an offset
-                    // we ignore negative values in the max value calculation, because negative
-                    // values will be offset to 0
-                    offset = offset.max(calculated_value - actual_value);
-                } else {
-                    // positive value no offset reuqired
-                    rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
-                }
-            }
-
-            interpolation.positive_val_offset = offset;
-            interpolation.num_bits = compute_num_bits(rel_positive_max + offset);
-        }
-        let mut bit_packer = BitPacker::new();
-
-        let write = &mut CountingWriter::wrap(write);
-        for interpolation in &mut interpolations {
-            interpolation.data_start_offset = write.written_bytes();
-            let num_bits = interpolation.num_bits;
-            for (pos, actual_value) in data
-                [interpolation.start_pos as usize..interpolation.end_pos as usize]
-                .iter()
-                .cloned()
-                .enumerate()
-            {
-                let calculated_value = get_calculated_value(
-                    interpolation.value_start_pos,
-                    pos as u64,
-                    interpolation.slope,
-                );
-                let diff = (actual_value + interpolation.positive_val_offset) - calculated_value;
-                bit_packer.write(diff, num_bits, write)?;
-            }
-            bit_packer.flush(write)?;
-        }
-        bit_packer.close(write)?;
-
-        let footer = BlockwiseLinearFooter {
-            num_vals: fastfield_accessor.num_vals(),
-            min_value: fastfield_accessor.min_value(),
-            max_value: fastfield_accessor.max_value(),
-            interpolations,
-        };
-        footer.serialize(write)?;
-        Ok(())
-    }
-
-    fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess) -> bool {
-        if fastfield_accessor.num_vals() < 5_000 {
-            return false;
-        }
-        // On serialization the offset is added to the actual value.
-        // We need to make sure this won't run into overflow calculation issues.
-        // For this we take the maximum theroretical offset and add this to the max value.
-        // If this doesn't overflow the algorithm should be fine
-        let theorethical_maximum_offset =
-            fastfield_accessor.max_value() - fastfield_accessor.min_value();
-        if fastfield_accessor
-            .max_value()
-            .checked_add(theorethical_maximum_offset)
-            .is_none()
-        {
-            return false;
-        }
-        true
-    }
-    /// estimation for linear interpolation is hard because, you don't know
-    /// where the local maxima are for the deviation of the calculated value and
-    /// the offset is also unknown.
-    fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32 {
-        let first_val_in_first_block = fastfield_accessor.get_val(0);
-        let last_elem_in_first_chunk = CHUNK_SIZE.min(fastfield_accessor.num_vals());
-        let last_val_in_first_block =
-            fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
-        let slope = get_slope(
-            first_val_in_first_block,
-            last_val_in_first_block,
-            fastfield_accessor.num_vals(),
-        );
-
-        // let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
-        let sample_positions = (0..20)
-            .map(|pos| (last_elem_in_first_chunk as f32 / 100.0 * pos as f32 * 5.0) as usize)
-            .collect::<Vec<_>>();
-
-        let max_distance = sample_positions
-            .iter()
-            .map(|pos| {
-                let calculated_value =
-                    get_calculated_value(first_val_in_first_block, *pos as u64, slope);
-                let actual_value = fastfield_accessor.get_val(*pos as u64);
-                distance(calculated_value, actual_value)
-            })
-            .max()
-            .unwrap();
-
-        // Estimate one block and extrapolate the cost to all blocks.
-        // the theory would be that we don't have the actual max_distance, but we are close within
-        // 50% threshold.
-        // It is multiplied by 2 because in a log case scenario the line would be as much above as
-        // below. So the offset would = max_distance
-        //
-        let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
-
-        let num_bits = compute_num_bits(relative_max_value as u64) as u64
-            * fastfield_accessor.num_vals() as u64
-            // function metadata per block
-            + 29 * (fastfield_accessor.num_vals() / CHUNK_SIZE);
-        let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
-        num_bits as f32 / num_bits_uncompressed as f32
-    }
-}
-
-fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
-    if x < y {
-        y - x
-    } else {
-        x - y
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::tests::get_codec_test_data_sets;
-
-    fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
-        crate::tests::create_and_validate::<BlockwiseLinearSerializer, BlockwiseLinearReader>(
-            data, name,
-        )
-    }
-
-    const HIGHEST_BIT: u64 = 1 << 63;
-    pub fn i64_to_u64(val: i64) -> u64 {
-        (val as u64) ^ HIGHEST_BIT
-    }
-
-    #[test]
-    fn test_compression_i64() {
-        let data = (i64::MAX - 600_000..=i64::MAX - 550_000)
-            .map(i64_to_u64)
-            .collect::<Vec<_>>();
-        let (estimate, actual_compression) =
-            create_and_validate(&data, "simple monotonically large i64");
-        assert!(actual_compression < 0.2);
-        assert!(estimate < 0.20);
-        assert!(estimate > 0.15);
-        assert!(actual_compression > 0.01);
-    }
-
-    #[test]
-    fn test_compression() {
-        let data = (10..=6_000_u64).collect::<Vec<_>>();
-        let (estimate, actual_compression) =
-            create_and_validate(&data, "simple monotonically large");
-        assert!(actual_compression < 0.2);
-        assert!(estimate < 0.20);
-        assert!(estimate > 0.15);
-        assert!(actual_compression > 0.01);
-    }
-
-    #[test]
-    fn test_with_codec_data_sets() {
-        let data_sets = get_codec_test_data_sets();
-        for (mut data, name) in data_sets {
-            create_and_validate(&data, name);
-            data.reverse();
-            create_and_validate(&data, name);
-        }
-    }
-
-    #[test]
-    fn test_simple() {
-        let data = (10..=20_u64).collect::<Vec<_>>();
-        create_and_validate(&data, "simple monotonically");
-    }
-
-    #[test]
-    fn border_cases_1() {
-        let data = (0..1024).collect::<Vec<_>>();
-        create_and_validate(&data, "border case");
-    }
-    #[test]
-    fn border_case_2() {
-        let data = (0..1025).collect::<Vec<_>>();
-        create_and_validate(&data, "border case");
-    }
-    #[test]
-    fn rand() {
-        for _ in 0..10 {
-            let mut data = (5_000..20_000)
-                .map(|_| rand::random::<u32>() as u64)
-                .collect::<Vec<_>>();
-            let _ = create_and_validate(&data, "random");
-            data.reverse();
-            create_and_validate(&data, "random");
-        }
-    }
-}
+impl FastFieldCodec for BlockwiseLinearCodec {
+    const CODEC_TYPE: crate::FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
+    type Reader = BlockwiseLinearReader;
+
+    fn open_from_bytes(
+        bytes: ownedbytes::OwnedBytes,
+        normalized_header: NormalizedHeader,
+    ) -> io::Result<Self::Reader> {
+        let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
+        let footer_offset = bytes.len() - 4 - footer_len as usize;
+        let (data, mut footer) = bytes.split(footer_offset);
+        let num_blocks = compute_num_blocks(normalized_header.num_vals);
+        let mut blocks: Vec<Block> = iter::repeat_with(|| Block::deserialize(&mut footer))
+            .take(num_blocks)
+            .collect::<io::Result<_>>()?;
+
+        let mut start_offset = 0;
+        for block in &mut blocks {
+            block.data_start_offset = start_offset;
+            start_offset += (block.bit_unpacker.bit_width() as usize) * CHUNK_SIZE / 8;
+        }
+        Ok(BlockwiseLinearReader {
+            blocks: Arc::new(blocks),
+            data,
+            normalized_header,
+        })
+    }
+
+    // Estimate first_chunk and extrapolate
+    fn estimate(column: &dyn crate::Column) -> Option<f32> {
+        if column.num_vals() < 10 * CHUNK_SIZE as u32 {
+            return None;
+        }
+        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
+        let line = Line::train(&VecColumn::from(&first_chunk));
+        for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
+            let interpolated_val = line.eval(i as u32);
+            *buffer_val = buffer_val.wrapping_sub(interpolated_val);
+        }
+        let estimated_bit_width = first_chunk
+            .iter()
+            .map(|el| ((el + 1) as f32 * 3.0) as u64)
+            .map(compute_num_bits)
+            .max()
+            .unwrap();
+
+        let metadata_per_block = {
+            let mut out = vec![];
+            Block::default().serialize(&mut out).unwrap();
+            out.len()
+        };
+        let num_bits = estimated_bit_width as u64 * column.num_vals() as u64
+            // function metadata per block
+            + metadata_per_block as u64 * (column.num_vals() as u64 / CHUNK_SIZE as u64);
+        let num_bits_uncompressed = 64 * column.num_vals();
+        Some(num_bits as f32 / num_bits_uncompressed as f32)
+    }
+
+    fn serialize(column: &dyn Column, wrt: &mut impl io::Write) -> io::Result<()> {
+        // The BitpackedReader assumes a normalized vector.
+        assert_eq!(column.min_value(), 0);
+        let mut buffer = Vec::with_capacity(CHUNK_SIZE);
+        let num_vals = column.num_vals();
+
+        let num_blocks = compute_num_blocks(num_vals);
+        let mut blocks = Vec::with_capacity(num_blocks);
+
+        let mut vals = column.iter();
+
+        let mut bit_packer = BitPacker::new();
+
+        for _ in 0..num_blocks {
+            buffer.clear();
+            buffer.extend((&mut vals).take(CHUNK_SIZE));
+            let line = Line::train(&VecColumn::from(&buffer));
+
+            assert!(!buffer.is_empty());
+
+            for (i, buffer_val) in buffer.iter_mut().enumerate() {
+                let interpolated_val = line.eval(i as u32);
+                *buffer_val = buffer_val.wrapping_sub(interpolated_val);
+            }
+            let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();
+
+            for &buffer_val in &buffer {
+                bit_packer.write(buffer_val, bit_width, wrt)?;
+            }
+
+            blocks.push(Block {
+                line,
+                bit_unpacker: BitUnpacker::new(bit_width),
+                data_start_offset: 0,
+            });
+        }
+
+        bit_packer.close(wrt)?;
+
+        assert_eq!(blocks.len(), compute_num_blocks(num_vals));
+
+        let mut counting_wrt = CountingWriter::wrap(wrt);
+        for block in &blocks {
+            block.serialize(&mut counting_wrt)?;
+        }
+        let footer_len = counting_wrt.written_bytes();
+        (footer_len as u32).serialize(&mut counting_wrt)?;
+
+        Ok(())
+    }
+}
+
+#[derive(Clone)]
+pub struct BlockwiseLinearReader {
+    blocks: Arc<Vec<Block>>,
+    normalized_header: NormalizedHeader,
+    data: OwnedBytes,
+}
+
+impl Column for BlockwiseLinearReader {
+    #[inline(always)]
+    fn get_val(&self, idx: u32) -> u64 {
+        let block_id = (idx / CHUNK_SIZE as u32) as usize;
+        let idx_within_block = idx % (CHUNK_SIZE as u32);
+        let block = &self.blocks[block_id];
+        let interpoled_val: u64 = block.line.eval(idx_within_block);
+        let block_bytes = &self.data[block.data_start_offset..];
+        let bitpacked_diff = block.bit_unpacker.get(idx_within_block, block_bytes);
+        interpoled_val.wrapping_add(bitpacked_diff)
+    }
+
+    fn min_value(&self) -> u64 {
+        // The BlockwiseLinearReader assumes a normalized vector.
+        0u64
+    }
+
+    fn max_value(&self) -> u64 {
+        self.normalized_header.max_value
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.normalized_header.num_vals
     }
 }
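The core trick of the new blockwise codec: per 512-value block a line is trained, and each value is stored as the wrapping difference to the line's prediction; reading applies the wrapping addition, so deviations below the line need no explicit offset (the trained `Line` is chosen so these wrapping diffs stay small). A standalone sketch of that round trip, with a plain closure standing in for `Line::eval`:

    fn main() {
        // Stand-in for a trained line; any deterministic predictor works.
        let predict = |idx: u64| 100 + 3 * idx;
        let vals: Vec<u64> = vec![100, 104, 107, 110, 113];

        // Write side: store wrapping diffs (these are then bitpacked).
        let diffs: Vec<u64> = vals
            .iter()
            .enumerate()
            .map(|(i, &v)| v.wrapping_sub(predict(i as u64)))
            .collect();

        // Read side: the wrapping add restores every value exactly.
        for (i, &diff) in diffs.iter().enumerate() {
            assert_eq!(predict(i as u64).wrapping_add(diff), vals[i]);
        }
    }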
348 fastfield_codecs/src/column.rs (Normal file)
@@ -0,0 +1,348 @@
+use std::marker::PhantomData;
+use std::ops::{Range, RangeInclusive};
+
+use tantivy_bitpacker::minmax;
+
+use crate::monotonic_mapping::StrictlyMonotonicFn;
+
+/// `Column` provides columnar access on a field.
+pub trait Column<T: PartialOrd = u64>: Send + Sync {
+    /// Return the value associated with the given idx.
+    ///
+    /// This accessor should return as fast as possible.
+    ///
+    /// # Panics
+    ///
+    /// May panic if `idx` is greater than the column length.
+    fn get_val(&self, idx: u32) -> T;
+
+    /// Fills an output buffer with the fast field values
+    /// associated with the `DocId` going from
+    /// `start` to `start + output.len()`.
+    ///
+    /// # Panics
+    ///
+    /// Must panic if `start + output.len()` is greater than
+    /// the segment's `maxdoc`.
+    #[inline]
+    fn get_range(&self, start: u64, output: &mut [T]) {
+        for (out, idx) in output.iter_mut().zip(start..) {
+            *out = self.get_val(idx as u32);
+        }
+    }
+
+    /// Get the positions of values which are in the provided value range.
+    ///
+    /// Note that position == docid for single value fast fields
+    #[inline]
+    fn get_docids_for_value_range(
+        &self,
+        value_range: RangeInclusive<T>,
+        doc_id_range: Range<u32>,
+        positions: &mut Vec<u32>,
+    ) {
+        let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
+
+        for idx in doc_id_range.start..doc_id_range.end {
+            let val = self.get_val(idx);
+            if value_range.contains(&val) {
+                positions.push(idx);
+            }
+        }
+    }
+
+    /// Returns the minimum value for this fast field.
+    ///
+    /// This min_value may not be exact.
+    /// For instance, the min value does not take possibly deleted documents
+    /// into account. All values are however guaranteed to be greater than or
+    /// equal to `.min_value()`.
+    fn min_value(&self) -> T;
+
+    /// Returns the maximum value for this fast field.
+    ///
+    /// This max_value may not be exact.
+    /// For instance, the max value does not take possibly deleted documents
+    /// into account. All values are however guaranteed to be less than or
+    /// equal to `.max_value()`.
+    fn max_value(&self) -> T;
+
+    /// The number of values in the column.
+    fn num_vals(&self) -> u32;
+
+    /// Returns an iterator over the data
+    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
+        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
+    }
+}
+
+/// VecColumn provides `Column` over a slice.
+pub struct VecColumn<'a, T = u64> {
+    values: &'a [T],
+    min_value: T,
+    max_value: T,
+}
+
+impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
+    fn get_val(&self, idx: u32) -> T {
+        (*self).get_val(idx)
+    }
+
+    fn min_value(&self) -> T {
+        (*self).min_value()
+    }
+
+    fn max_value(&self) -> T {
+        (*self).max_value()
+    }
+
+    fn num_vals(&self) -> u32 {
+        (*self).num_vals()
+    }
+
+    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
+        (*self).iter()
+    }
+
+    fn get_range(&self, start: u64, output: &mut [T]) {
+        (*self).get_range(start, output)
+    }
+}
+
+impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
+    fn get_val(&self, position: u32) -> T {
+        self.values[position as usize]
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
+        Box::new(self.values.iter().copied())
+    }
+
+    fn min_value(&self) -> T {
+        self.min_value
+    }
+
+    fn max_value(&self) -> T {
+        self.max_value
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.values.len() as u32
+    }
+
+    fn get_range(&self, start: u64, output: &mut [T]) {
+        output.copy_from_slice(&self.values[start as usize..][..output.len()])
+    }
+}
+
+impl<'a, T: Copy + Ord + Default, V> From<&'a V> for VecColumn<'a, T>
+where V: AsRef<[T]> + ?Sized
+{
+    fn from(values: &'a V) -> Self {
+        let values = values.as_ref();
+        let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
+        Self {
+            values,
+            min_value,
+            max_value,
+        }
+    }
+}
+
+struct MonotonicMappingColumn<C, T, Input> {
+    from_column: C,
+    monotonic_mapping: T,
+    _phantom: PhantomData<Input>,
+}
+
+/// Creates a view of a column transformed by a strictly monotonic mapping. See
+/// [`StrictlyMonotonicFn`].
+///
+/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
+/// monotonic_mapping.mapping() is expected to be injective, and we should always have
+/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
+///
+/// The inverse of the mapping is required for:
+/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
+/// The user provides the original value range and we need to monotonic map them in the same way the
+/// serialization does before calling the underlying column.
+///
+/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
+/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
+/// monotonic_mapping during serialization.
+pub fn monotonic_map_column<C, T, Input, Output>(
+    from_column: C,
+    monotonic_mapping: T,
+) -> impl Column<Output>
+where
+    C: Column<Input>,
+    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
+    Input: PartialOrd + Send + Sync + Clone,
+    Output: PartialOrd + Send + Sync + Clone,
+{
+    MonotonicMappingColumn {
+        from_column,
+        monotonic_mapping,
+        _phantom: PhantomData,
+    }
+}
+
+impl<C, T, Input, Output> Column<Output> for MonotonicMappingColumn<C, T, Input>
+where
+    C: Column<Input>,
+    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
+    Input: PartialOrd + Send + Sync + Clone,
+    Output: PartialOrd + Send + Sync + Clone,
+{
+    #[inline]
+    fn get_val(&self, idx: u32) -> Output {
+        let from_val = self.from_column.get_val(idx);
+        self.monotonic_mapping.mapping(from_val)
+    }
+
+    fn min_value(&self) -> Output {
+        let from_min_value = self.from_column.min_value();
+        self.monotonic_mapping.mapping(from_min_value)
+    }
+
+    fn max_value(&self) -> Output {
+        let from_max_value = self.from_column.max_value();
+        self.monotonic_mapping.mapping(from_max_value)
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.from_column.num_vals()
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
+        Box::new(
+            self.from_column
+                .iter()
+                .map(|el| self.monotonic_mapping.mapping(el)),
+        )
+    }
+
+    fn get_docids_for_value_range(
+        &self,
+        range: RangeInclusive<Output>,
+        doc_id_range: Range<u32>,
+        positions: &mut Vec<u32>,
+    ) {
+        self.from_column.get_docids_for_value_range(
+            self.monotonic_mapping.inverse(range.start().clone())
+                ..=self.monotonic_mapping.inverse(range.end().clone()),
+            doc_id_range,
+            positions,
+        )
+    }
+
+    // We voluntarily do not implement get_range as it yields a regression,
+    // and we do not have any specialized implementation anyway.
+}
+
+/// Wraps an iterator into a `Column`.
+pub struct IterColumn<T>(T);
+
+impl<T> From<T> for IterColumn<T>
+where T: Iterator + Clone + ExactSizeIterator
+{
+    fn from(iter: T) -> Self {
+        IterColumn(iter)
+    }
+}
+
+impl<T> Column<T::Item> for IterColumn<T>
+where
+    T: Iterator + Clone + ExactSizeIterator + Send + Sync,
+    T::Item: PartialOrd,
+{
+    fn get_val(&self, idx: u32) -> T::Item {
+        self.0.clone().nth(idx as usize).unwrap()
+    }
+
+    fn min_value(&self) -> T::Item {
+        self.0.clone().next().unwrap()
+    }
+
+    fn max_value(&self) -> T::Item {
+        self.0.clone().last().unwrap()
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.0.len() as u32
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
+        Box::new(self.0.clone())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::monotonic_mapping::{
+        StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternalBaseval,
+        StrictlyMonotonicMappingToInternalGCDBaseval,
+    };
+
+    #[test]
+    fn test_monotonic_mapping() {
+        let vals = &[3u64, 5u64][..];
+        let col = VecColumn::from(vals);
+        let mapped = monotonic_map_column(col, StrictlyMonotonicMappingToInternalBaseval::new(2));
+        assert_eq!(mapped.min_value(), 1u64);
+        assert_eq!(mapped.max_value(), 3u64);
+        assert_eq!(mapped.num_vals(), 2);
+        assert_eq!(mapped.get_val(0), 1);
+        assert_eq!(mapped.get_val(1), 3);
+    }
+
+    #[test]
+    fn test_range_as_col() {
+        let col = IterColumn::from(10..100);
+        assert_eq!(col.num_vals(), 90);
+        assert_eq!(col.max_value(), 99);
+    }
+
+    #[test]
+    fn test_monotonic_mapping_iter() {
+        let vals: Vec<u64> = (10..110u64).map(|el| el * 10).collect();
+        let col = VecColumn::from(&vals);
+        let mapped = monotonic_map_column(
+            col,
+            StrictlyMonotonicMappingInverter::from(
+                StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100),
+            ),
+        );
+        let val_i64s: Vec<u64> = mapped.iter().collect();
+        for i in 0..100 {
+            assert_eq!(val_i64s[i as usize], mapped.get_val(i));
+        }
+    }
+
+    #[test]
+    fn test_monotonic_mapping_get_range() {
+        let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
+        let col = VecColumn::from(&vals);
+        let mapped = monotonic_map_column(
+            col,
+            StrictlyMonotonicMappingInverter::from(
+                StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 0),
+            ),
+        );
+
+        assert_eq!(mapped.min_value(), 0u64);
+        assert_eq!(mapped.max_value(), 9900u64);
+        assert_eq!(mapped.num_vals(), 100);
+        let val_u64s: Vec<u64> = mapped.iter().collect();
+        assert_eq!(val_u64s.len(), 100);
+        for i in 0..100 {
+            assert_eq!(val_u64s[i as usize], mapped.get_val(i));
+            assert_eq!(val_u64s[i as usize], vals[i as usize] * 10);
+        }
+        let mut buf = [0u64; 20];
+        mapped.get_range(7, &mut buf[..]);
+        assert_eq!(&val_u64s[7..][..20], &buf);
+    }
+}
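Everything beyond `get_val`, `min_value`, `max_value` and `num_vals` comes for free: `get_range`, `iter` and `get_docids_for_value_range` are default methods. A small sketch of a custom column built on just the four required methods; the `SquaresColumn` type is hypothetical, and the `fastfield_codecs::Column` path assumes the trait is re-exported at the crate root:

    use fastfield_codecs::Column;

    // Hypothetical example type, not part of this diff.
    struct SquaresColumn {
        len: u32,
    }

    impl Column<u64> for SquaresColumn {
        fn get_val(&self, idx: u32) -> u64 {
            (idx as u64) * (idx as u64)
        }
        fn min_value(&self) -> u64 {
            0
        }
        fn max_value(&self) -> u64 {
            let last = (self.len as u64).saturating_sub(1);
            last * last
        }
        fn num_vals(&self) -> u32 {
            self.len
        }
    }

    fn main() {
        let col = SquaresColumn { len: 10 };
        let mut positions = Vec::new();
        // The default implementation scans the doc id range and filters by value.
        col.get_docids_for_value_range(9..=25, 0..col.num_vals(), &mut positions);
        assert_eq!(positions, vec![3, 4, 5]); // squares 9, 16, 25
    }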
43 fastfield_codecs/src/compact_space/blank_range.rs (Normal file)
@@ -0,0 +1,43 @@
+use std::ops::RangeInclusive;
+
+/// The range of a blank in value space.
+///
+/// A blank is an unoccupied space in the data.
+/// Use try_into() to construct.
+/// A range has to have a length of at least 3. Invalid ranges will be rejected.
+///
+/// Ordered by range length.
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub(crate) struct BlankRange {
+    blank_range: RangeInclusive<u128>,
+}
+impl TryFrom<RangeInclusive<u128>> for BlankRange {
+    type Error = &'static str;
+    fn try_from(range: RangeInclusive<u128>) -> Result<Self, Self::Error> {
+        let blank_size = range.end().saturating_sub(*range.start());
+        if blank_size < 2 {
+            Err("invalid range")
+        } else {
+            Ok(BlankRange { blank_range: range })
+        }
+    }
+}
+impl BlankRange {
+    pub(crate) fn blank_size(&self) -> u128 {
+        self.blank_range.end() - self.blank_range.start() + 1
+    }
+    pub(crate) fn blank_range(&self) -> RangeInclusive<u128> {
+        self.blank_range.clone()
+    }
+}
+
+impl Ord for BlankRange {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.blank_size().cmp(&other.blank_size())
+    }
+}
+impl PartialOrd for BlankRange {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.blank_size().cmp(&other.blank_size()))
+    }
+}
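The `TryFrom` guard above is what keeps tiny gaps out of the heap: a blank must span at least 3 values (`end - start >= 2`) to be worth tracking. Since `BlankRange` is crate-private, here is the rule restated as a standalone check:

    use std::ops::RangeInclusive;

    fn is_valid_blank(range: &RangeInclusive<u128>) -> bool {
        // Mirrors BlankRange::try_from: saturating_sub also rejects reversed ranges.
        range.end().saturating_sub(*range.start()) >= 2
    }

    fn main() {
        assert!(is_valid_blank(&(10..=12)));  // spans 3 values: kept
        assert!(!is_valid_blank(&(10..=11))); // spans 2 values: rejected
        assert!(!is_valid_blank(&(10..=10))); // spans 1 value: rejected
    }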
231
fastfield_codecs/src/compact_space/build_compact_space.rs
Normal file
231
fastfield_codecs/src/compact_space/build_compact_space.rs
Normal file
@@ -0,0 +1,231 @@
|
|||||||
|
use std::collections::{BTreeSet, BinaryHeap};
|
||||||
|
use std::iter;
|
||||||
|
use std::ops::RangeInclusive;
|
||||||
|
|
||||||
|
use itertools::Itertools;
|
||||||
|
|
||||||
|
use super::blank_range::BlankRange;
|
||||||
|
use super::{CompactSpace, RangeMapping};
|
||||||
|
|
||||||
|
/// Put the blanks for the sorted values into a binary heap
|
||||||
|
fn get_blanks(values_sorted: &BTreeSet<u128>) -> BinaryHeap<BlankRange> {
|
||||||
|
let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
|
||||||
|
for (first, second) in values_sorted.iter().tuple_windows() {
|
||||||
|
// Correctness Overflow: the values are deduped and sorted (BTreeSet property), that means
|
||||||
|
// there's always space between two values.
|
||||||
|
let blank_range = first + 1..=second - 1;
|
||||||
|
let blank_range: Result<BlankRange, _> = blank_range.try_into();
|
||||||
|
if let Ok(blank_range) = blank_range {
|
||||||
|
blanks.push(blank_range);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blanks
|
||||||
|
}
|
||||||
|
|
||||||
|
struct BlankCollector {
|
||||||
|
blanks: Vec<BlankRange>,
|
||||||
|
staged_blanks_sum: u128,
|
||||||
|
}
|
||||||
|
impl BlankCollector {
|
||||||
|
fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
blanks: vec![],
|
||||||
|
staged_blanks_sum: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fn stage_blank(&mut self, blank: BlankRange) {
|
||||||
|
self.staged_blanks_sum += blank.blank_size();
|
||||||
|
self.blanks.push(blank);
|
||||||
|
}
|
||||||
|
fn drain(&mut self) -> impl Iterator<Item = BlankRange> + '_ {
|
||||||
|
self.staged_blanks_sum = 0;
|
||||||
|
self.blanks.drain(..)
|
||||||
|
}
|
||||||
|
fn staged_blanks_sum(&self) -> u128 {
|
||||||
|
self.staged_blanks_sum
|
||||||
|
}
|
||||||
|
fn num_staged_blanks(&self) -> usize {
|
||||||
|
self.blanks.len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fn num_bits(val: u128) -> u8 {
|
||||||
|
(128u32 - val.leading_zeros()) as u8
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Will collect blanks and add them to compact space if more bits are saved than cost from
|
||||||
|
/// metadata.
|
||||||
|
pub fn get_compact_space(
|
    values_deduped_sorted: &BTreeSet<u128>,
    total_num_values: u32,
    cost_per_blank: usize,
) -> CompactSpace {
    let mut compact_space_builder = CompactSpaceBuilder::new();
    if values_deduped_sorted.is_empty() {
        return compact_space_builder.finish();
    }

    let mut blanks: BinaryHeap<BlankRange> = get_blanks(values_deduped_sorted);
    // Replace after stabilization of https://github.com/rust-lang/rust/issues/62924

    // We start with the space limited to min_value..=max_value.
    let min_value = *values_deduped_sorted.iter().next().unwrap_or(&0);
    let max_value = *values_deduped_sorted.iter().last().unwrap_or(&0);

    // +1 for the null sentinel; without it we would be off by one when min and max
    // cover the whole space.
    let mut amplitude_compact_space = (max_value - min_value).saturating_add(1);
    if min_value != 0 {
        compact_space_builder.add_blanks(iter::once(0..=min_value - 1));
    }
    if max_value != u128::MAX {
        compact_space_builder.add_blanks(iter::once(max_value + 1..=u128::MAX));
    }

    let mut amplitude_bits: u8 = num_bits(amplitude_compact_space);

    let mut blank_collector = BlankCollector::new();
    // We stage blanks until they reduce the compact space by at least one bit, and then
    // flush them if the metadata cost is lower than the total number of saved bits.
    // The binary heap hands us the gaps ordered by their size, largest first.
    while let Some(blank_range) = blanks.pop() {
        blank_collector.stage_blank(blank_range);

        let staged_spaces_sum: u128 = blank_collector.staged_blanks_sum();
        let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum;
        let amplitude_new_bits = num_bits(amplitude_new_compact_space);
        if amplitude_bits == amplitude_new_bits {
            continue;
        }
        let saved_bits = (amplitude_bits - amplitude_new_bits) as usize * total_num_values as usize;
        // TODO: Maybe calculate the exact cost of the blanks and run this more expensive
        // computation only when amplitude_new_bits changes.
        let cost = blank_collector.num_staged_blanks() * cost_per_blank;
        if cost >= saved_bits {
            // Continue here: although we walk over the blanks by size, we can potentially
            // still save a lot at the last bits, which are smaller blanks.
            //
            // E.g. if the first range reduces the compact space by 1000 from 2000 to 1000,
            // which saves 11-10=1 bit, the next range may reduce the compact space by 950
            // down to 50, which saves 10-6=4 bits.
            continue;
        }

        amplitude_compact_space = amplitude_new_compact_space;
        amplitude_bits = amplitude_new_bits;
        compact_space_builder.add_blanks(blank_collector.drain().map(|blank| blank.blank_range()));
    }

    // Special case: we didn't collect any blanks because
    // * the data is empty (early exit), or
    // * the algorithm decided it's not worth the cost, which can be the case for single values.
    //
    // We add one staged blank unconditionally here, so that an empty compact space is
    // reserved for empty data: an empty compact_space then means the data is empty, no
    // data is covered (conversely to all data), and we can assign null to it.
    if compact_space_builder.is_empty() {
        compact_space_builder.add_blanks(
            blank_collector
                .drain()
                .map(|blank| blank.blank_range())
                .take(1),
        );
    }

    let compact_space = compact_space_builder.finish();
    if max_value - min_value != u128::MAX {
        debug_assert_eq!(
            compact_space.amplitude_compact_space(),
            amplitude_compact_space
        );
    }
    compact_space
}
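To make the accept/reject decision in the loop above concrete, here is a minimal standalone sketch of the same cost model (not part of the commit; the constant mirrors `COST_PER_BLANK_IN_BITS` defined in `mod.rs` below): a staged set of blanks is only flushed once the bits saved across all values outweigh the metadata cost of storing the blanks.

// Minimal sketch of the cost model used above (hypothetical helper, not part
// of this commit).
fn blanks_worth_flushing(num_staged_blanks: usize, bits_saved_per_value: u8, num_values: u32) -> bool {
    // Mirrors COST_PER_BLANK_IN_BITS from mod.rs below.
    const COST_PER_BLANK_IN_BITS: usize = 36;
    let saved_bits = bits_saved_per_value as usize * num_values as usize;
    let cost = num_staged_blanks * COST_PER_BLANK_IN_BITS;
    cost < saved_bits
}
// E.g. with 1000 values and one staged blank saving 1 bit: 36 < 1000, so flush.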

#[derive(Debug, Clone, Eq, PartialEq)]
struct CompactSpaceBuilder {
    blanks: Vec<RangeInclusive<u128>>,
}

impl CompactSpaceBuilder {
    /// Creates a new compact space builder which will initially cover the whole space.
    fn new() -> Self {
        Self { blanks: Vec::new() }
    }

    /// Assumes that repeated `add_blanks` calls don't overlap and are not adjacent,
    /// e.g. [3..=5, 5..=10] is not allowed.
    ///
    /// Both of those assumptions hold when blanks are produced from sorted values.
    fn add_blanks(&mut self, blank: impl Iterator<Item = RangeInclusive<u128>>) {
        self.blanks.extend(blank);
    }

    fn is_empty(&self) -> bool {
        self.blanks.is_empty()
    }

    /// Convert blanks to covered space and assign the null value.
    fn finish(mut self) -> CompactSpace {
        // Sort by start; ranges are not allowed to overlap.
        self.blanks.sort_unstable_by_key(|blank| *blank.start());

        let mut covered_space = Vec::with_capacity(self.blanks.len());

        // Beginning of the blanks
        if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start) {
            if *first_blank_start != 0 {
                covered_space.push(0..=first_blank_start - 1);
            }
        }

        // Between the blanks
        let between_blanks = self.blanks.iter().tuple_windows().map(|(left, right)| {
            assert!(
                left.end() < right.start(),
                "overlapping or adjacent ranges detected"
            );
            *left.end() + 1..=*right.start() - 1
        });
        covered_space.extend(between_blanks);

        // End of the blanks
        if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end) {
            if *last_blank_end != u128::MAX {
                covered_space.push(last_blank_end + 1..=u128::MAX);
            }
        }

        if covered_space.is_empty() {
            covered_space.push(0..=0); // empty data case
        };

        let mut compact_start: u64 = 1; // 0 is reserved for `null`
        let mut ranges_mapping: Vec<RangeMapping> = Vec::with_capacity(covered_space.len());
        for cov in covered_space {
            let range_mapping = super::RangeMapping {
                value_range: cov,
                compact_start,
            };
            let covered_range_len = range_mapping.range_length();
            ranges_mapping.push(range_mapping);
            compact_start += covered_range_len as u64;
        }
        CompactSpace { ranges_mapping }
    }
}
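As a worked illustration of `finish` (hypothetical values matching the module-level example, not taken from the commit), the blanks-to-covered-space inversion behaves like this:

// Hypothetical walk-through of finish():
// blanks:        [0..=99, 105..=49_999, 50_002..=u128::MAX]
// covered space: [100..=104, 50_000..=50_001]
// mapping:       100..=104       -> compact 1..=5 (compact_start = 1)
//                50_000..=50_001 -> compact 6..=7 (compact_start = 6)
// Compact value 0 stays reserved for `null`.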

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_binary_heap_pop_order() {
        let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
        blanks.push((0..=10).try_into().unwrap());
        blanks.push((100..=200).try_into().unwrap());
        blanks.push((100..=110).try_into().unwrap());
        assert_eq!(blanks.pop().unwrap().blank_size(), 101);
        assert_eq!(blanks.pop().unwrap().blank_size(), 11);
    }
}
fastfield_codecs/src/compact_space/mod.rs (new file, 819 lines)
@@ -0,0 +1,819 @@
/// This codec takes a large number space (u128) and reduces it to a compact number space.
///
/// It will find spaces in the number range. For example:
///
/// 100, 101, 102, 103, 104, 50000, 50001
/// could be mapped to
/// 100..104 -> 0..4
/// 50000..50001 -> 5..6
///
/// Compact space 0..=6 requires far fewer bits than 100..=50001.
///
/// The codec was created to compress IP addresses, but may be employed in other use cases.
use std::{
    cmp::Ordering,
    collections::BTreeSet,
    io::{self, Write},
    ops::{Range, RangeInclusive},
};

use common::{BinarySerializable, CountingWriter, VInt, VIntU128};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};

use crate::compact_space::build_compact_space::get_compact_space;
use crate::Column;

mod blank_range;
mod build_compact_space;

/// The cost per blank is hard to pin down: since the blanks are delta-encoded, the actual
/// cost of a blank depends on the number of blanks.
///
/// The number was derived from a real dataset and is optimized for larger datasets.
const COST_PER_BLANK_IN_BITS: usize = 36;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CompactSpace {
    ranges_mapping: Vec<RangeMapping>,
}

/// Maps a range from the original space to compact_start..compact_start + range.len().
#[derive(Debug, Clone, Eq, PartialEq)]
struct RangeMapping {
    value_range: RangeInclusive<u128>,
    compact_start: u64,
}

impl RangeMapping {
    fn range_length(&self) -> u64 {
        (self.value_range.end() - self.value_range.start()) as u64 + 1
    }

    // The last value of the compact space in this range.
    fn compact_end(&self) -> u64 {
        self.compact_start + self.range_length() - 1
    }
}
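A quick worked example of these two helpers (hypothetical values, not from the commit): for a range of five covered values starting at compact position 1, the next mapping begins at compact position 6.

// Hypothetical example of the helpers above:
// let m = RangeMapping { value_range: 100..=104, compact_start: 1 };
// m.range_length() == 5  // 104 - 100 + 1
// m.compact_end()  == 5  // 1 + 5 - 1; the next range starts at compact 6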

impl BinarySerializable for CompactSpace {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.ranges_mapping.len() as u64).serialize(writer)?;

        let mut prev_value = 0;
        for value_range in self
            .ranges_mapping
            .iter()
            .map(|range_mapping| &range_mapping.value_range)
        {
            let blank_delta_start = value_range.start() - prev_value;
            VIntU128(blank_delta_start).serialize(writer)?;
            prev_value = *value_range.start();

            let blank_delta_end = value_range.end() - prev_value;
            VIntU128(blank_delta_end).serialize(writer)?;
            prev_value = *value_range.end();
        }

        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let num_ranges = VInt::deserialize(reader)?.0;
        let mut ranges_mapping: Vec<RangeMapping> = vec![];
        let mut value = 0u128;
        let mut compact_start = 1u64; // 0 is reserved for `null`
        for _ in 0..num_ranges {
            let blank_delta_start = VIntU128::deserialize(reader)?.0;
            value += blank_delta_start;
            let blank_start = value;

            let blank_delta_end = VIntU128::deserialize(reader)?.0;
            value += blank_delta_end;
            let blank_end = value;

            let range_mapping = RangeMapping {
                value_range: blank_start..=blank_end,
                compact_start,
            };
            let range_length = range_mapping.range_length();
            ranges_mapping.push(range_mapping);
            compact_start += range_length as u64;
        }

        Ok(Self { ranges_mapping })
    }
}

impl CompactSpace {
    /// Amplitude is the value range of the compact space, including the sentinel value used
    /// to identify null values. The compact space is `0..=amplitude`.
    ///
    /// It's only used to verify we don't exceed the u64 number space, which would indicate a bug.
    fn amplitude_compact_space(&self) -> u128 {
        self.ranges_mapping
            .last()
            .map(|last_range| last_range.compact_end() as u128)
            .unwrap_or(1) // compact space starts at 1, 0 == null
    }

    fn get_range_mapping(&self, pos: usize) -> &RangeMapping {
        &self.ranges_mapping[pos]
    }

    /// Returns `Ok(value in the compact space)`, or, if the value is outside the compact
    /// space, `Err(position where it would be inserted)`.
    fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
        self.ranges_mapping
            .binary_search_by(|probe| {
                let value_range = &probe.value_range;
                if value < *value_range.start() {
                    Ordering::Greater
                } else if value > *value_range.end() {
                    Ordering::Less
                } else {
                    Ordering::Equal
                }
            })
            .map(|pos| {
                let range_mapping = &self.ranges_mapping[pos];
                let pos_in_range = (value - range_mapping.value_range.start()) as u64;
                range_mapping.compact_start + pos_in_range
            })
    }

    /// Unpacks a value from the compact space (u64) back into the u128 space.
    fn compact_to_u128(&self, compact: u64) -> u128 {
        let pos = self
            .ranges_mapping
            .binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
            // Correctness (no underflow): the first range starts at the beginning of the
            // compact space, so the error position from the binary search can never be 0.
            .map_or_else(|e| e - 1, |v| v);

        let range_mapping = &self.ranges_mapping[pos];
        let diff = compact - range_mapping.compact_start;
        range_mapping.value_range.start() + diff as u128
    }
}
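A small round-trip sketch of the two conversions (hypothetical ranges taken from the module docs, not from the commit): a value inside a covered range survives the round trip, while a value in a hole reports the position of the next range.

// Hypothetical round-trip, assuming ranges 100..=104 (compact 1..=5) and
// 50_000..=50_001 (compact 6..=7):
// u128_to_compact(102)  == Ok(3)   // 100 maps to 1, so 102 maps to 3
// compact_to_u128(3)    == 102
// u128_to_compact(1005) == Err(1)  // falls into the hole before the second range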

pub struct CompactSpaceCompressor {
    params: IPCodecParams,
}

#[derive(Debug, Clone)]
pub struct IPCodecParams {
    compact_space: CompactSpace,
    bit_unpacker: BitUnpacker,
    min_value: u128,
    max_value: u128,
    num_vals: u32,
    num_bits: u8,
}

impl CompactSpaceCompressor {
    /// Collecting the values into a `BTreeSet` may cost a lot of memory; it is used to
    /// deduplicate and sort the values.
    pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
        let mut values_sorted = BTreeSet::new();
        values_sorted.extend(iter);
        let total_num_values = num_vals;

        let compact_space =
            get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
        let amplitude_compact_space = compact_space.amplitude_compact_space();

        assert!(
            amplitude_compact_space <= u64::MAX as u128,
            "case unsupported."
        );

        let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);
        let min_value = *values_sorted.iter().next().unwrap_or(&0);
        let max_value = *values_sorted.iter().last().unwrap_or(&0);
        assert_eq!(
            compact_space
                .u128_to_compact(max_value)
                .expect("could not convert max value to compact space"),
            amplitude_compact_space as u64
        );
        CompactSpaceCompressor {
            params: IPCodecParams {
                compact_space,
                bit_unpacker: BitUnpacker::new(num_bits),
                min_value,
                max_value,
                num_vals: total_num_values,
                num_bits,
            },
        }
    }

    fn write_footer(self, writer: &mut impl Write) -> io::Result<()> {
        let writer = &mut CountingWriter::wrap(writer);
        self.params.serialize(writer)?;

        let footer_len = writer.written_bytes() as u32;
        footer_len.serialize(writer)?;

        Ok(())
    }

    pub fn compress_into(
        self,
        vals: impl Iterator<Item = u128>,
        write: &mut impl Write,
    ) -> io::Result<()> {
        let mut bitpacker = BitPacker::default();
        for val in vals {
            let compact = self
                .params
                .compact_space
                .u128_to_compact(val)
                .map_err(|_| {
                    io::Error::new(
                        io::ErrorKind::InvalidData,
                        "Could not convert value to compact_space. This is a bug.",
                    )
                })?;
            bitpacker.write(compact, self.params.num_bits, write)?;
        }
        bitpacker.close(write)?;
        self.write_footer(write)?;
        Ok(())
    }
}
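Putting the two calls together, a minimal usage sketch (hypothetical values; the value iterator is consumed twice, once for training and once for encoding):

// Hypothetical usage of the compressor above:
// let vals = [2u128, 4, 1000, 1001];
// let compressor = CompactSpaceCompressor::train_from(vals.iter().copied(), vals.len() as u32);
// let mut out: Vec<u8> = Vec::new();
// compressor
//     .compress_into(vals.iter().copied(), &mut out)
//     .expect("compression failed");
// `out` now holds the bitpacked values followed by the serialized footer.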

#[derive(Debug, Clone)]
pub struct CompactSpaceDecompressor {
    data: OwnedBytes,
    params: IPCodecParams,
}

impl BinarySerializable for IPCodecParams {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        // header flags for future optional dictionary encoding
        let footer_flags = 0u64;
        footer_flags.serialize(writer)?;

        VIntU128(self.min_value).serialize(writer)?;
        VIntU128(self.max_value).serialize(writer)?;
        VIntU128(self.num_vals as u128).serialize(writer)?;
        self.num_bits.serialize(writer)?;

        self.compact_space.serialize(writer)?;

        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let _header_flags = u64::deserialize(reader)?;
        let min_value = VIntU128::deserialize(reader)?.0;
        let max_value = VIntU128::deserialize(reader)?.0;
        let num_vals = VIntU128::deserialize(reader)?.0 as u32;
        let num_bits = u8::deserialize(reader)?;
        let compact_space = CompactSpace::deserialize(reader)?;

        Ok(Self {
            compact_space,
            bit_unpacker: BitUnpacker::new(num_bits),
            min_value,
            max_value,
            num_vals,
            num_bits,
        })
    }
}

impl Column<u128> for CompactSpaceDecompressor {
    #[inline]
    fn get_val(&self, doc: u32) -> u128 {
        self.get(doc)
    }

    fn min_value(&self) -> u128 {
        self.min_value()
    }

    fn max_value(&self) -> u128 {
        self.max_value()
    }

    fn num_vals(&self) -> u32 {
        self.params.num_vals
    }

    #[inline]
    fn iter(&self) -> Box<dyn Iterator<Item = u128> + '_> {
        Box::new(self.iter())
    }

    #[inline]
    fn get_docids_for_value_range(
        &self,
        value_range: RangeInclusive<u128>,
        positions_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        self.get_positions_for_value_range(value_range, positions_range, positions)
    }
}

impl CompactSpaceDecompressor {
    pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
        let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
        let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;

        let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
        let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
        let decompressor = CompactSpaceDecompressor { data, params };

        Ok(decompressor)
    }

    /// Converting to compact space for the decompressor is more complex, since we may get
    /// values which are outside the compact space. E.g. if we map
    /// 1000 => 5
    /// 2000 => 6
    ///
    /// and we want a mapping for 1005, there is no equivalent compact space. We instead
    /// return an error with the index of the next range.
    fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
        self.params.compact_space.u128_to_compact(value)
    }

    fn compact_to_u128(&self, compact: u64) -> u128 {
        self.params.compact_space.compact_to_u128(compact)
    }

    /// Comparing on compact space: random dataset 0..24 (50% random hits) - 1.05 GElements/s
    /// Comparing on compact space: real dataset - 1.08 GElements/s
    ///
    /// Comparing on original space: real dataset - 0.06 GElements/s (not completely optimized)
    #[inline]
    pub fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<u128>,
        position_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        if value_range.start() > value_range.end() {
            return;
        }
        let position_range = position_range.start..position_range.end.min(self.num_vals());
        let from_value = *value_range.start();
        let to_value = *value_range.end();
        assert!(to_value >= from_value);
        let compact_from = self.u128_to_compact(from_value);
        let compact_to = self.u128_to_compact(to_value);

        // Quick return: if both bounds fall into the same non-mapped space, the range
        // can't cover any values, so we can early exit.
        match (compact_to, compact_from) {
            (Err(pos1), Err(pos2)) if pos1 == pos2 => return,
            _ => {}
        }

        let compact_from = compact_from.unwrap_or_else(|pos| {
            // Correctness (no out-of-bounds): if this value were Err(last_index + 1), we
            // would have early-exited, since the to_value also maps into the same
            // non-mapped space.
            let range_mapping = self.params.compact_space.get_range_mapping(pos);
            range_mapping.compact_start
        });
        // If there is no compact space, we go to the closest upper-bound compact space.
        let compact_to = compact_to.unwrap_or_else(|pos| {
            // Correctness (no underflow): if this value were Err(0), we would have
            // early-exited, since the from_value also maps into the same non-mapped space.

            // Get the end of the previous range.
            let pos = pos - 1;
            let range_mapping = self.params.compact_space.get_range_mapping(pos);
            range_mapping.compact_end()
        });

        let range = compact_from..=compact_to;

        let scan_num_docs = position_range.end - position_range.start;

        let step_size = 4;
        let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size;

        let mut push_if_in_range = |idx, val| {
            if range.contains(&val) {
                positions.push(idx);
            }
        };
        let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
        // Manually unrolled loop, four positions per iteration.
        for idx in (position_range.start..cutoff).step_by(step_size as usize) {
            let idx1 = idx;
            let idx2 = idx + 1;
            let idx3 = idx + 2;
            let idx4 = idx + 3;
            let val1 = get_val(idx1 as u32);
            let val2 = get_val(idx2 as u32);
            let val3 = get_val(idx3 as u32);
            let val4 = get_val(idx4 as u32);
            push_if_in_range(idx1, val1);
            push_if_in_range(idx2, val2);
            push_if_in_range(idx3, val3);
            push_if_in_range(idx4, val4);
        }

        // Handle the rest.
        for idx in cutoff..position_range.end {
            push_if_in_range(idx, get_val(idx as u32));
        }
    }

    #[inline]
    fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
        (0..self.params.num_vals)
            .map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u64)
    }

    #[inline]
    fn iter(&self) -> impl Iterator<Item = u128> + '_ {
        // TODO: Performance. It would be better to iterate on the ranges and check
        // existence via the bit_unpacker.
        self.iter_compact()
            .map(|compact| self.compact_to_u128(compact))
    }

    #[inline]
    pub fn get(&self, idx: u32) -> u128 {
        let compact = self.params.bit_unpacker.get(idx, &self.data);
        self.compact_to_u128(compact)
    }

    pub fn min_value(&self) -> u128 {
        self.params.min_value
    }

    pub fn max_value(&self) -> u128 {
        self.params.max_value
    }
}
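A minimal read-path sketch (hypothetical buffer produced by the compressor above; `bytes` is assumed, and `Column` must be in scope for `num_vals`):

// Hypothetical read path, assuming `bytes: OwnedBytes` was written by
// CompactSpaceCompressor::compress_into above:
// let decompressor = CompactSpaceDecompressor::open(bytes)?;
// let first = decompressor.get(0); // decode a single value
// let mut positions = Vec::new();
// decompressor.get_positions_for_value_range(
//     1000..=1005,                // value range in the original u128 space
//     0..decompressor.num_vals(), // scan all positions
//     &mut positions,
// );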

#[cfg(test)]
mod tests {
    use itertools::Itertools;
    use proptest::prelude::*;

    use super::*;
    use crate::format_version::read_format_version;
    use crate::null_index_footer::read_null_index_footer;
    use crate::serialize::U128Header;
    use crate::{open_u128, serialize_u128};

    #[test]
    fn compact_space_test() {
        let ips = &[
            2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
        ]
        .into_iter()
        .collect();
        let compact_space = get_compact_space(ips, ips.len() as u32, 11);
        let amplitude = compact_space.amplitude_compact_space();
        assert_eq!(amplitude, 17);
        assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
        assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
        assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);

        for (num1, num2) in (0..3).tuple_windows() {
            assert_eq!(
                compact_space.get_range_mapping(num1).compact_end() + 1,
                compact_space.get_range_mapping(num2).compact_start
            );
        }

        let mut output: Vec<u8> = Vec::new();
        compact_space.serialize(&mut output).unwrap();

        assert_eq!(
            compact_space,
            CompactSpace::deserialize(&mut &output[..]).unwrap()
        );

        for ip in ips {
            let compact = compact_space.u128_to_compact(*ip).unwrap();
            assert_eq!(compact_space.compact_to_u128(compact), *ip);
        }
    }

    #[test]
    fn compact_space_amplitude_test() {
        let ips = &[100000u128, 1000000].into_iter().collect();
        let compact_space = get_compact_space(ips, ips.len() as u32, 1);
        let amplitude = compact_space.amplitude_compact_space();
        assert_eq!(amplitude, 2);
    }

    fn test_all(mut data: OwnedBytes, expected: &[u128]) {
        let _header = U128Header::deserialize(&mut data);
        let decompressor = CompactSpaceDecompressor::open(data).unwrap();
        for (idx, expected_val) in expected.iter().cloned().enumerate() {
            let val = decompressor.get(idx as u32);
            assert_eq!(val, expected_val);

            let test_range = |range: RangeInclusive<u128>| {
                let expected_positions = expected
                    .iter()
                    .positions(|val| range.contains(val))
                    .map(|pos| pos as u32)
                    .collect::<Vec<_>>();
                let mut positions = Vec::new();
                decompressor.get_positions_for_value_range(
                    range,
                    0..decompressor.num_vals(),
                    &mut positions,
                );
                assert_eq!(positions, expected_positions);
            };

            test_range(expected_val.saturating_sub(1)..=expected_val);
            test_range(expected_val..=expected_val);
            test_range(expected_val..=expected_val.saturating_add(1));
            test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
        }
    }

    fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
        let mut out = Vec::new();
        serialize_u128(
            || u128_vals.iter().cloned(),
            u128_vals.len() as u32,
            &mut out,
        )
        .unwrap();

        let data = OwnedBytes::new(out);
        let (data, _format_version) = read_format_version(data).unwrap();
        let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
        test_all(data.clone(), u128_vals);

        data
    }

    #[test]
    fn test_range_1() {
        let vals = &[
            1u128,
            100u128,
            3u128,
            99999u128,
            100000u128,
            100001u128,
            4_000_211_221u128,
            4_000_211_222u128,
            333u128,
        ];
        let mut data = test_aux_vals(vals);

        let _header = U128Header::deserialize(&mut data);
        let decomp = CompactSpaceDecompressor::open(data).unwrap();
        let complete_range = 0..vals.len() as u32;
        for (pos, val) in vals.iter().enumerate() {
            let val = *val as u128;
            let pos = pos as u32;
            let mut positions = Vec::new();
            decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
            assert_eq!(positions, vec![pos]);
        }

        // Handle a docid range out of bounds.
        let positions = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
        assert_eq!(positions, vec![]);

        let positions =
            get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
        assert_eq!(positions, vec![0]);
        let positions =
            get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
        assert_eq!(positions, vec![0]);
        let positions =
            get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
        assert_eq!(positions, vec![0, 2]);
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                99999u128..=99999u128,
                complete_range.clone()
            ),
            vec![3]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                99999u128..=100000u128,
                complete_range.clone()
            ),
            vec![3, 4]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                99998u128..=100000u128,
                complete_range.clone()
            ),
            vec![3, 4]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                99998u128..=99999u128,
                complete_range.clone()
            ),
            vec![3]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                99998u128..=99998u128,
                complete_range.clone()
            ),
            vec![]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                333u128..=333u128,
                complete_range.clone()
            ),
            vec![8]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                332u128..=333u128,
                complete_range.clone()
            ),
            vec![8]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                332u128..=334u128,
                complete_range.clone()
            ),
            vec![8]
        );
        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                333u128..=334u128,
                complete_range.clone()
            ),
            vec![8]
        );

        assert_eq!(
            get_positions_for_value_range_helper(
                &decomp,
                4_000_211_221u128..=5_000_000_000u128,
                complete_range.clone()
            ),
            vec![6, 7]
        );
    }

    #[test]
    fn test_empty() {
        let vals = &[];
        let data = test_aux_vals(vals);
        let _decomp = CompactSpaceDecompressor::open(data).unwrap();
    }

    #[test]
    fn test_range_2() {
        let vals = &[
            100u128,
            99999u128,
            100000u128,
            100001u128,
            4_000_211_221u128,
            4_000_211_222u128,
            333u128,
        ];
        let mut data = test_aux_vals(vals);
        let _header = U128Header::deserialize(&mut data);
        let decomp = CompactSpaceDecompressor::open(data).unwrap();
        let complete_range = 0..vals.len() as u32;
        assert_eq!(
            get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone()),
            vec![]
        );
        assert_eq!(
            get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
            vec![0]
        );
        assert_eq!(
            get_positions_for_value_range_helper(&decomp, 0..=105, complete_range.clone()),
            vec![0]
        );
    }

    fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
        column: &C,
        value_range: RangeInclusive<T>,
        doc_id_range: Range<u32>,
    ) -> Vec<u32> {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
        positions
    }

    #[test]
    fn test_range_3() {
        let vals = &[
            200u128,
            201,
            202,
            203,
            204,
            204,
            206,
            207,
            208,
            209,
            210,
            1_000_000,
            5_000_000_000,
        ];
        let mut out = Vec::new();
        serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
        let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
        let complete_range = 0..vals.len() as u32;

        assert_eq!(
            get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
            vec![0]
        );

        assert_eq!(
            get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
            vec![0, 1]
        );

        assert_eq!(
            get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
            vec![0]
        );

        assert_eq!(
            get_positions_for_value_range_helper(
                &*decomp,
                1_000_000..=1_000_000,
                complete_range.clone()
            ),
            vec![11]
        );
    }

    #[test]
    fn test_bug1() {
        let vals = &[9223372036854775806];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_bug2() {
        let vals = &[340282366920938463463374607431768211455u128];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_bug3() {
        let vals = &[340282366920938463463374607431768211454];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_bug4() {
        let vals = &[340282366920938463463374607431768211455, 0];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_first_large_gaps() {
        let vals = &[1_000_000_000u128; 100];
        let _data = test_aux_vals(vals);
    }

    fn num_strategy() -> impl Strategy<Value = u128> {
        prop_oneof![
            1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
            1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
            1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
            1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
            20 => prop::num::u128::ANY,
        ]
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10))]

        #[test]
        fn compress_decompress_random(vals in proptest::collection::vec(num_strategy(), 1..1000)) {
            let _data = test_aux_vals(&vals);
        }
    }
}
fastfield_codecs/src/format_version.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
use std::io;

use common::BinarySerializable;
use ownedbytes::OwnedBytes;

const MAGIC_NUMBER: u16 = 4335u16;
const FASTFIELD_FORMAT_VERSION: u8 = 1;

pub(crate) fn append_format_version(output: &mut impl io::Write) -> io::Result<()> {
    FASTFIELD_FORMAT_VERSION.serialize(output)?;
    MAGIC_NUMBER.serialize(output)?;

    Ok(())
}

pub(crate) fn read_format_version(data: OwnedBytes) -> io::Result<(OwnedBytes, u8)> {
    let (data, magic_number_bytes) = data.rsplit(2);

    let magic_number = u16::deserialize(&mut magic_number_bytes.as_slice())?;
    if magic_number != MAGIC_NUMBER {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("magic number mismatch {} != {}", magic_number, MAGIC_NUMBER),
        ));
    }
    let (data, format_version_bytes) = data.rsplit(1);
    let format_version = u8::deserialize(&mut format_version_bytes.as_slice())?;
    if format_version > FASTFIELD_FORMAT_VERSION {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!(
                "Unsupported fastfield format version: {}. Max supported version: {}",
                format_version, FASTFIELD_FORMAT_VERSION
            ),
        ));
    }

    Ok((data, format_version))
}
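Reading these two functions together, the trailer sits at the very end of the data: the payload, then one version byte, then the two magic bytes; `read_format_version` peels them off back to front. A byte-level illustration (assumed, derived from the code above):

// Trailer layout appended by append_format_version (little-endian, per
// BinarySerializable):
//
//   [ payload ... ][ format_version: u8 ][ magic: u16 = 4335 ]
//
// read_format_version splits from the right: first the 2 magic bytes, then
// the 1 version byte, returning the remaining payload.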
fastfield_codecs/src/gcd.rs (new file, 170 lines)
@@ -0,0 +1,170 @@
use std::num::NonZeroU64;

use fastdivide::DividerU64;

/// Compute the GCD of two non-null numbers.
///
/// It is recommended, but not required, to feed values such that `large >= small`.
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
    loop {
        let rem: u64 = large.get() % small;
        if let Some(new_small) = NonZeroU64::new(rem) {
            (large, small) = (small, new_small);
        } else {
            return small;
        }
    }
}
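This is the classic Euclidean algorithm; as a quick worked example (values matching the tests below):

// Worked example of the Euclidean loop above:
// compute_gcd(25, 10): 25 % 10 = 5 -> (large, small) = (10, 5)
//                      10 % 5  = 0 -> return 5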

// Find the GCD of an iterator of numbers.
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
    let mut numbers = numbers.flat_map(NonZeroU64::new);
    let mut gcd: NonZeroU64 = numbers.next()?;
    if gcd.get() == 1 {
        return Some(gcd);
    }

    let mut gcd_divider = DividerU64::divide_by(gcd.get());
    for val in numbers {
        let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
        if remainder == 0 {
            continue;
        }
        gcd = compute_gcd(val, gcd);
        if gcd.get() == 1 {
            return Some(gcd);
        }

        gcd_divider = DividerU64::divide_by(gcd.get());
    }
    Some(gcd)
}
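The divisibility check above avoids a hardware division per value: with a precomputed `DividerU64`, `val - (val / gcd) * gcd` yields the remainder cheaply, and `compute_gcd` only runs when a value breaks the current candidate. A minimal sketch of the same check in isolation, assuming the `fastdivide` crate:

use fastdivide::DividerU64;

fn is_divisible_fast() {
    // Precompute once, reuse for every value; this is what makes the common
    // "already divisible" case cheap inside the loop above.
    let divider = DividerU64::divide_by(1000);
    let val = 5000u64;
    let remainder = val - divider.divide(val) * 1000;
    assert_eq!(remainder, 0);
}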

#[cfg(test)]
mod tests {
    use std::io;
    use std::num::NonZeroU64;

    use ownedbytes::OwnedBytes;

    use crate::gcd::{compute_gcd, find_gcd};
    use crate::{FastFieldCodecType, VecColumn};

    fn test_fastfield_gcd_i64_with_codec(
        codec_type: FastFieldCodecType,
        num_vals: usize,
    ) -> io::Result<()> {
        let mut vals: Vec<i64> = (-4..=(num_vals as i64) - 5).map(|val| val * 1000).collect();
        let mut buffer: Vec<u8> = Vec::new();
        crate::serialize(VecColumn::from(&vals), &mut buffer, &[codec_type])?;
        let buffer = OwnedBytes::new(buffer);
        let column = crate::open::<i64>(buffer.clone())?;
        assert_eq!(column.get_val(0), -4000i64);
        assert_eq!(column.get_val(1), -3000i64);
        assert_eq!(column.get_val(2), -2000i64);
        assert_eq!(column.max_value(), (num_vals as i64 - 5) * 1000);
        assert_eq!(column.min_value(), -4000i64);

        // Can't apply the gcd.
        let mut buffer_without_gcd = Vec::new();
        vals.pop();
        vals.push(1001i64);
        crate::serialize(
            VecColumn::from(&vals),
            &mut buffer_without_gcd,
            &[codec_type],
        )?;
        let buffer_without_gcd = OwnedBytes::new(buffer_without_gcd);
        assert!(buffer_without_gcd.len() > buffer.len());

        Ok(())
    }

    #[test]
    fn test_fastfield_gcd_i64() -> io::Result<()> {
        for &codec_type in &[
            FastFieldCodecType::Bitpacked,
            FastFieldCodecType::BlockwiseLinear,
            FastFieldCodecType::Linear,
        ] {
            test_fastfield_gcd_i64_with_codec(codec_type, 5500)?;
        }
        Ok(())
    }

    fn test_fastfield_gcd_u64_with_codec(
        codec_type: FastFieldCodecType,
        num_vals: usize,
    ) -> io::Result<()> {
        let mut vals: Vec<u64> = (1..=num_vals).map(|i| i as u64 * 1000u64).collect();
        let mut buffer: Vec<u8> = Vec::new();
        crate::serialize(VecColumn::from(&vals), &mut buffer, &[codec_type])?;
        let buffer = OwnedBytes::new(buffer);
        let column = crate::open::<u64>(buffer.clone())?;
        assert_eq!(column.get_val(0), 1000u64);
        assert_eq!(column.get_val(1), 2000u64);
        assert_eq!(column.get_val(2), 3000u64);
        assert_eq!(column.max_value(), num_vals as u64 * 1000);
        assert_eq!(column.min_value(), 1000u64);

        // Can't apply the gcd.
        let mut buffer_without_gcd = Vec::new();
        vals.pop();
        vals.push(1001u64);
        crate::serialize(
            VecColumn::from(&vals),
            &mut buffer_without_gcd,
            &[codec_type],
        )?;
        let buffer_without_gcd = OwnedBytes::new(buffer_without_gcd);
        assert!(buffer_without_gcd.len() > buffer.len());
        Ok(())
    }

    #[test]
    fn test_fastfield_gcd_u64() -> io::Result<()> {
        for &codec_type in &[
            FastFieldCodecType::Bitpacked,
            FastFieldCodecType::BlockwiseLinear,
            FastFieldCodecType::Linear,
        ] {
            test_fastfield_gcd_u64_with_codec(codec_type, 5500)?;
        }
        Ok(())
    }

    #[test]
    pub fn test_fastfield2() {
        let test_fastfield = crate::serialize_and_load(&[100u64, 200u64, 300u64]);
        assert_eq!(test_fastfield.get_val(0), 100);
        assert_eq!(test_fastfield.get_val(1), 200);
        assert_eq!(test_fastfield.get_val(2), 300);
    }

    #[test]
    fn test_compute_gcd() {
        let test_compute_gcd_aux = |large, small, expected| {
            let large = NonZeroU64::new(large).unwrap();
            let small = NonZeroU64::new(small).unwrap();
            let expected = NonZeroU64::new(expected).unwrap();
            assert_eq!(compute_gcd(small, large), expected);
            assert_eq!(compute_gcd(large, small), expected);
        };
        test_compute_gcd_aux(1, 4, 1);
        test_compute_gcd_aux(2, 4, 2);
        test_compute_gcd_aux(10, 25, 5);
        test_compute_gcd_aux(25, 25, 25);
    }

    #[test]
    fn find_gcd_test() {
        assert_eq!(find_gcd([0].into_iter()), None);
        assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
        assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
        assert_eq!(find_gcd([].into_iter()), None);
        assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
        assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
        assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
        assert_eq!(find_gcd([0, 0].into_iter()), None);
    }
}
fastfield_codecs/src/lib.rs
@@ -1,41 +1,75 @@
+#![warn(missing_docs)]
+#![cfg_attr(all(feature = "unstable", test), feature(test))]
+
+//! # `fastfield_codecs`
+//!
+//! - Columnar storage of data for tantivy [`Column`].
+//! - Encode data in different codecs.
+//! - Monotonically map values to u64/u128
+
 #[cfg(test)]
 #[macro_use]
 extern crate more_asserts;
 
+#[cfg(all(test, feature = "unstable"))]
+extern crate test;
+
 use std::io;
 use std::io::Write;
+use std::sync::Arc;
 
 use common::BinarySerializable;
+use compact_space::CompactSpaceDecompressor;
+use format_version::read_format_version;
+use monotonic_mapping::{
+    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
+    StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
+};
+use null_index_footer::read_null_index_footer;
 use ownedbytes::OwnedBytes;
+use serialize::{Header, U128Header};
 
-pub mod bitpacked;
-pub mod blockwise_linear;
-pub mod linear;
+mod bitpacked;
+mod blockwise_linear;
+mod compact_space;
+mod format_version;
+mod line;
+mod linear;
+mod monotonic_mapping;
+mod monotonic_mapping_u128;
+mod null_index;
+mod null_index_footer;
 
-pub trait FastFieldCodecDeserializer: Sized {
-    /// Reads the metadata and returns the CodecReader
-    fn open_from_bytes(bytes: OwnedBytes) -> std::io::Result<Self>
-    where Self: FastFieldDataAccess;
-}
+mod column;
+mod gcd;
+mod serialize;
 
-pub trait FastFieldDataAccess {
-    /// TODO: remove when codec is used
-    fn get_val(&self, doc: u64) -> u64;
-    fn min_value(&self) -> u64;
-    fn max_value(&self) -> u64;
-    fn num_vals(&self) -> u64;
-    /// Returns a iterator over the data
-    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = u64> + 'a> {
-        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
-    }
-}
+pub use null_index::*;
+
+use self::bitpacked::BitpackedCodec;
+use self::blockwise_linear::BlockwiseLinearCodec;
+pub use self::column::{monotonic_map_column, Column, IterColumn, VecColumn};
+use self::linear::LinearCodec;
+pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
+pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
+pub use self::serialize::{
+    estimate, serialize, serialize_and_load, serialize_u128, NormalizedHeader,
+};
 
 #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
 #[repr(u8)]
+/// Available codecs to use to encode the u64 (via [`MonotonicallyMappableToU64`]) converted data.
 pub enum FastFieldCodecType {
+    /// Bitpack all values in the value range. The number of bits is defined by the amplitude
+    /// `column.max_value() - column.min_value()`
     Bitpacked = 1,
+    /// Linear interpolation puts a line between the first and last value and then bitpacks the
+    /// values by the offset from the line. The number of bits is defined by the max deviation from
+    /// the line.
     Linear = 2,
+    /// Same as [`FastFieldCodecType::Linear`], but encodes in blocks of 512 elements.
     BlockwiseLinear = 3,
-    Gcd = 4,
 }
 
 impl BinarySerializable for FastFieldCodecType {
@@ -52,158 +86,245 @@ impl BinarySerializable for FastFieldCodecType {
 }
 
 impl FastFieldCodecType {
-    pub fn to_code(self) -> u8 {
+    pub(crate) fn to_code(self) -> u8 {
         self as u8
     }
 
-    pub fn from_code(code: u8) -> Option<Self> {
+    pub(crate) fn from_code(code: u8) -> Option<Self> {
         match code {
             1 => Some(Self::Bitpacked),
             2 => Some(Self::Linear),
             3 => Some(Self::BlockwiseLinear),
-            4 => Some(Self::Gcd),
             _ => None,
         }
     }
 }
 
+#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
+#[repr(u8)]
+/// Available codecs to use to encode the u128 (via [`MonotonicallyMappableToU128`]) converted data.
+pub enum U128FastFieldCodecType {
+    /// This codec takes a large number space (u128) and reduces it to a compact number space, by
+    /// removing the holes.
+    CompactSpace = 1,
+}
+
+impl BinarySerializable for U128FastFieldCodecType {
+    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
+        self.to_code().serialize(wrt)
+    }
+
+    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+        let code = u8::deserialize(reader)?;
+        let codec_type: Self = Self::from_code(code)
+            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`.")))?;
+        Ok(codec_type)
+    }
+}
+
+impl U128FastFieldCodecType {
+    pub(crate) fn to_code(self) -> u8 {
+        self as u8
+    }
+
+    pub(crate) fn from_code(code: u8) -> Option<Self> {
+        match code {
+            1 => Some(Self::CompactSpace),
+            _ => None,
+        }
+    }
+}
+
+/// Returns the correct codec reader wrapped in the `Arc` for the data.
+pub fn open_u128<Item: MonotonicallyMappableToU128>(
+    bytes: OwnedBytes,
+) -> io::Result<Arc<dyn Column<Item>>> {
+    let (bytes, _format_version) = read_format_version(bytes)?;
+    let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
+    let header = U128Header::deserialize(&mut bytes)?;
+    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
+    let reader = CompactSpaceDecompressor::open(bytes)?;
+    let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
+        StrictlyMonotonicMappingToInternal::<Item>::new().into();
+    Ok(Arc::new(monotonic_map_column(reader, inverted)))
+}
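For context, the u128 write and read entry points added by this diff pair up as follows; a minimal sketch, assuming the `serialize_u128` signature exercised by the tests elsewhere in this commit:

// Hedged usage sketch of the u128 entry points (not part of the diff):
// let vals: Vec<u128> = vec![2, 4, 1000, 1001];
// let mut out: Vec<u8> = Vec::new();
// // serialize_u128 takes a factory returning the value iterator, plus the count.
// fastfield_codecs::serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out)?;
// let column = fastfield_codecs::open_u128::<u128>(ownedbytes::OwnedBytes::new(out))?;
// assert_eq!(column.get_val(2), 1000);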
|
||||||
|
|
||||||
|
/// Returns the correct codec reader wrapped in the `Arc` for the data.
|
||||||
|
pub fn open<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::Result<Arc<dyn Column<T>>> {
|
||||||
|
let (bytes, _format_version) = read_format_version(bytes)?;
|
||||||
|
let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
|
||||||
|
let header = Header::deserialize(&mut bytes)?;
|
||||||
|
match header.codec_type {
|
||||||
|
FastFieldCodecType::Bitpacked => open_specific_codec::<BitpackedCodec, _>(bytes, &header),
|
||||||
|
FastFieldCodecType::Linear => open_specific_codec::<LinearCodec, _>(bytes, &header),
|
||||||
|
FastFieldCodecType::BlockwiseLinear => {
|
||||||
|
open_specific_codec::<BlockwiseLinearCodec, _>(bytes, &header)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
|
||||||
|
bytes: OwnedBytes,
|
||||||
|
header: &Header,
|
||||||
|
) -> io::Result<Arc<dyn Column<Item>>> {
|
||||||
|
let normalized_header = header.normalized();
|
||||||
|
let reader = C::open_from_bytes(bytes, normalized_header)?;
|
||||||
|
let min_value = header.min_value;
|
||||||
|
if let Some(gcd) = header.gcd {
|
||||||
|
let mapping = StrictlyMonotonicMappingInverter::from(
|
||||||
|
StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd.get(), min_value),
|
||||||
|
);
|
||||||
|
Ok(Arc::new(monotonic_map_column(reader, mapping)))
|
||||||
|
} else {
|
||||||
|
let mapping = StrictlyMonotonicMappingInverter::from(
|
||||||
|
StrictlyMonotonicMappingToInternalBaseval::new(min_value),
|
||||||
|
);
|
||||||
|
Ok(Arc::new(monotonic_map_column(reader, mapping)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// The FastFieldSerializerEstimate trait is required on all variants
|
/// The FastFieldSerializerEstimate trait is required on all variants
|
||||||
/// of fast field compressions, to decide which one to choose.
|
/// of fast field compressions, to decide which one to choose.
|
||||||
pub trait FastFieldCodecSerializer {
|
trait FastFieldCodec: 'static {
|
||||||
/// A codex needs to provide a unique name and id, which is
|
/// A codex needs to provide a unique name and id, which is
|
||||||
/// used for debugging and de/serialization.
|
/// used for debugging and de/serialization.
|
||||||
const CODEC_TYPE: FastFieldCodecType;
|
const CODEC_TYPE: FastFieldCodecType;
|
||||||
|
|
||||||
/// Check if the Codec is able to compress the data
|
type Reader: Column<u64> + 'static;
|
||||||
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess) -> bool;
|
|
||||||
|
/// Reads the metadata and returns the CodecReader
|
||||||
|
fn open_from_bytes(bytes: OwnedBytes, header: NormalizedHeader) -> io::Result<Self::Reader>;
|
||||||
|
|
||||||
|
/// Serializes the data using the serializer into write.
|
||||||
|
///
|
||||||
|
/// The column iterator should be preferred over using column `get_val` method for
|
||||||
|
/// performance reasons.
|
||||||
|
fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()>;
|
||||||
|
|
||||||
/// Returns an estimate of the compression ratio.
|
/// Returns an estimate of the compression ratio.
|
||||||
|
/// If the codec is not applicable, returns `None`.
|
||||||
|
///
|
||||||
/// The baseline is uncompressed 64bit data.
|
/// The baseline is uncompressed 64bit data.
|
||||||
///
|
///
|
||||||
/// It could make sense to also return a value representing
|
/// It could make sense to also return a value representing
|
||||||
/// computational complexity.
|
/// computational complexity.
|
||||||
fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32;
|
fn estimate(column: &dyn Column) -> Option<f32>;
|
||||||
|
|
||||||
/// Serializes the data using the serializer into write.
|
|
||||||
///
|
|
||||||
/// The fastfield_accessor iterator should be preferred over using fastfield_accessor for
|
|
||||||
/// performance reasons.
|
|
||||||
fn serialize(
|
|
||||||
write: &mut impl Write,
|
|
||||||
fastfield_accessor: &dyn FastFieldDataAccess,
|
|
||||||
) -> io::Result<()>;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
/// The list of all available codecs for u64 convertible data.
|
||||||
/// Statistics are used in codec detection and stored in the fast field footer.
|
pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
|
||||||
pub struct FastFieldStats {
|
FastFieldCodecType::Bitpacked,
|
||||||
pub min_value: u64,
|
FastFieldCodecType::BlockwiseLinear,
|
||||||
pub max_value: u64,
|
FastFieldCodecType::Linear,
|
||||||
pub num_vals: u64,
|
];
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> FastFieldDataAccess for &'a [u64] {
|
|
||||||
fn get_val(&self, position: u64) -> u64 {
|
|
||||||
self[position as usize]
|
|
||||||
}
|
|
||||||
|
|
||||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
|
|
||||||
Box::new((self as &[u64]).iter().cloned())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn min_value(&self) -> u64 {
|
|
||||||
self.iter().min().unwrap_or(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn max_value(&self) -> u64 {
|
|
||||||
self.iter().max().unwrap_or(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn num_vals(&self) -> u64 {
|
|
||||||
self.len() as u64
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FastFieldDataAccess for Vec<u64> {
|
|
||||||
fn get_val(&self, position: u64) -> u64 {
|
|
||||||
self[position as usize]
|
|
||||||
}
|
|
||||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
|
|
||||||
Box::new((self as &[u64]).iter().cloned())
|
|
||||||
}
|
|
||||||
fn min_value(&self) -> u64 {
|
|
||||||
self.iter().min().unwrap_or(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn max_value(&self) -> u64 {
|
|
||||||
self.iter().max().unwrap_or(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn num_vals(&self) -> u64 {
|
|
||||||
self.len() as u64
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
 #[cfg(test)]
 mod tests {
-    use proptest::arbitrary::any;
-    use proptest::proptest;
+    use proptest::prelude::*;
+    use proptest::strategy::Strategy;
+    use proptest::{prop_oneof, proptest};
 
-    use crate::bitpacked::{BitpackedReader, BitpackedSerializer};
-    use crate::blockwise_linear::{BlockwiseLinearReader, BlockwiseLinearSerializer};
-    use crate::linear::{LinearReader, LinearSerializer};
+    use crate::bitpacked::BitpackedCodec;
+    use crate::blockwise_linear::BlockwiseLinearCodec;
+    use crate::linear::LinearCodec;
+    use crate::serialize::Header;
 
-    pub fn create_and_validate<
-        S: FastFieldCodecSerializer,
-        R: FastFieldCodecDeserializer + FastFieldDataAccess,
-    >(
+    pub(crate) fn create_and_validate<Codec: FastFieldCodec>(
         data: &[u64],
         name: &str,
-    ) -> (f32, f32) {
-        if !S::is_applicable(&data) {
-            return (f32::MAX, 0.0);
-        }
-        let estimation = S::estimate(&data);
-        let mut out: Vec<u8> = Vec::new();
-        S::serialize(&mut out, &data).unwrap();
+    ) -> Option<(f32, f32)> {
+        let col = &VecColumn::from(data);
+        let header = Header::compute_header(col, &[Codec::CODEC_TYPE])?;
+        let normalized_col = header.normalize_column(col);
+        let estimation = Codec::estimate(&normalized_col)?;
+
+        let mut out = Vec::new();
+        let col = VecColumn::from(data);
+        serialize(col, &mut out, &[Codec::CODEC_TYPE]).unwrap();
 
         let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
 
-        let reader = R::open_from_bytes(OwnedBytes::new(out)).unwrap();
-        assert_eq!(reader.num_vals(), data.len() as u64);
-        for (doc, orig_val) in data.iter().enumerate() {
-            let val = reader.get_val(doc as u64);
-            if val != *orig_val {
-                panic!(
-                    "val {val:?} does not match orig_val {orig_val:?}, in data set {name}, data \
-                     {data:?}",
-                );
-            }
-        }
-        (estimation, actual_compression)
+        let reader = crate::open::<u64>(OwnedBytes::new(out)).unwrap();
+        assert_eq!(reader.num_vals(), data.len() as u32);
+        for (doc, orig_val) in data.iter().copied().enumerate() {
+            let val = reader.get_val(doc as u32);
+            assert_eq!(
+                val, orig_val,
+                "val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
+                 `{data:?}`",
+            );
+        }
+
+        if !data.is_empty() {
+            let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1);
+            let expected_positions: Vec<u32> = data
+                .iter()
+                .enumerate()
+                .filter(|(_, el)| **el == data[test_rand_idx])
+                .map(|(pos, _)| pos as u32)
+                .collect();
+            let mut positions = Vec::new();
+            reader.get_docids_for_value_range(
+                data[test_rand_idx]..=data[test_rand_idx],
+                0..data.len() as u32,
+                &mut positions,
+            );
+            assert_eq!(expected_positions, positions);
+        }
+        Some((estimation, actual_compression))
     }
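The `(estimation, actual_compression)` pair returned here is encoded bytes over raw `u64` bytes. A quick standalone sanity check of that arithmetic, nothing tantivy-specific assumed:

    fn compression_ratio(encoded_len_bytes: usize, num_vals: usize) -> f32 {
        // Raw cost is 8 bytes per u64 value; a ratio of 0.125 means
        // the codec spent about 1 byte per value.
        encoded_len_bytes as f32 / (num_vals as f32 * 8.0)
    }

    fn main() {
        // 6000 values stored in 750 bytes -> 1 bit per value.
        assert_eq!(compression_ratio(750, 6000), 0.015625);
    }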
 
     proptest! {
+        #![proptest_config(ProptestConfig::with_cases(100))]
+
         #[test]
-        fn test_proptest_small(data in proptest::collection::vec(any::<u64>(), 1..10)) {
-            create_and_validate::<LinearSerializer, LinearReader>(&data, "proptest linearinterpol");
-            create_and_validate::<BlockwiseLinearSerializer, BlockwiseLinearReader>(&data, "proptest multilinearinterpol");
-            create_and_validate::<BitpackedSerializer, BitpackedReader>(&data, "proptest bitpacked");
+        fn test_proptest_small_bitpacked(data in proptest::collection::vec(num_strategy(), 1..10)) {
+            create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
         }
 
         #[test]
-        fn test_proptest_large(data in proptest::collection::vec(any::<u64>(), 1..6000)) {
-            create_and_validate::<LinearSerializer, LinearReader>(&data, "proptest linearinterpol");
-            create_and_validate::<BlockwiseLinearSerializer, BlockwiseLinearReader>(&data, "proptest multilinearinterpol");
-            create_and_validate::<BitpackedSerializer, BitpackedReader>(&data, "proptest bitpacked");
+        fn test_proptest_small_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
+            create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
         }
 
+        #[test]
+        fn test_proptest_small_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
+            create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
+        }
     }
 
-    pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
+    proptest! {
+        #![proptest_config(ProptestConfig::with_cases(10))]
+
+        #[test]
+        fn test_proptest_large_bitpacked(data in proptest::collection::vec(num_strategy(), 1..6000)) {
+            create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
+        }
+
+        #[test]
+        fn test_proptest_large_linear(data in proptest::collection::vec(num_strategy(), 1..6000)) {
+            create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
+        }
+
+        #[test]
+        fn test_proptest_large_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..6000)) {
+            create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
+        }
+    }
+
+    fn num_strategy() -> impl Strategy<Value = u64> {
+        prop_oneof![
+            1 => prop::num::u64::ANY.prop_map(|num| u64::MAX - (num % 10) ),
+            1 => prop::num::u64::ANY.prop_map(|num| num % 10 ),
+            20 => prop::num::u64::ANY,
+        ]
+    }
+
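The weights in `num_strategy` make roughly one in eleven samples land within 10 of `u64::MAX` and one in eleven within 10 of zero, which is exactly where the codecs' wrapping arithmetic earns its keep. A standalone illustration of the invariant those extreme values exercise:

    fn main() {
        // The delta between neighboring values survives crossing the u64 boundary:
        let (a, b) = (u64::MAX - 1, 1u64); // "b" is 3 steps after "a", modulo 2^64.
        assert_eq!(b.wrapping_sub(a), 3);
        // Re-adding the delta with wrapping arithmetic reconstructs the value,
        // which is the property the linear codec's offset encoding relies on.
        assert_eq!(a.wrapping_add(3), b);
    }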
+    pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
         let mut data_and_names = vec![];
 
-        let data = (10..=20_u64).collect::<Vec<_>>();
+        let data = (10..=10_000_u64).collect::<Vec<_>>();
         data_and_names.push((data, "simple monotonically increasing"));
 
         data_and_names.push((
@@ -213,35 +334,38 @@ mod tests {
         data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
         data_and_names.push((vec![10], "single value"));
 
+        data_and_names.push((
+            vec![1572656989877777, 1170935903116329, 720575940379279, 0],
+            "overflow error",
+        ));
+
         data_and_names
     }
 
-    fn test_codec<
-        S: FastFieldCodecSerializer,
-        R: FastFieldDataAccess + FastFieldCodecDeserializer,
-    >() {
-        let codec_name = format!("{:?}", S::CODEC_TYPE);
-        for (data, dataset_name) in get_codec_test_data_sets() {
-            let (estimate, actual) = crate::tests::create_and_validate::<S, R>(&data, dataset_name);
-            let result = if estimate == f32::MAX {
-                "Disabled".to_string()
-            } else {
-                format!("Estimate `{estimate}` Actual `{actual}`")
-            };
+    fn test_codec<C: FastFieldCodec>() {
+        let codec_name = format!("{:?}", C::CODEC_TYPE);
+        for (data, dataset_name) in get_codec_test_datasets() {
+            let estimate_actual_opt: Option<(f32, f32)> =
+                crate::tests::create_and_validate::<C>(&data, dataset_name);
+            let result = if let Some((estimate, actual)) = estimate_actual_opt {
+                format!("Estimate `{estimate}` Actual `{actual}`")
+            } else {
+                "Disabled".to_string()
+            };
             println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
         }
     }
     #[test]
     fn test_codec_bitpacking() {
-        test_codec::<BitpackedSerializer, BitpackedReader>();
+        test_codec::<BitpackedCodec>();
     }
     #[test]
     fn test_codec_interpolation() {
-        test_codec::<LinearSerializer, LinearReader>();
+        test_codec::<LinearCodec>();
     }
     #[test]
     fn test_codec_multi_interpolation() {
-        test_codec::<BlockwiseLinearSerializer, BlockwiseLinearReader>();
+        test_codec::<BlockwiseLinearCodec>();
     }
 
     use super::*;
@@ -249,38 +373,50 @@ mod tests {
     #[test]
     fn estimation_good_interpolation_case() {
         let data = (10..=20000_u64).collect::<Vec<_>>();
+        let data: VecColumn = data.as_slice().into();
 
-        let linear_interpol_estimation = LinearSerializer::estimate(&data);
+        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
         assert_le!(linear_interpol_estimation, 0.01);
 
-        let multi_linear_interpol_estimation = BlockwiseLinearSerializer::estimate(&data);
+        let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap();
         assert_le!(multi_linear_interpol_estimation, 0.2);
-        assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
+        assert_lt!(linear_interpol_estimation, multi_linear_interpol_estimation);
 
-        let bitpacked_estimation = BitpackedSerializer::estimate(&data);
-        assert_le!(linear_interpol_estimation, bitpacked_estimation);
+        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
+        assert_lt!(linear_interpol_estimation, bitpacked_estimation);
     }
     #[test]
     fn estimation_test_bad_interpolation_case() {
-        let data = vec![200, 10, 10, 10, 10, 1000, 20];
+        let data: &[u64] = &[200, 10, 10, 10, 10, 1000, 20];
 
-        let linear_interpol_estimation = LinearSerializer::estimate(&data);
-        assert_le!(linear_interpol_estimation, 0.32);
+        let data: VecColumn = data.into();
+        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
+        assert_le!(linear_interpol_estimation, 0.34);
 
-        let bitpacked_estimation = BitpackedSerializer::estimate(&data);
-        assert_le!(bitpacked_estimation, linear_interpol_estimation);
+        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
+        assert_lt!(bitpacked_estimation, linear_interpol_estimation);
     }
 
+    #[test]
+    fn estimation_prefer_bitpacked() {
+        let data = VecColumn::from(&[10, 10, 10, 10]);
+        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
+        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
+        assert_lt!(bitpacked_estimation, linear_interpol_estimation);
+    }
+
     #[test]
     fn estimation_test_bad_interpolation_case_monotonically_increasing() {
-        let mut data = (200..=20000_u64).collect::<Vec<_>>();
+        let mut data: Vec<u64> = (201..=20000_u64).collect();
         data.push(1_000_000);
+        let data: VecColumn = data.as_slice().into();
 
-        // In this case the linear interpolation can't in fact be worse than bitpacking,
-        // but the estimator adds some threshold, which leads to a worse estimate.
-        let linear_interpol_estimation = LinearSerializer::estimate(&data);
+        // In this case the linear interpolation can't in fact be worse than bitpacking,
+        // but the estimator adds some threshold, which leads to a worse estimate.
+        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
         assert_le!(linear_interpol_estimation, 0.35);
 
-        let bitpacked_estimation = BitpackedSerializer::estimate(&data);
+        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
         assert_le!(bitpacked_estimation, 0.32);
         assert_le!(bitpacked_estimation, linear_interpol_estimation);
     }
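For context on what these thresholds compare against: a bitpacked column costs roughly `compute_num_bits(max_value)` bits per value, so its ratio against raw 64-bit storage is easy to work out by hand. A hedged standalone version of that arithmetic (`num_bits` reimplemented here; the real codec uses `tantivy_bitpacker::compute_num_bits`):

    /// Bits needed to represent `val` (0 takes 0 bits when bitpacked).
    fn num_bits(val: u64) -> u8 {
        (64 - val.leading_zeros()) as u8
    }

    fn main() {
        // Values up to 20_000 fit in 15 bits, so the bitpacked ratio is ~15/64.
        assert_eq!(num_bits(20_000), 15);
        assert!((num_bits(20_000) as f32 / 64.0 - 0.234).abs() < 0.001);
    }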
@@ -294,6 +430,134 @@ mod tests {
             count_codec += 1;
         }
     }
-        assert_eq!(count_codec, 4);
+        assert_eq!(count_codec, 3);
+    }
+}
+
+#[cfg(all(test, feature = "unstable"))]
+mod bench {
+    use std::sync::Arc;
+
+    use ownedbytes::OwnedBytes;
+    use rand::rngs::StdRng;
+    use rand::{Rng, SeedableRng};
+    use test::{self, Bencher};
+
+    use super::*;
+    use crate::Column;
+
+    fn get_data() -> Vec<u64> {
+        let mut rng = StdRng::seed_from_u64(2u64);
+        let mut data: Vec<_> = (100..55000_u64)
+            .map(|num| num + rng.gen::<u8>() as u64)
+            .collect();
+        data.push(99_000);
+        data.insert(1000, 2000);
+        data.insert(2000, 100);
+        data.insert(3000, 4100);
+        data.insert(4000, 100);
+        data.insert(5000, 800);
+        data
+    }
+
+    #[inline(never)]
+    fn value_iter() -> impl Iterator<Item = u64> {
+        0..20_000
+    }
+    fn get_reader_for_bench<Codec: FastFieldCodec>(data: &[u64]) -> Codec::Reader {
+        let mut bytes = Vec::new();
+        let min_value = *data.iter().min().unwrap();
+        let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
+        let col = VecColumn::from(&data);
+        let normalized_header = crate::NormalizedHeader {
+            num_vals: col.num_vals(),
+            max_value: col.max_value(),
+        };
+        Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
+        Codec::open_from_bytes(OwnedBytes::new(bytes), normalized_header).unwrap()
+    }
+    fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
+        let col = get_reader_for_bench::<Codec>(data);
+        b.iter(|| {
+            let mut sum = 0u64;
+            for pos in value_iter() {
+                let val = col.get_val(pos as u32);
+                sum = sum.wrapping_add(val);
+            }
+            sum
+        });
+    }
+
+    #[inline(never)]
+    fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn Column>) {
+        b.iter(|| {
+            let mut sum = 0u64;
+            for pos in value_iter() {
+                let val = col.get_val(pos as u32);
+                sum = sum.wrapping_add(val);
+            }
+            sum
+        });
+    }
+
+    fn bench_get_dynamic<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
+        let col = Arc::new(get_reader_for_bench::<Codec>(data));
+        bench_get_dynamic_helper(b, col);
+    }
+    fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
+        let min_value = *data.iter().min().unwrap();
+        let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
+
+        let mut bytes = Vec::new();
+        b.iter(|| {
+            bytes.clear();
+            Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
+        });
+    }
+
+    #[bench]
+    fn bench_fastfield_bitpack_create(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_create::<BitpackedCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_create::<LinearCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_create::<BlockwiseLinearCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_bitpack_get(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_get::<BitpackedCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_get_dynamic::<BitpackedCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_get::<LinearCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_get_dynamic::<LinearCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_get::<BlockwiseLinearCodec>(b, &data);
+    }
+    #[bench]
+    fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
+        let data: Vec<_> = get_data();
+        bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
     }
 }
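These benches fold every fetched value into a `wrapping_add` accumulator and return it from the closure so the compiler cannot dead-code-eliminate the reads. A minimal standalone sketch of the same pattern, using `std::hint::black_box` instead of a `Bencher`:

    use std::hint::black_box;

    fn sum_column(get_val: impl Fn(u32) -> u64, n: u32) -> u64 {
        let mut sum = 0u64;
        for pos in 0..n {
            // wrapping_add keeps the accumulator total even for adversarial values.
            sum = sum.wrapping_add(get_val(pos));
        }
        sum
    }

    fn main() {
        // black_box on input and output keeps the loop observable to the optimizer.
        let total = sum_column(|pos| black_box(pos as u64 * 3), black_box(20_000));
        black_box(total);
    }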
fastfield_codecs/src/line.rs (new file, 222 lines)
@@ -0,0 +1,222 @@
+use std::io;
+use std::num::NonZeroU32;
+
+use common::{BinarySerializable, VInt};
+
+use crate::Column;
+
+const MID_POINT: u64 = (1u64 << 32) - 1u64;
+
+/// `Line` describes a line function `y: ax + b` using integer
+/// arithmetic.
+///
+/// The slope is in fact a decimal split into a 32 bit integer value,
+/// and a 32-bit decimal value.
+///
+/// The multiplication then becomes
+/// `y = ((m * x) >> 32) + b`.
+#[derive(Debug, Clone, Copy, Default)]
+pub struct Line {
+    slope: u64,
+    intercept: u64,
+}
+
+/// Compute the line slope.
+///
+/// This function has the nice property of being
+/// invariant by translation.
+/// `
+/// compute_slope(y0, y1)
+/// = compute_slope(y0 + X % 2^64, y1 + X % 2^64)
+/// `
+fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
+    let dy = y1.wrapping_sub(y0);
+    let sign = dy <= (1 << 63);
+    let abs_dy = if sign {
+        y1.wrapping_sub(y0)
+    } else {
+        y0.wrapping_sub(y1)
+    };
+    if abs_dy >= 1 << 32 {
+        // This is outside of the realm we handle.
+        // Let's just bail.
+        return 0u64;
+    }
+
+    let abs_slope = (abs_dy << 32) / num_vals.get() as u64;
+    if sign {
+        abs_slope
+    } else {
+        // The complement does indeed create the
+        // opposite, decreasing slope...
+        //
+        // Intuitively (without the bitshifts and % u64::MAX)
+        // ```
+        // (x + shift) * (u64::MAX - abs_slope)
+        //   - (x * (u64::MAX - abs_slope))
+        // = - shift * abs_slope
+        // ```
+        u64::MAX - abs_slope
+    }
+}
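To make the 32.32 fixed-point trick above concrete, here is a standalone sketch (the `eval_fixed_point` helper is hypothetical, and it drops the sign-extension cast the real `Line::eval` applies): the slope field stores `m * 2^32`, so one integer multiply plus a right shift evaluates `m * x` with no floating point.

    fn eval_fixed_point(slope_fp: u64, intercept: u64, x: u32) -> u64 {
        // slope_fp encodes the real slope m as m * 2^32 (a 32.32 fixed-point value).
        let linear_part = (x as u64).wrapping_mul(slope_fp) >> 32;
        intercept.wrapping_add(linear_part)
    }

    fn main() {
        // A slope of 2.5 becomes 2.5 * 2^32 in fixed point (exact here).
        let slope_fp = (25u64 << 32) / 10;
        // y = 2.5 * x + 7, evaluated without any floating point.
        assert_eq!(eval_fixed_point(slope_fp, 7, 4), 17);
        assert_eq!(eval_fixed_point(slope_fp, 7, 100), 257);
    }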
+
+impl Line {
+    #[inline(always)]
+    pub fn eval(&self, x: u32) -> u64 {
+        let linear_part = ((x as u64).wrapping_mul(self.slope) >> 32) as i32 as u64;
+        self.intercept.wrapping_add(linear_part)
+    }
+
+    // Same as train, but the intercept is only estimated from the provided sample positions.
+    pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
+        let first_val = sample_positions_and_values[0].1;
+        let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
+        let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
+        Self::train_from(
+            first_val,
+            last_val,
+            num_vals as u32,
+            sample_positions_and_values.iter().cloned(),
+        )
+    }
+
+    // The intercept is only computed from the provided positions.
+    fn train_from(
+        first_val: u64,
+        last_val: u64,
+        num_vals: u32,
+        positions_and_values: impl Iterator<Item = (u64, u64)>,
+    ) -> Self {
+        // TODO replace with let else
+        let idx_last_val = if let Some(idx_last_val) = NonZeroU32::new(num_vals - 1) {
+            idx_last_val
+        } else {
+            return Line::default();
+        };
+
+        let y0 = first_val;
+        let y1 = last_val;
+
+        // We first independently pick our slope.
+        let slope = compute_slope(y0, y1, idx_last_val);
+
+        // We picked our slope. Note that it does not have to be perfect.
+        // Now we need to compute the best intercept.
+        //
+        // Intuitively, the best intercept is such that the line passes through one of the
+        // `(i, ys[i])`.
+        //
+        // The best intercept therefore has the form
+        // `ys[i] - line.eval(i)` (using wrapping arithmetic).
+        // In other words, the best intercept is one of the `ys[i] - line.eval(i)`,
+        // and our task is just to pick the one that minimizes our error.
+        //
+        // Without sorting our values, this is a difficult problem.
+        // We however rely on the following trick...
+        //
+        // We only focus on the case where the interpolation is half decent.
+        // If the line interpolation is doing its job on a dataset suited for it,
+        // we can hope that the maximum error won't be larger than `u64::MAX / 2`.
+        //
+        // In other words, even without the intercept, the values `ys[i] - line.eval(i)` will all
+        // be within an interval that takes less than half of the modulo space of `u64`.
+        //
+        // Our task is therefore to identify this interval.
+        // Here we simply translate all of our values by `y0 - 2^63` and pick the min.
+        let mut line = Line {
+            slope,
+            intercept: 0,
+        };
+        let heuristic_shift = y0.wrapping_sub(MID_POINT);
+        line.intercept = positions_and_values
+            .map(|(pos, y)| y.wrapping_sub(line.eval(pos as u32)))
+            .min_by_key(|&val| val.wrapping_sub(heuristic_shift))
+            .unwrap_or(0u64); //< Never happens.
+        line
+    }
+
+    /// Returns a line that attempts to approximate a function
+    /// f: i in [0..ys.num_vals()) -> ys[i].
+    ///
+    /// - The approximation is always lower than the actual value.
+    ///   Or more rigorously, formally `f(i).wrapping_sub(ys[i])` is small
+    ///   for any i in [0..ys.len()).
+    /// - It computes without panicking for any input.
+    ///
+    /// This function is only invariant by translation if all of the
+    /// `ys` are packed into half of the space. (See the heuristic in `train_from`.)
+    pub fn train(ys: &dyn Column) -> Self {
+        let first_val = ys.iter().next().unwrap();
+        let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
+        Self::train_from(
+            first_val,
+            last_val,
+            ys.num_vals(),
+            ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
+        )
+    }
+}
+
+impl BinarySerializable for Line {
+    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+        VInt(self.slope).serialize(writer)?;
+        VInt(self.intercept).serialize(writer)?;
+        Ok(())
+    }
+
+    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+        let slope = VInt::deserialize(reader)?.0;
+        let intercept = VInt::deserialize(reader)?.0;
+        Ok(Line { slope, intercept })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::VecColumn;
+
+    /// Test training a line and ensuring that the maximum difference between
+    /// the data points and the line is `expected`.
+    ///
+    /// This function applies several translations to the data for better coverage.
+    #[track_caller]
+    fn test_line_interpol_with_translation(ys: &[u64], expected: Option<u64>) {
+        let mut translations = vec![0, 100, u64::MAX / 2, u64::MAX, u64::MAX - 1];
+        translations.extend_from_slice(ys);
+        for translation in translations {
+            let translated_ys: Vec<u64> = ys
+                .iter()
+                .copied()
+                .map(|y| y.wrapping_add(translation))
+                .collect();
+            let largest_err = test_eval_max_err(&translated_ys);
+            assert_eq!(largest_err, expected);
+        }
+    }
+
+    fn test_eval_max_err(ys: &[u64]) -> Option<u64> {
+        let line = Line::train(&VecColumn::from(&ys));
+        ys.iter()
+            .enumerate()
+            .map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))
+            .max()
+    }
+
+    #[test]
+    fn test_train() {
+        test_line_interpol_with_translation(&[11, 11, 11, 12, 12, 13], Some(1));
+        test_line_interpol_with_translation(&[13, 12, 12, 11, 11, 11], Some(1));
+        test_line_interpol_with_translation(&[13, 13, 12, 11, 11, 11], Some(1));
+        test_line_interpol_with_translation(&[13, 13, 12, 11, 11, 11], Some(1));
+        test_line_interpol_with_translation(&[u64::MAX - 1, 0, 0, 1], Some(1));
+        test_line_interpol_with_translation(&[u64::MAX - 1, u64::MAX, 0, 1], Some(0));
+        test_line_interpol_with_translation(&[0, 1, 2, 3, 5], Some(0));
+        test_line_interpol_with_translation(&[1, 2, 3, 4], Some(0));
+
+        let data: Vec<u64> = (0..255).collect();
+        test_line_interpol_with_translation(&data, Some(0));
+        let data: Vec<u64> = (0..255).map(|el| el * 2).collect();
+        test_line_interpol_with_translation(&data, Some(0));
+    }
+}
@@ -1,308 +1,184 @@
-use std::io::{self, Read, Write};
-use std::ops::Sub;
+use std::io::{self, Write};
 
-use common::{BinarySerializable, FixedSize};
+use common::BinarySerializable;
 use ownedbytes::OwnedBytes;
 use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
 
-use crate::{
-    FastFieldCodecDeserializer, FastFieldCodecSerializer, FastFieldCodecType, FastFieldDataAccess,
-};
+use crate::line::Line;
+use crate::serialize::NormalizedHeader;
+use crate::{Column, FastFieldCodec, FastFieldCodecType};
 
 /// Depending on the field type, a different
 /// fast field is required.
 #[derive(Clone)]
 pub struct LinearReader {
     data: OwnedBytes,
-    bit_unpacker: BitUnpacker,
-    pub footer: LinearFooter,
-    pub slope: f32,
+    linear_params: LinearParams,
+    header: NormalizedHeader,
 }
 
-#[derive(Clone, Debug)]
-pub struct LinearFooter {
-    pub relative_max_value: u64,
-    pub offset: u64,
-    pub first_val: u64,
-    pub last_val: u64,
-    pub num_vals: u64,
-    pub min_value: u64,
-    pub max_value: u64,
-}
-
-impl BinarySerializable for LinearFooter {
-    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
-        self.relative_max_value.serialize(write)?;
-        self.offset.serialize(write)?;
-        self.first_val.serialize(write)?;
-        self.last_val.serialize(write)?;
-        self.num_vals.serialize(write)?;
-        self.min_value.serialize(write)?;
-        self.max_value.serialize(write)?;
-        Ok(())
-    }
-
-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearFooter> {
-        Ok(LinearFooter {
-            relative_max_value: u64::deserialize(reader)?,
-            offset: u64::deserialize(reader)?,
-            first_val: u64::deserialize(reader)?,
-            last_val: u64::deserialize(reader)?,
-            num_vals: u64::deserialize(reader)?,
-            min_value: u64::deserialize(reader)?,
-            max_value: u64::deserialize(reader)?,
-        })
-    }
-}
-
-impl FixedSize for LinearFooter {
-    const SIZE_IN_BYTES: usize = 56;
-}
-
-impl FastFieldCodecDeserializer for LinearReader {
-    /// Opens a fast field given a file.
-    fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self> {
-        let footer_offset = bytes.len() - LinearFooter::SIZE_IN_BYTES;
-        let (data, mut footer) = bytes.split(footer_offset);
-        let footer = LinearFooter::deserialize(&mut footer)?;
-        let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
-        let num_bits = compute_num_bits(footer.relative_max_value);
-        let bit_unpacker = BitUnpacker::new(num_bits);
-        Ok(LinearReader {
-            data,
-            bit_unpacker,
-            footer,
-            slope,
-        })
-    }
-}
-
-impl FastFieldDataAccess for LinearReader {
+impl Column for LinearReader {
     #[inline]
-    fn get_val(&self, doc: u64) -> u64 {
-        let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
-        (calculated_value + self.bit_unpacker.get(doc, &self.data)) - self.footer.offset
+    fn get_val(&self, doc: u32) -> u64 {
+        let interpolated_val: u64 = self.linear_params.line.eval(doc);
+        let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
+        interpolated_val.wrapping_add(bitpacked_diff)
     }
 
     #[inline]
     fn min_value(&self) -> u64 {
-        self.footer.min_value
+        // The LinearReader assumes a normalized vector.
+        0u64
     }
 
     #[inline]
     fn max_value(&self) -> u64 {
-        self.footer.max_value
+        self.header.max_value
    }
 
     #[inline]
-    fn num_vals(&self) -> u64 {
-        self.footer.num_vals
+    fn num_vals(&self) -> u32 {
+        self.header.num_vals
     }
 }
 
 /// Fastfield serializer, which tries to guess values by linear interpolation
 /// and stores the difference bitpacked.
-pub struct LinearSerializer {}
+pub struct LinearCodec;
 
-#[inline]
-pub(crate) fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
-    if num_vals <= 1 {
-        return 0.0;
-    }
-    // We calculate the slope with f64 high precision and use the result in lower precision f32.
-    // This is done in order to handle estimations for very large values like i64::MAX.
-    let diff = diff(last_val, first_val);
-    (diff / (num_vals - 1) as f64) as f32
-}
+#[derive(Debug, Clone)]
+struct LinearParams {
+    line: Line,
+    bit_unpacker: BitUnpacker,
+}
 
-/// Delay the cast, to improve precision for very large u64 values.
-///
-/// Since i64 is mapped monotonically to u64 space, 0i64 is after the mapping i64::MAX.
-/// So very large values are not uncommon.
-///
-/// ```rust
-/// let val1 = i64::MAX;
-/// let val2 = i64::MAX - 100;
-/// assert_eq!(val1 - val2, 100);
-/// assert_eq!(val1 as f64 - val2 as f64, 0.0);
-/// ```
-fn diff(val1: u64, val2: u64) -> f64 {
-    if val1 >= val2 {
-        (val1 - val2) as f64
-    } else {
-        (val2 - val1) as f64 * -1.0
-    }
-}
+impl BinarySerializable for LinearParams {
+    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+        self.line.serialize(writer)?;
+        self.bit_unpacker.bit_width().serialize(writer)?;
+        Ok(())
+    }
+
+    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+        let line = Line::deserialize(reader)?;
+        let bit_width = u8::deserialize(reader)?;
+        Ok(Self {
+            line,
+            bit_unpacker: BitUnpacker::new(bit_width),
+        })
+    }
+}
 
-#[inline]
-pub fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
-    if slope < 0.0 {
-        first_val - (pos as f32 * -slope) as u64
-    } else {
-        first_val + (pos as f32 * slope) as u64
-    }
-}
-
-impl FastFieldCodecSerializer for LinearSerializer {
+impl FastFieldCodec for LinearCodec {
     const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;
 
+    type Reader = LinearReader;
+
+    /// Opens a fast field given a file.
+    fn open_from_bytes(mut data: OwnedBytes, header: NormalizedHeader) -> io::Result<Self::Reader> {
+        let linear_params = LinearParams::deserialize(&mut data)?;
+        Ok(LinearReader {
+            data,
+            linear_params,
+            header,
+        })
+    }
+
     /// Creates a new fast field serializer.
-    fn serialize(
-        write: &mut impl Write,
-        fastfield_accessor: &dyn FastFieldDataAccess,
-    ) -> io::Result<()> {
-        assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
-
-        let first_val = fastfield_accessor.get_val(0);
-        let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
-        let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
-        // calculate offset to ensure all values are positive
-        let mut offset = 0;
-        let mut rel_positive_max = 0;
-        for (pos, actual_value) in fastfield_accessor.iter().enumerate() {
-            let calculated_value = get_calculated_value(first_val, pos as u64, slope);
-            if calculated_value > actual_value {
-                // negative value, we need to apply an offset
-                // we ignore negative values in the max value calculation, because negative values
-                // will be offset to 0
-                offset = offset.max(calculated_value - actual_value);
-            } else {
-                // positive value, no offset required
-                rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
-            }
-        }
-
-        // rel_positive_max will be adjusted by offset
-        let relative_max_value = rel_positive_max + offset;
-
-        let num_bits = compute_num_bits(relative_max_value);
+    fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()> {
+        assert_eq!(column.min_value(), 0);
+        let line = Line::train(column);
+
+        let max_offset_from_line = column
+            .iter()
+            .enumerate()
+            .map(|(pos, actual_value)| {
+                let calculated_value = line.eval(pos as u32);
+                actual_value.wrapping_sub(calculated_value)
+            })
+            .max()
+            .unwrap();
+
+        let num_bits = compute_num_bits(max_offset_from_line);
+        let linear_params = LinearParams {
+            line,
+            bit_unpacker: BitUnpacker::new(num_bits),
+        };
+        linear_params.serialize(write)?;
+
         let mut bit_packer = BitPacker::new();
-        for (pos, val) in fastfield_accessor.iter().enumerate() {
-            let calculated_value = get_calculated_value(first_val, pos as u64, slope);
-            let diff = (val + offset) - calculated_value;
-            bit_packer.write(diff, num_bits, write)?;
+        for (pos, actual_value) in column.iter().enumerate() {
+            let calculated_value = line.eval(pos as u32);
+            let offset = actual_value.wrapping_sub(calculated_value);
+            bit_packer.write(offset, num_bits, write)?;
         }
         bit_packer.close(write)?;
 
-        let footer = LinearFooter {
-            relative_max_value,
-            offset,
-            first_val,
-            last_val,
-            num_vals: fastfield_accessor.num_vals(),
-            min_value: fastfield_accessor.min_value(),
-            max_value: fastfield_accessor.max_value(),
-        };
-        footer.serialize(write)?;
         Ok(())
     }
-    fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess) -> bool {
-        if fastfield_accessor.num_vals() < 3 {
-            return false; // disable compressor for this case
-        }
-        // On serialization the offset is added to the actual value.
-        // We need to make sure this won't run into overflow calculation issues.
-        // For this we take the maximum theoretical offset and add this to the max value.
-        // If this doesn't overflow, the algorithm should be fine.
-        let theorethical_maximum_offset =
-            fastfield_accessor.max_value() - fastfield_accessor.min_value();
-        if fastfield_accessor
-            .max_value()
-            .checked_add(theorethical_maximum_offset)
-            .is_none()
-        {
-            return false;
-        }
-        true
-    }
+
     /// estimation for linear interpolation is hard because you don't know
     /// where the local maxima for the deviation of the calculated value are and
     /// the offset to shift all values to >= 0 is also unknown.
-    fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32 {
-        let first_val = fastfield_accessor.get_val(0);
-        let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
-        let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
-
-        // let's sample at 0%, 5%, 10% .. 95%, 100%
-        let num_vals = fastfield_accessor.num_vals() as f32 / 100.0;
-        let sample_positions = (0..20)
-            .map(|pos| (num_vals * pos as f32 * 5.0) as usize)
-            .collect::<Vec<_>>();
-
-        let max_distance = sample_positions
-            .iter()
-            .map(|pos| {
-                let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
-                let actual_value = fastfield_accessor.get_val(*pos as u64);
-                distance(calculated_value, actual_value)
+    #[allow(clippy::question_mark)]
+    fn estimate(column: &dyn Column) -> Option<f32> {
+        if column.num_vals() < 3 {
+            return None; // disable compressor for this case
+        }
+
+        let limit_num_vals = column.num_vals().min(100_000);
+
+        let num_samples = 100;
+        let step_size = (limit_num_vals / num_samples).max(1); // ~100 samples
+        let mut sample_positions_and_values: Vec<_> = Vec::new();
+        for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
+            sample_positions_and_values.push((pos as u64, val));
+        }
+
+        let line = Line::estimate(&sample_positions_and_values);
+
+        let estimated_bit_width = sample_positions_and_values
+            .into_iter()
+            .map(|(pos, actual_value)| {
+                let interpolated_val = line.eval(pos as u32);
+                actual_value.wrapping_sub(interpolated_val)
             })
+            .map(|diff| ((diff as f32 * 1.5) * 2.0) as u64)
+            .map(compute_num_bits)
             .max()
             .unwrap_or(0);
 
-        // the theory would be that we don't have the actual max_distance, but we are close within
-        // a 50% threshold.
-        // It is multiplied by 2 because in a worst case scenario the line would be as much above as
-        // below. So the offset would = max_distance
-        //
-        let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
-
-        let num_bits = compute_num_bits(relative_max_value as u64) as u64
-            * fastfield_accessor.num_vals()
-            + LinearFooter::SIZE_IN_BYTES as u64;
-        let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
-        num_bits as f32 / num_bits_uncompressed as f32
-    }
-}
-
-#[inline]
-fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
-    if x < y {
-        y - x
-    } else {
-        x - y
+        // Extrapolate to the whole column.
+        let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
+        let num_bits_uncompressed = 64 * column.num_vals();
+        Some(num_bits as f32 / num_bits_uncompressed as f32)
     }
 }
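The estimator's arithmetic is worth seeing in isolation: it inflates the largest sampled deviation by 1.5 (sampling may miss the true maximum) and by 2 (the line may err as far below as above), takes the bit width of that, and extrapolates over the whole column plus a small constant for the metadata. A hedged standalone rendering, with `leading_zeros` standing in for `compute_num_bits`:

    fn estimated_ratio(max_sampled_diff: u64, num_vals: u64) -> f32 {
        let padded = (max_sampled_diff as f32 * 1.5 * 2.0) as u64;
        let bit_width = (64 - padded.leading_zeros()) as u64; // stand-in for compute_num_bits
        // bit_width bits per value, plus ~64 bits of metadata, versus raw u64 storage.
        let num_bits = bit_width * num_vals + 64;
        num_bits as f32 / (64 * num_vals) as f32
    }

    fn main() {
        // A max sampled deviation of 1000 pads to 3000 -> 12 bits per value.
        assert!((estimated_ratio(1000, 100_000) - 12.0 / 64.0).abs() < 0.001);
    }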
 
 #[cfg(test)]
 mod tests {
+    use rand::RngCore;
+
     use super::*;
-    use crate::tests::get_codec_test_data_sets;
+    use crate::tests::get_codec_test_datasets;
 
-    fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
-        crate::tests::create_and_validate::<LinearSerializer, LinearReader>(data, name)
-    }
-
-    #[test]
-    fn get_calculated_value_test() {
-        // pos slope
-        assert_eq!(get_calculated_value(100, 10, 5.0), 150);
-
-        // neg slope
-        assert_eq!(get_calculated_value(100, 10, -5.0), 50);
-
-        // pos slope, very high values
-        assert_eq!(
-            get_calculated_value(i64::MAX as u64, 10, 5.0),
-            i64::MAX as u64 + 50
-        );
-        // neg slope, very high values
-        assert_eq!(
-            get_calculated_value(i64::MAX as u64, 10, -5.0),
-            i64::MAX as u64 - 50
-        );
-    }
+    fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
+        crate::tests::create_and_validate::<LinearCodec>(data, name)
+    }
 
     #[test]
     fn test_compression() {
         let data = (10..=6_000_u64).collect::<Vec<_>>();
         let (estimate, actual_compression) =
-            create_and_validate(&data, "simple monotonically large");
-
-        assert!(actual_compression < 0.01);
-        assert!(estimate < 0.01);
+            create_and_validate(&data, "simple monotonically large").unwrap();
+        assert_le!(actual_compression, 0.001);
+        assert_le!(estimate, 0.02);
     }
 
     #[test]
-    fn test_with_codec_data_sets() {
-        let data_sets = get_codec_test_data_sets();
+    fn test_with_codec_datasets() {
+        let data_sets = get_codec_test_datasets();
         for (mut data, name) in data_sets {
             create_and_validate(&data, name);
             data.reverse();
@@ -319,6 +195,13 @@ mod tests {
 
         create_and_validate(&data, "large amplitude");
     }
 
+    #[test]
+    fn overflow_error_test() {
+        let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
+        create_and_validate(&data, "overflow test");
+    }
+
     #[test]
     fn linear_interpol_fast_concave_data() {
         let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
@@ -332,16 +215,15 @@ mod tests {
     #[test]
     fn linear_interpol_fast_field_test_simple() {
         let data = (10..=20_u64).collect::<Vec<_>>();
 
         create_and_validate(&data, "simple monotonically");
     }
 
     #[test]
     fn linear_interpol_fast_field_rand() {
-        for _ in 0..5000 {
-            let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
+        let mut rng = rand::thread_rng();
+        for _ in 0..50 {
+            let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
             create_and_validate(&data, "random");
 
             data.reverse();
             create_and_validate(&data, "random");
         }
@@ -1,48 +1,168 @@
 #[macro_use]
 extern crate prettytable;
-use fastfield_codecs::blockwise_linear::BlockwiseLinearSerializer;
-use fastfield_codecs::linear::LinearSerializer;
-use fastfield_codecs::{FastFieldCodecSerializer, FastFieldCodecType, FastFieldStats};
+use std::collections::HashSet;
+use std::env;
+use std::io::BufRead;
+use std::net::{IpAddr, Ipv6Addr};
+use std::str::FromStr;
+
+use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
+use itertools::Itertools;
+use measure_time::print_time;
+use ownedbytes::OwnedBytes;
 use prettytable::{Cell, Row, Table};
 
+fn print_set_stats(ip_addrs: &[u128]) {
+    println!("NumIps\t{}", ip_addrs.len());
+    let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
+    println!("NumUniqueIps\t{}", ip_addr_set.len());
+    let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
+    println!("RatioUniqueOverTotal\t{ratio_unique:.4}");
+
+    // histogram
+    let mut ip_addrs = ip_addrs.to_vec();
+    ip_addrs.sort();
+    let mut cnts: Vec<usize> = ip_addrs
+        .into_iter()
+        .dedup_with_count()
+        .map(|(cnt, _)| cnt)
+        .collect();
+    cnts.sort();
+
+    let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
+    let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
+    let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
+    let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
+    let total: usize = cnts.iter().sum();
+
+    println!("{}", total);
+    println!("{}", top_256_cnt);
+    println!("{}", top_128_cnt);
+    println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
+    println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
+    println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
+    println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);
+
+    let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
+    cnts.sort_by(|a, b| {
+        if a.1 == b.1 {
+            a.0.cmp(&b.0)
+        } else {
+            b.1.cmp(&a.1)
+        }
+    });
+}
+
+fn ip_dataset() -> Vec<u128> {
+    let mut ip_addr_v4 = 0;
+
+    let stdin = std::io::stdin();
+    let ip_addrs: Vec<u128> = stdin
+        .lock()
+        .lines()
+        .flat_map(|line| {
+            let line = line.unwrap();
+            let line = line.trim();
+            let ip_addr = IpAddr::from_str(line.trim()).ok()?;
+            if ip_addr.is_ipv4() {
+                ip_addr_v4 += 1;
+            }
+            let ip_addr_v6: Ipv6Addr = match ip_addr {
+                IpAddr::V4(v4) => v4.to_ipv6_mapped(),
+                IpAddr::V6(v6) => v6,
+            };
+            Some(ip_addr_v6)
+        })
+        .map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
+        .collect();
+
+    println!("IpAddrsAny\t{}", ip_addrs.len());
+    println!("IpAddrsV4\t{}", ip_addr_v4);
+
+    ip_addrs
+}
+
+fn bench_ip() {
+    let dataset = ip_dataset();
+    print_set_stats(&dataset);
+
+    // Chunks
+    {
+        let mut data = vec![];
+        for dataset in dataset.chunks(500_000) {
+            serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
+        }
+        let compression = data.len() as f64 / (dataset.len() * 16) as f64;
+        println!("Compression 50_000 chunks {:.4}", compression);
+        println!(
+            "Num Bits per elem {:.2}",
+            (data.len() * 8) as f32 / dataset.len() as f32
+        );
+    }
+
+    let mut data = vec![];
+    {
+        print_time!("creation");
+        serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
+    }
+
+    let compression = data.len() as f64 / (dataset.len() * 16) as f64;
+    println!("Compression {:.2}", compression);
+    println!(
+        "Num Bits per elem {:.2}",
+        (data.len() * 8) as f32 / dataset.len() as f32
+    );
+
+    let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
+    // Sample some ranges
+    let mut doc_values = Vec::new();
+    for value in dataset.iter().take(1110).skip(1100).cloned() {
+        doc_values.clear();
+        print_time!("get range");
+        decompressor.get_docids_for_value_range(
+            value..=value,
+            0..decompressor.num_vals(),
+            &mut doc_values,
+        );
+        println!("{:?}", doc_values.len());
+    }
+}
+
 fn main() {
+    if env::args().nth(1).unwrap() == "bench_ip" {
+        bench_ip();
+        return;
+    }
+
     let mut table = Table::new();
 
     // Add a row per time
     table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
 
     for (data, data_set_name) in get_codec_test_data_sets() {
-        let mut results = vec![];
-        let res = serialize_with_codec::<LinearSerializer>(&data);
-        results.push(res);
-        let res = serialize_with_codec::<BlockwiseLinearSerializer>(&data);
-        results.push(res);
-        let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedSerializer>(&data);
-        results.push(res);
-
-        // let best_estimation_codec = results
-        //.iter()
-        //.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
-        //.unwrap();
+        let results: Vec<(f32, f32, FastFieldCodecType)> = [
+            serialize_with_codec(&data, FastFieldCodecType::Bitpacked),
+            serialize_with_codec(&data, FastFieldCodecType::Linear),
+            serialize_with_codec(&data, FastFieldCodecType::BlockwiseLinear),
+        ]
+        .into_iter()
+        .flatten()
+        .collect();
         let best_compression_ratio_codec = results
             .iter()
-            .min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
+            .min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
             .cloned()
             .unwrap();
 
         table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
-        for (is_applicable, est, comp, codec_type) in results {
-            let (est_cell, ratio_cell) = if !is_applicable {
-                ("Codec Disabled".to_string(), "".to_string())
-            } else {
-                (est.to_string(), comp.to_string())
-            };
+        for (est, comp, codec_type) in results {
+            let est_cell = est.to_string();
+            let ratio_cell = comp.to_string();
             let style = if comp == best_compression_ratio_codec.1 {
                 "Fb"
             } else {
                 ""
             };
 
             table.add_row(Row::new(vec![
                 Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
                 Cell::new(&ratio_cell).style_spec(style),
@@ -89,27 +209,14 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
     data_and_names
 }
 
-pub fn serialize_with_codec<S: FastFieldCodecSerializer>(
-    data: &[u64],
-) -> (bool, f32, f32, FastFieldCodecType) {
-    let is_applicable = S::is_applicable(&data);
-    if !is_applicable {
-        return (false, 0.0, 0.0, S::CODEC_TYPE);
-    }
-    let estimation = S::estimate(&data);
-    let mut out = vec![];
-    S::serialize(&mut out, &data).unwrap();
-
-    let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
-    (true, estimation, actual_compression, S::CODEC_TYPE)
-}
-
-pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
-    let min_value = data.iter().cloned().min().unwrap_or(0);
-    let max_value = data.iter().cloned().max().unwrap_or(0);
-    FastFieldStats {
-        min_value,
-        max_value,
-        num_vals: data.len() as u64,
-    }
-}
+pub fn serialize_with_codec(
+    data: &[u64],
+    codec_type: FastFieldCodecType,
+) -> Option<(f32, f32, FastFieldCodecType)> {
+    let col = VecColumn::from(data);
+    let estimation = fastfield_codecs::estimate(&col, codec_type)?;
+    let mut out = Vec::new();
+    fastfield_codecs::serialize(&col, &mut out, &[codec_type]).ok()?;
+    let actual_compression = out.len() as f32 / (col.num_vals() * 8) as f32;
+    Some((estimation, actual_compression, codec_type))
 }
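Putting the new `Option`-based helper together, a hedged usage sketch of `serialize_with_codec` mirroring the table loop above (a hypothetical `report` driver; it assumes this file's imports, in particular `FastFieldCodecType`):

    // Hypothetical driver: None now covers what `is_applicable == false` used to signal.
    fn report(data: &[u64]) {
        for codec_type in [
            FastFieldCodecType::Bitpacked,
            FastFieldCodecType::Linear,
            FastFieldCodecType::BlockwiseLinear,
        ] {
            match serialize_with_codec(data, codec_type) {
                Some((estimate, actual, codec)) => {
                    println!("{codec:?}: estimate {estimate:.3}, actual {actual:.3}")
                }
                None => println!("{codec_type:?}: disabled"),
            }
        }
    }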
|
|||||||
233
fastfield_codecs/src/monotonic_mapping.rs
Normal file
233
fastfield_codecs/src/monotonic_mapping.rs
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
use std::marker::PhantomData;
|
||||||
|
|
||||||
|
use fastdivide::DividerU64;
|
||||||
|
|
||||||
|
use crate::MonotonicallyMappableToU128;
|
||||||
|
|
||||||
|
/// Monotonic maps a value to u64 value space.
|
||||||
|
/// Monotonic mapping enables `PartialOrd` on u64 space without conversion to original space.
|
||||||
|
pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync {
|
||||||
|
/// Converts a value to u64.
|
||||||
|
///
|
||||||
|
/// Internally all fast field values are encoded as u64.
|
||||||
|
fn to_u64(self) -> u64;
|
||||||
|
|
||||||
|
/// Converts a value from u64
|
||||||
|
///
|
||||||
|
/// Internally all fast field values are encoded as u64.
|
||||||
|
/// **Note: To be used for converting encoded Term, Posting values.**
|
||||||
|
fn from_u64(val: u64) -> Self;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Values need to be strictly monotonic mapped to a `Internal` value (u64 or u128) that can be
|
||||||
|
/// used in fast field codecs.
|
||||||
|
///
|
||||||
|
/// The monotonic mapping is required so that `PartialOrd` can be used on `Internal` without
|
||||||
|
/// converting to `External`.
|
||||||
|
///
|
||||||
|
/// All strictly monotonic functions are invertible because they are guaranteed to have a one-to-one
|
||||||
|
/// mapping from their range to their domain. The `inverse` method is required when opening a codec,
|
||||||
|
/// so a value can be converted back to its original domain (e.g. ip address or f64) from its
|
||||||
|
/// internal representation.
|
||||||
|
pub trait StrictlyMonotonicFn<External, Internal> {
|
||||||
|
/// Strictly monotonically maps the value from External to Internal.
|
||||||
|
fn mapping(&self, inp: External) -> Internal;
|
||||||
|
/// Inverse of `mapping`. Maps the value from Internal to External.
|
||||||
|
fn inverse(&self, out: Internal) -> External;
|
||||||
|
}
/// Inverts a strictly monotonic mapping from `StrictlyMonotonicFn<A, B>` to
/// `StrictlyMonotonicFn<B, A>`.
///
/// # Warning
///
/// This type comes with a footgun. A mapping being strictly monotonic does not imply that its
/// inverse is strictly monotonic over the entire `External` space, e.g. `a -> a * 2`. Use at
/// your own risk.
pub(crate) struct StrictlyMonotonicMappingInverter<T> {
    orig_mapping: T,
}
impl<T> From<T> for StrictlyMonotonicMappingInverter<T> {
    fn from(orig_mapping: T) -> Self {
        Self { orig_mapping }
    }
}

impl<From, To, T> StrictlyMonotonicFn<To, From> for StrictlyMonotonicMappingInverter<T>
where T: StrictlyMonotonicFn<From, To>
{
    fn mapping(&self, val: To) -> From {
        self.orig_mapping.inverse(val)
    }

    fn inverse(&self, val: From) -> To {
        self.orig_mapping.mapping(val)
    }
}
/// Applies the strictly monotonic mapping from `T` without any additional changes.
pub(crate) struct StrictlyMonotonicMappingToInternal<T> {
    _phantom: PhantomData<T>,
}

impl<T> StrictlyMonotonicMappingToInternal<T> {
    pub(crate) fn new() -> StrictlyMonotonicMappingToInternal<T> {
        Self {
            _phantom: PhantomData,
        }
    }
}

impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
    StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU128
{
    fn mapping(&self, inp: External) -> u128 {
        External::to_u128(inp)
    }

    fn inverse(&self, out: u128) -> External {
        External::from_u128(out)
    }
}

impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
    StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU64
{
    fn mapping(&self, inp: External) -> u64 {
        External::to_u64(inp)
    }

    fn inverse(&self, out: u64) -> External {
        External::from_u64(out)
    }
}
/// Mapping that subtracts a base value and divides by a gcd.
///
/// The mapping is assumed to be called only on values whose offset from `min_value` is divisible
/// by the gcd; this is necessary for the function to be strictly monotonic (and thus invertible).
pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
    gcd_divider: DividerU64,
    gcd: u64,
    min_value: u64,
}
impl StrictlyMonotonicMappingToInternalGCDBaseval {
    pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
        let gcd_divider = DividerU64::divide_by(gcd);
        Self {
            gcd_divider,
            gcd,
            min_value,
        }
    }
}
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
    for StrictlyMonotonicMappingToInternalGCDBaseval
{
    fn mapping(&self, inp: External) -> u64 {
        self.gcd_divider
            .divide(External::to_u64(inp) - self.min_value)
    }

    fn inverse(&self, out: u64) -> External {
        External::from_u64(self.min_value + out * self.gcd)
    }
}
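For intuition, a worked example of the gcd/base-value mapping with illustrative numbers (the type is pub(crate), so this only compiles inside the crate):

    // Values 1000, 1300, 1600 with min_value = 1000 and gcd = 300 map to 0, 1, 2.
    let gcd_mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(300, 1000);
    let internal: u64 = gcd_mapping.mapping(1300u64);
    assert_eq!(internal, 1);
    // The inverse restores the original value.
    assert_eq!(gcd_mapping.inverse(1u64), 1300u64);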
/// Strictly monotonic mapping with a base value.
pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
    min_value: u64,
}
impl StrictlyMonotonicMappingToInternalBaseval {
    pub(crate) fn new(min_value: u64) -> Self {
        Self { min_value }
    }
}

impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
    for StrictlyMonotonicMappingToInternalBaseval
{
    fn mapping(&self, val: External) -> u64 {
        External::to_u64(val) - self.min_value
    }

    fn inverse(&self, val: u64) -> External {
        External::from_u64(self.min_value + val)
    }
}
impl MonotonicallyMappableToU64 for u64 {
    fn to_u64(self) -> u64 {
        self
    }

    fn from_u64(val: u64) -> Self {
        val
    }
}

impl MonotonicallyMappableToU64 for i64 {
    #[inline(always)]
    fn to_u64(self) -> u64 {
        common::i64_to_u64(self)
    }

    #[inline(always)]
    fn from_u64(val: u64) -> Self {
        common::u64_to_i64(val)
    }
}

impl MonotonicallyMappableToU64 for bool {
    #[inline(always)]
    fn to_u64(self) -> u64 {
        u64::from(self)
    }

    #[inline(always)]
    fn from_u64(val: u64) -> Self {
        val > 0
    }
}

impl MonotonicallyMappableToU64 for f64 {
    fn to_u64(self) -> u64 {
        common::f64_to_u64(self)
    }

    fn from_u64(val: u64) -> Self {
        common::u64_to_f64(val)
    }
}
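The interesting cases are `i64` and `f64`: a plain bit cast would not be monotonic, since negative values would compare greater than positive ones in unsigned space. The `common` helpers handle this; for `f64` the standard trick, which `common::f64_to_u64` is implemented along the lines of, flips the sign bit for positive values and all bits for negative ones — a sketch:

    // Standard order-preserving f64 -> u64 transform (sketch; tantivy's actual
    // implementation lives in the `common` crate).
    fn f64_to_u64_monotonic(val: f64) -> u64 {
        let bits = val.to_bits();
        if bits >> 63 == 0 {
            // Positive: set the sign bit so positives sort above all negatives.
            bits ^ (1u64 << 63)
        } else {
            // Negative: flip all bits so more-negative values sort lower.
            !bits
        }
    }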
#[cfg(test)]
mod tests {

    use super::*;

    #[test]
    fn strictly_monotonic_test() {
        // identity mapping
        test_round_trip(&StrictlyMonotonicMappingToInternal::<u64>::new(), 100u64);
        // round trip to i64
        test_round_trip(&StrictlyMonotonicMappingToInternal::<i64>::new(), 100u64);
        // identity mapping
        test_round_trip(&StrictlyMonotonicMappingToInternal::<u128>::new(), 100u128);

        // base value to i64 round trip
        let mapping = StrictlyMonotonicMappingToInternalBaseval::new(100);
        test_round_trip::<_, _, u64>(&mapping, 100i64);
        // base value and gcd to u64 round trip
        let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
        test_round_trip::<_, _, u64>(&mapping, 100u64);
    }

    fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
        mapping: &T,
        test_val: K,
    ) {
        assert_eq!(mapping.inverse(mapping.mapping(test_val)), test_val);
    }
}
fastfield_codecs/src/monotonic_mapping_u128.rs (new file)
@@ -0,0 +1,40 @@
use std::net::Ipv6Addr;

/// Monotonically maps a value to the u128 value space.
/// Monotonic mapping enables `PartialOrd` on u128 space without conversion to the original space.
pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Send + Sync {
    /// Converts a value to u128.
    ///
    /// Internally all u128 fast field values are encoded as u128.
    fn to_u128(self) -> u128;

    /// Converts a value from u128.
    ///
    /// Internally all u128 fast field values are encoded as u128.
    /// **Note: To be used for converting encoded Term, Posting values.**
    fn from_u128(val: u128) -> Self;
}

impl MonotonicallyMappableToU128 for u128 {
    fn to_u128(self) -> u128 {
        self
    }

    fn from_u128(val: u128) -> Self {
        val
    }
}

impl MonotonicallyMappableToU128 for Ipv6Addr {
    fn to_u128(self) -> u128 {
        ip_to_u128(self)
    }

    fn from_u128(val: u128) -> Self {
        Ipv6Addr::from(val.to_be_bytes())
    }
}

fn ip_to_u128(ip_addr: Ipv6Addr) -> u128 {
    u128::from_be_bytes(ip_addr.octets())
}
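Because the conversion goes through big-endian bytes, numeric order of the `u128` matches the natural ordering of the addresses. A small illustrative sketch:

    use std::net::Ipv6Addr;

    let low: Ipv6Addr = "::1".parse().unwrap();
    let high: Ipv6Addr = "::2".parse().unwrap();
    assert!(low.to_u128() < high.to_u128());
    // Round trip back to the original address.
    assert_eq!(Ipv6Addr::from_u128(low.to_u128()), low);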
fastfield_codecs/src/null_index/dense.rs (new file)
@@ -0,0 +1,453 @@
use std::convert::TryInto;
use std::io::{self, Write};

use common::BinarySerializable;
use itertools::Itertools;
use ownedbytes::OwnedBytes;

use super::{get_bit_at, set_bit_at};

/// For the `DenseCodec`, `data` contains the encoded blocks.
/// Each block consists of `[u8; 12]`. The first 8 bytes are a bitvec for 64 elements.
/// The last 4 bytes are the offset, i.e. the number of set bits so far.
///
/// When translating the original index to a dense index, the correct block can be computed
/// directly as `orig_idx / 64`. Inside the block the position is `orig_idx % 64`.
///
/// When translating a dense index to the original index, we can use the offset to find the
/// correct block. Direct computation is not possible, but we can employ a linear or binary
/// search.
pub struct DenseCodec {
    // data consists of blocks of 64 bits.
    //
    // The format is &[(u64, u32)]:
    // u64 is the bitvec,
    // u32 is the offset of the block, the number of set bits so far.
    //
    // At the end one extra block is appended, to store the total number of values in the index
    // in its offset.
    data: OwnedBytes,
}
const ELEMENTS_PER_BLOCK: u32 = 64;
const BLOCK_BITVEC_SIZE: usize = 8;
const BLOCK_OFFSET_SIZE: usize = 4;
const SERIALIZED_BLOCK_SIZE: usize = BLOCK_BITVEC_SIZE + BLOCK_OFFSET_SIZE;
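A quick sanity check of the layout arithmetic described above, written as test-style statements: with 12-byte blocks covering 64 elements each, original index 130 lands in block 2 at bit 2.

    // Illustrative only: locate original index 130 in the block layout.
    let orig_idx: u32 = 130;
    assert_eq!(orig_idx / ELEMENTS_PER_BLOCK, 2); // third block
    assert_eq!(orig_idx % ELEMENTS_PER_BLOCK, 2); // third bit inside it
    assert_eq!(SERIALIZED_BLOCK_SIZE, 12); // 8 bytes bitvec + 4 bytes offset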
#[inline]
fn count_ones(bitvec: u64, pos_in_bitvec: u32) -> u32 {
    if pos_in_bitvec == 63 {
        bitvec.count_ones()
    } else {
        let mask = (1u64 << (pos_in_bitvec + 1)) - 1;
        let masked_bitvec = bitvec & mask;
        masked_bitvec.count_ones()
    }
}

#[derive(Clone, Copy)]
struct DenseIndexBlock {
    bitvec: u64,
    offset: u32,
}

impl From<[u8; SERIALIZED_BLOCK_SIZE]> for DenseIndexBlock {
    fn from(data: [u8; SERIALIZED_BLOCK_SIZE]) -> Self {
        let bitvec = u64::from_le_bytes(data[..BLOCK_BITVEC_SIZE].try_into().unwrap());
        let offset = u32::from_le_bytes(data[BLOCK_BITVEC_SIZE..].try_into().unwrap());
        Self { bitvec, offset }
    }
}

impl DenseCodec {
    /// Open the DenseCodec from OwnedBytes.
    pub fn open(data: OwnedBytes) -> Self {
        Self { data }
    }

    /// Check if the value at the given position is not null.
    #[inline]
    pub fn exists(&self, idx: u32) -> bool {
        let block_pos = idx / ELEMENTS_PER_BLOCK;
        let bitvec = self.dense_index_block(block_pos).bitvec;

        let pos_in_bitvec = idx % ELEMENTS_PER_BLOCK;

        get_bit_at(bitvec, pos_in_bitvec)
    }

    #[inline]
    fn dense_index_block(&self, block_pos: u32) -> DenseIndexBlock {
        dense_index_block(&self.data, block_pos)
    }

    /// Return the number of non-null values in the index.
    pub fn num_non_null_vals(&self) -> u32 {
        let last_block = (self.data.len() / SERIALIZED_BLOCK_SIZE) - 1;
        self.dense_index_block(last_block as u32).offset
    }

    /// Translate from the original index to the codec index.
    #[inline]
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        let block_pos = idx / ELEMENTS_PER_BLOCK;
        let index_block = self.dense_index_block(block_pos);
        let pos_in_block_bit_vec = idx % ELEMENTS_PER_BLOCK;
        let ones_in_block = count_ones(index_block.bitvec, pos_in_block_bit_vec);
        if get_bit_at(index_block.bitvec, pos_in_block_bit_vec) {
            // -1 is ok, since idx does exist, so there's at least one set bit
            Some(index_block.offset + ones_in_block - 1)
        } else {
            None
        }
    }
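    // Worked example for `translate_to_codec_idx` above (editor's note, illustrative):
    // suppose block 3 has `offset = 40` (40 set bits in blocks 0..3) and its bitvec
    // is `0b0101`. For `idx = 3 * 64 + 2`, `pos_in_block_bit_vec = 2`, `count_ones`
    // over bits 0..=2 returns 2, and bit 2 is set, so the dense index is
    // `40 + 2 - 1 = 41`.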
    /// Translate positions from the codec index to the original index.
    ///
    /// # Panics
    ///
    /// May panic if any `idx` is greater than the column length.
    pub fn translate_codec_idx_to_original_idx<'a>(
        &'a self,
        iter: impl Iterator<Item = u32> + 'a,
    ) -> impl Iterator<Item = u32> + 'a {
        let mut block_pos = 0u32;
        iter.map(move |dense_idx| {
            // update block_pos to limit the search scope
            block_pos = find_block(dense_idx, block_pos, &self.data);
            let index_block = self.dense_index_block(block_pos);

            // The next offset is higher than dense_idx and therefore:
            // dense_idx <= offset + num_set_bits in block
            let mut num_set_bits = 0;
            for idx_in_bitvec in 0..ELEMENTS_PER_BLOCK {
                if get_bit_at(index_block.bitvec, idx_in_bitvec) {
                    num_set_bits += 1;
                }
                if num_set_bits == (dense_idx - index_block.offset + 1) {
                    let orig_idx = block_pos * ELEMENTS_PER_BLOCK + idx_in_bitvec as u32;
                    return orig_idx;
                }
            }
            panic!("Internal Error: Offset calculation in dense idx seems to be wrong.");
        })
    }
}
#[inline]
fn dense_index_block(data: &[u8], block_pos: u32) -> DenseIndexBlock {
    let data_start_pos = block_pos as usize * SERIALIZED_BLOCK_SIZE;
    let block_data: [u8; SERIALIZED_BLOCK_SIZE] = data[data_start_pos..][..SERIALIZED_BLOCK_SIZE]
        .try_into()
        .unwrap();
    block_data.into()
}

/// Finds the block position containing the dense_idx.
///
/// # Correctness
/// dense_idx needs to be smaller than the number of values in the index.
///
/// The last offset number is equal to the number of values in the index.
#[inline]
fn find_block(dense_idx: u32, mut block_pos: u32, data: &[u8]) -> u32 {
    loop {
        let offset = dense_index_block(data, block_pos).offset;
        if offset > dense_idx {
            return block_pos - 1;
        }
        block_pos += 1;
    }
}
/// Serializes a dense codec from an iterator over all values: `true` if set, `false` otherwise.
pub fn serialize_dense_codec(
    iter: impl Iterator<Item = bool>,
    mut out: impl Write,
) -> io::Result<()> {
    let mut offset: u32 = 0;

    for chunk in &iter.chunks(ELEMENTS_PER_BLOCK as usize) {
        let mut block: u64 = 0;
        for (pos, is_bit_set) in chunk.enumerate() {
            if is_bit_set {
                set_bit_at(&mut block, pos as u64);
            }
        }

        block.serialize(&mut out)?;
        offset.serialize(&mut out)?;

        offset += block.count_ones() as u32;
    }
    // Add a sentinel block to carry the final offset.
    let block: u64 = 0;
    block.serialize(&mut out)?;
    offset.serialize(&mut out)?;

    Ok(())
}
#[cfg(test)]
mod tests {
    use proptest::prelude::{any, prop, *};
    use proptest::strategy::Strategy;
    use proptest::{prop_oneof, proptest};

    use super::*;

    fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
        prop_oneof![
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..64),
            1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..64),
            8 => vec![any::<bool>()],
            2 => prop::collection::vec(any::<bool>(), 0..50),
        ]
        .boxed()
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(500))]
        #[test]
        fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
            let mut bitvec = Vec::new();
            bitvec.extend_from_slice(&bitvec1);
            bitvec.extend_from_slice(&bitvec2);
            bitvec.extend_from_slice(&bitvec3);
            test_null_index(bitvec);
        }
    }

    #[test]
    fn dense_codec_test_one_block_false() {
        let mut iter = vec![false; 64];
        iter.push(true);
        test_null_index(iter);
    }

    fn test_null_index(data: Vec<bool>) {
        let mut out = vec![];

        serialize_dense_codec(data.iter().cloned(), &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));

        let orig_idx_with_value: Vec<u32> = data
            .iter()
            .enumerate()
            .filter(|(_pos, val)| **val)
            .map(|(pos, _val)| pos as u32)
            .collect();

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
                .collect_vec(),
            orig_idx_with_value
        );

        for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate() {
            assert_eq!(
                null_index.translate_to_codec_idx(*orig_idx),
                Some(dense_idx as u32)
            );
        }

        for (pos, value) in data.iter().enumerate() {
            assert_eq!(null_index.exists(pos as u32), *value);
        }
    }

    #[test]
    fn dense_codec_test_translation() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..2)
                .collect_vec(),
            vec![0, 2]
        );
    }

    #[test]
    fn dense_codec_translate() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
        assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
    }

    #[test]
    fn dense_codec_test_small() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert!(null_index.exists(0));
        assert!(!null_index.exists(1));
        assert!(null_index.exists(2));
        assert!(!null_index.exists(3));
    }

    #[test]
    fn dense_codec_test_large() {
        let mut docs = vec![];
        docs.extend((0..1000).map(|_idx| false));
        docs.extend((0..=1000).map(|_idx| true));

        let iter = docs.iter().cloned();
        let mut out = vec![];
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert!(!null_index.exists(0));
        assert!(!null_index.exists(100));
        assert!(!null_index.exists(999));
        assert!(null_index.exists(1000));
        assert!(null_index.exists(1999));
        assert!(null_index.exists(2000));
        assert!(!null_index.exists(2001));
    }

    #[test]
    fn test_count_ones() {
        let mut block = 0;
        set_bit_at(&mut block, 0);
        set_bit_at(&mut block, 2);

        assert_eq!(count_ones(block, 0), 1);
        assert_eq!(count_ones(block, 1), 1);
        assert_eq!(count_ones(block, 2), 2);
    }
}
#[cfg(all(test, feature = "unstable"))]
mod bench {

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;

    use super::*;

    const TOTAL_NUM_VALUES: u32 = 1_000_000;

    fn gen_bools(fill_ratio: f64) -> DenseCodec {
        let mut out = Vec::new();
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let bools: Vec<_> = (0..TOTAL_NUM_VALUES)
            .map(|_| rng.gen_bool(fill_ratio))
            .collect();
        serialize_dense_codec(bools.into_iter(), &mut out).unwrap();

        let codec = DenseCodec::open(OwnedBytes::new(out));
        codec
    }

    fn random_range_iterator(start: u32, end: u32, step_size: u32) -> impl Iterator<Item = u32> {
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let mut current = start;
        std::iter::from_fn(move || {
            current += rng.gen_range(1..step_size + 1);
            if current >= end {
                None
            } else {
                Some(current)
            }
        })
    }

    fn walk_over_data(codec: &DenseCodec, max_step_size: u32) -> Option<u32> {
        walk_over_data_from_positions(
            codec,
            random_range_iterator(0, TOTAL_NUM_VALUES, max_step_size),
        )
    }

    fn walk_over_data_from_positions(
        codec: &DenseCodec,
        positions: impl Iterator<Item = u32>,
    ) -> Option<u32> {
        let mut dense_idx: Option<u32> = None;
        for idx in positions {
            dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
        }
        dense_idx
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_dense_90percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_dense_50percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.5f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_dense_full_scan_10percent(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_dense_full_scan_90percent(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_dense_10percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_dense_codec_translate_dense_to_orig_90percent_filled_random_stride_big_step(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_null_vals();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
                .last()
        });
    }

    #[bench]
    fn bench_dense_codec_translate_dense_to_orig_90percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_null_vals();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
                .last()
        });
    }

    #[bench]
    fn bench_dense_codec_translate_dense_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_null_vals();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }
}
fastfield_codecs/src/null_index/mod.rs (new file)
@@ -0,0 +1,13 @@
pub use dense::{serialize_dense_codec, DenseCodec};

mod dense;

#[inline]
fn get_bit_at(input: u64, n: u32) -> bool {
    input & (1 << n) != 0
}

#[inline]
fn set_bit_at(input: &mut u64, n: u64) {
    *input |= 1 << n;
}
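A minimal sketch of the two helpers in action, as test-style statements:

    // Illustrative only.
    let mut bits = 0u64;
    set_bit_at(&mut bits, 3);
    assert!(get_bit_at(bits, 3));
    assert!(!get_bit_at(bits, 4));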
fastfield_codecs/src/null_index_footer.rs (new file)
@@ -0,0 +1,144 @@
use std::io::{self, Write};
use std::ops::Range;

use common::{BinarySerializable, CountingWriter, VInt};
use ownedbytes::OwnedBytes;

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum FastFieldCardinality {
    Single = 1,
}

impl BinarySerializable for FastFieldCardinality {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`"))
        })?;
        Ok(codec_type)
    }
}

impl FastFieldCardinality {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Single),
            _ => None,
        }
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum NullIndexCodec {
    Full = 1,
}

impl BinarySerializable for NullIndexCodec {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`"))
        })?;
        Ok(codec_type)
    }
}

impl NullIndexCodec {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Full),
            _ => None,
        }
    }
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub(crate) struct NullIndexFooter {
    pub(crate) cardinality: FastFieldCardinality,
    pub(crate) null_index_codec: NullIndexCodec,
    // Unused for NullIndexCodec::Full
    pub(crate) null_index_byte_range: Range<u64>,
}

impl BinarySerializable for NullIndexFooter {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        self.cardinality.serialize(writer)?;
        self.null_index_codec.serialize(writer)?;
        VInt(self.null_index_byte_range.start).serialize(writer)?;
        VInt(self.null_index_byte_range.end - self.null_index_byte_range.start)
            .serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let cardinality = FastFieldCardinality::deserialize(reader)?;
        let null_index_codec = NullIndexCodec::deserialize(reader)?;
        let null_index_byte_range_start = VInt::deserialize(reader)?.0;
        let null_index_byte_range_end = VInt::deserialize(reader)?.0 + null_index_byte_range_start;
        Ok(Self {
            cardinality,
            null_index_codec,
            null_index_byte_range: null_index_byte_range_start..null_index_byte_range_end,
        })
    }
}
pub(crate) fn append_null_index_footer(
    output: &mut impl io::Write,
    null_index_footer: NullIndexFooter,
) -> io::Result<()> {
    let mut counting_write = CountingWriter::wrap(output);
    null_index_footer.serialize(&mut counting_write)?;
    let footer_payload_len = counting_write.written_bytes();
    BinarySerializable::serialize(&(footer_payload_len as u16), &mut counting_write)?;

    Ok(())
}

pub(crate) fn read_null_index_footer(
    data: OwnedBytes,
) -> io::Result<(OwnedBytes, NullIndexFooter)> {
    let (data, null_footer_length_bytes) = data.rsplit(2);

    let footer_length = u16::deserialize(&mut null_footer_length_bytes.as_slice())?;
    let (data, null_index_footer_bytes) = data.rsplit(footer_length as usize);
    let null_index_footer = NullIndexFooter::deserialize(&mut null_index_footer_bytes.as_ref())?;

    Ok((data, null_index_footer))
}
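Because the u16 length is written last, a reader can always recover the footer from the tail of a file without knowing anything else about its contents: peel off the final 2 bytes, then slice that many bytes off the new tail. A round-trip sketch using only the items defined above (they are pub(crate), so this runs inside the crate):

    let footer = NullIndexFooter {
        cardinality: FastFieldCardinality::Single,
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 10..20,
    };
    let mut out = Vec::new();
    append_null_index_footer(&mut out, footer.clone()).unwrap();
    let (rest, restored) = read_null_index_footer(OwnedBytes::new(out)).unwrap();
    assert!(rest.is_empty()); // nothing but the footer was written
    assert_eq!(restored, footer);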
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn null_index_footer_deser_test() {
        let null_index_footer = NullIndexFooter {
            cardinality: FastFieldCardinality::Single,
            null_index_codec: NullIndexCodec::Full,
            null_index_byte_range: 100..120,
        };

        let mut out = vec![];
        null_index_footer.serialize(&mut out).unwrap();

        assert_eq!(
            null_index_footer,
            NullIndexFooter::deserialize(&mut &out[..]).unwrap()
        );
    }
}
fastfield_codecs/src/serialize.rs (new file)
@@ -0,0 +1,355 @@
// Copyright (C) 2022 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;

use common::{BinarySerializable, VInt};
use log::warn;
use ownedbytes::OwnedBytes;

use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::compact_space::CompactSpaceCompressor;
use crate::format_version::append_format_version;
use crate::linear::LinearCodec;
use crate::monotonic_mapping::{
    StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
    StrictlyMonotonicMappingToInternalGCDBaseval,
};
use crate::null_index_footer::{
    append_null_index_footer, FastFieldCardinality, NullIndexCodec, NullIndexFooter,
};
use crate::{
    monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType, MonotonicallyMappableToU64,
    U128FastFieldCodecType, VecColumn, ALL_CODEC_TYPES,
};

/// The normalized header gives some parameters after applying the following
/// normalization of the vector:
/// `val -> (val - min_value) / gcd`
///
/// By design, after normalization, `min_value = 0` and `gcd = 1`.
#[derive(Debug, Copy, Clone)]
pub struct NormalizedHeader {
    /// The number of values in the underlying column.
    pub num_vals: u32,
    /// The max value of the underlying column.
    pub max_value: u64,
}
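A worked example of the normalization with illustrative numbers:

    // val -> (val - min_value) / gcd, with min_value = 1000 and gcd = 300:
    let vals = [1000u64, 1300, 1600];
    let normalized: Vec<u64> = vals.iter().map(|v| (v - 1000) / 300).collect();
    assert_eq!(normalized, vec![0, 1, 2]); // max_value is now 2: 2 bits per value suffice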
#[derive(Debug, Copy, Clone)]
pub(crate) struct Header {
    pub num_vals: u32,
    pub min_value: u64,
    pub max_value: u64,
    pub gcd: Option<NonZeroU64>,
    pub codec_type: FastFieldCodecType,
}

impl Header {
    pub fn normalized(self) -> NormalizedHeader {
        let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
        let gcd_min_val_mapping =
            StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, self.min_value);

        let max_value = gcd_min_val_mapping.mapping(self.max_value);
        NormalizedHeader {
            num_vals: self.num_vals,
            max_value,
        }
    }

    pub fn normalize_column<C: Column>(&self, from_column: C) -> impl Column {
        normalize_column(from_column, self.min_value, self.gcd)
    }

    pub fn compute_header(
        column: impl Column<u64>,
        codecs: &[FastFieldCodecType],
    ) -> Option<Header> {
        let num_vals = column.num_vals();
        let min_value = column.min_value();
        let max_value = column.max_value();
        let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
            .filter(|gcd| gcd.get() > 1u64);
        let normalized_column = normalize_column(column, min_value, gcd);
        let codec_type = detect_codec(normalized_column, codecs)?;
        Some(Header {
            num_vals,
            min_value,
            max_value,
            gcd,
            codec_type,
        })
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) struct U128Header {
    pub num_vals: u32,
    pub codec_type: U128FastFieldCodecType,
}

impl BinarySerializable for U128Header {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.num_vals as u64).serialize(writer)?;
        self.codec_type.serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let num_vals = VInt::deserialize(reader)?.0 as u32;
        let codec_type = U128FastFieldCodecType::deserialize(reader)?;
        Ok(U128Header {
            num_vals,
            codec_type,
        })
    }
}
pub fn normalize_column<C: Column>(
    from_column: C,
    min_value: u64,
    gcd: Option<NonZeroU64>,
) -> impl Column {
    let gcd = gcd.map(|gcd| gcd.get()).unwrap_or(1);
    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, min_value);
    monotonic_map_column(from_column, mapping)
}
impl BinarySerializable for Header {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.num_vals as u64).serialize(writer)?;
        VInt(self.min_value).serialize(writer)?;
        VInt(self.max_value - self.min_value).serialize(writer)?;
        if let Some(gcd) = self.gcd {
            VInt(gcd.get()).serialize(writer)?;
        } else {
            VInt(0u64).serialize(writer)?;
        }
        self.codec_type.serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let num_vals = VInt::deserialize(reader)?.0 as u32;
        let min_value = VInt::deserialize(reader)?.0;
        let amplitude = VInt::deserialize(reader)?.0;
        let max_value = min_value + amplitude;
        let gcd_u64 = VInt::deserialize(reader)?.0;
        let codec_type = FastFieldCodecType::deserialize(reader)?;
        Ok(Header {
            num_vals,
            min_value,
            max_value,
            gcd: NonZeroU64::new(gcd_u64),
            codec_type,
        })
    }
}
/// Return estimated compression for given codec in the value range [0.0..1.0], where 1.0 means no
/// compression.
pub fn estimate<T: MonotonicallyMappableToU64>(
    typed_column: impl Column<T>,
    codec_type: FastFieldCodecType,
) -> Option<f32> {
    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
    let min_value = column.min_value();
    let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
        .filter(|gcd| gcd.get() > 1u64);
    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(
        gcd.map(|gcd| gcd.get()).unwrap_or(1u64),
        min_value,
    );
    let normalized_column = monotonic_map_column(&column, mapping);
    match codec_type {
        FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&normalized_column),
        FastFieldCodecType::Linear => LinearCodec::estimate(&normalized_column),
        FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&normalized_column),
    }
}
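A usage sketch, mirroring how `detect_codec` below consumes these estimates (assumes `VecColumn` and `ALL_CODEC_TYPES` as imported at the top of this file):

    // Illustrative: print the estimated compression ratio of every codec for a column.
    let vals: Vec<u64> = (0..100u64).map(|i| i * 1000).collect();
    for &codec_type in &ALL_CODEC_TYPES {
        if let Some(ratio) = estimate(VecColumn::from(&vals[..]), codec_type) {
            println!("{:?}: estimated compression ratio {}", codec_type, ratio);
        }
    }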
/// Serializes u128 values with the compact space codec.
pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
    iter_gen: F,
    num_vals: u32,
    output: &mut impl io::Write,
) -> io::Result<()> {
    let header = U128Header {
        num_vals,
        codec_type: U128FastFieldCodecType::CompactSpace,
    };
    header.serialize(output)?;
    let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
    compressor.compress_into(iter_gen(), output).unwrap();

    let null_index_footer = NullIndexFooter {
        cardinality: FastFieldCardinality::Single,
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 0..0,
    };
    append_null_index_footer(output, null_index_footer)?;
    append_format_version(output)?;

    Ok(())
}
/// Serializes the column with the codec that has the best estimate on the data.
pub fn serialize<T: MonotonicallyMappableToU64>(
    typed_column: impl Column<T>,
    output: &mut impl io::Write,
    codecs: &[FastFieldCodecType],
) -> io::Result<()> {
    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
    let header = Header::compute_header(&column, codecs).ok_or_else(|| {
        io::Error::new(
            io::ErrorKind::InvalidInput,
            format!(
                "Data cannot be serialized with this list of codecs: {:?}",
                codecs
            ),
        )
    })?;
    header.serialize(output)?;
    let normalized_column = header.normalize_column(column);
    assert_eq!(normalized_column.min_value(), 0u64);
    serialize_given_codec(normalized_column, header.codec_type, output)?;

    let null_index_footer = NullIndexFooter {
        cardinality: FastFieldCardinality::Single,
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 0..0,
    };
    append_null_index_footer(output, null_index_footer)?;
    append_format_version(output)?;

    Ok(())
}
fn detect_codec(
    column: impl Column<u64>,
    codecs: &[FastFieldCodecType],
) -> Option<FastFieldCodecType> {
    let mut estimations = Vec::new();
    for &codec in codecs {
        let estimation_opt = match codec {
            FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&column),
            FastFieldCodecType::Linear => LinearCodec::estimate(&column),
            FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&column),
        };
        if let Some(estimation) = estimation_opt {
            estimations.push((estimation, codec));
        }
    }
    if let Some(broken_estimation) = estimations.iter().find(|estimation| estimation.0.is_nan()) {
        warn!(
            "broken estimation for fast field codec {:?}",
            broken_estimation.1
        );
    }
    // Remove NaN values for codecs with broken calculations, and f32::MAX values, which disable
    // a codec.
    estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
    estimations.sort_by(|(score_left, _), (score_right, _)| score_left.total_cmp(score_right));
    Some(estimations.first()?.1)
}
fn serialize_given_codec(
    column: impl Column<u64>,
    codec_type: FastFieldCodecType,
    output: &mut impl io::Write,
) -> io::Result<()> {
    match codec_type {
        FastFieldCodecType::Bitpacked => {
            BitpackedCodec::serialize(&column, output)?;
        }
        FastFieldCodecType::Linear => {
            LinearCodec::serialize(&column, output)?;
        }
        FastFieldCodecType::BlockwiseLinear => {
            BlockwiseLinearCodec::serialize(&column, output)?;
        }
    }
    output.flush()?;
    Ok(())
}
/// Helper function to serialize a column (autodetect from all codecs) and then open it.
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
    column: &[T],
) -> Arc<dyn Column<T>> {
    let mut buffer = Vec::new();
    super::serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
    super::open(OwnedBytes::new(buffer)).unwrap()
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_serialize_deserialize_u128_header() {
        let original = U128Header {
            num_vals: 11,
            codec_type: U128FastFieldCodecType::CompactSpace,
        };
        let mut out = Vec::new();
        original.serialize(&mut out).unwrap();
        let restored = U128Header::deserialize(&mut &out[..]).unwrap();
        assert_eq!(restored, original);
    }

    #[test]
    fn test_serialize_deserialize() {
        let original = [1u64, 5u64, 10u64];
        let restored: Vec<u64> = serialize_and_load(&original[..]).iter().collect();
        assert_eq!(&restored, &original[..]);
    }

    #[test]
    fn test_fastfield_bool_size_bitwidth_1() {
        let mut buffer = Vec::new();
        let col = VecColumn::from(&[false, true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 1 byte of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 3 + 5 + 8 + 4 + 2);
    }

    #[test]
    fn test_fastfield_bool_bit_size_bitwidth_0() {
        let mut buffer = Vec::new();
        let col = VecColumn::from(&[true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 0 bytes of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 3 + 5 + 7 + 4 + 2);
    }

    #[test]
    fn test_fastfield_gcd() {
        let mut buffer = Vec::new();
        let vals: Vec<u64> = (0..80).map(|val| (val % 7) * 1_000u64).collect();
        let col = VecColumn::from(&vals[..]);
        serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
        // Values are stored over 3 bits.
        assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 7 + 4 + 2);
    }
}
ownedbytes/Cargo.toml
@@ -1,10 +1,14 @@
 [package]
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 name = "ownedbytes"
-version = "0.3.0"
+version = "0.4.0"
 edition = "2021"
 description = "Expose data as static slice"
 license = "MIT"
+documentation = "https://docs.rs/ownedbytes/"
+homepage = "https://github.com/quickwit-oss/tantivy"
+repository = "https://github.com/quickwit-oss/tantivy"
+
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
ownedbytes/src/lib.rs
@@ -6,7 +6,7 @@ use std::{fmt, io, mem};
 use stable_deref_trait::StableDeref;
 
 /// An OwnedBytes simply wraps an object that owns a slice of data and exposes
-/// this data as a static slice.
+/// this data as a slice.
 ///
 /// The backing object is required to be `StableDeref`.
 #[derive(Clone)]
@@ -80,6 +80,21 @@ impl OwnedBytes {
         (left, right)
     }
 
+    /// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
+    ///
+    /// `right` will hold `split_len` bytes.
+    ///
+    /// This operation is cheap and does not require copying any memory.
+    /// On the other hand, both `left` and `right` retain a handle over
+    /// the entire slice of memory. In other words, the memory will only
+    /// be released when both `left` and `right` are dropped.
+    #[inline]
+    #[must_use]
+    pub fn rsplit(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
+        let data_len = self.data.len();
+        self.split(data_len - split_len)
+    }
+
     /// Splits the right part of the `OwnedBytes` at the given offset.
     ///
     /// `self` is truncated to `split_len`, left with the remaining bytes.
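A quick usage sketch of the new `rsplit` (illustrative; `read_null_index_footer` above relies on exactly this to peel the footer off the end of a file):

    use ownedbytes::OwnedBytes;

    let bytes = OwnedBytes::new(vec![1u8, 2, 3, 4, 5]);
    let (left, right) = bytes.rsplit(2);
    assert_eq!(&left[..], &[1, 2, 3]);
    assert_eq!(&right[..], &[4, 5]);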
query-grammar/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
query-grammar/src/query_grammar.rs
@@ -5,7 +5,8 @@ use combine::parser::range::{take_while, take_while1};
 use combine::parser::repeat::escaped;
 use combine::parser::Parser;
 use combine::{
-    attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
+    attempt, between, choice, eof, many, many1, one_of, optional, parser, satisfy, sep_by,
+    skip_many1, value,
 };
 use once_cell::sync::Lazy;
 use regex::Regex;
@@ -23,7 +24,7 @@ const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|
 /// Parses a field_name
 /// A field name must have at least one character and be followed by a colon.
 /// All characters are allowed including special characters `SPECIAL_CHARS`, but these
-/// need to be escaped with a backslack character '\'.
+/// need to be escaped with a backslash character '\'.
 fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
     static ESCAPED_SPECIAL_CHARS_RE: Lazy<Regex> =
         Lazy::new(|| Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap());
@@ -62,13 +63,27 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
     })
 }
 
+// word variant that allows more characters, e.g. for range queries that don't allow a field
+// specifier
+fn relaxed_word<'a>() -> impl Parser<&'a str, Output = String> {
+    (
+        satisfy(|c: char| {
+            !c.is_whitespace() && !['`', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
+        }),
+        many(satisfy(|c: char| {
+            !c.is_whitespace() && !['{', '}', '"', '[', ']', '(', ')'].contains(&c)
+        })),
+    )
+        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
+}
+
 /// Parses a date time according to rfc3339
 /// 2015-08-02T18:54:42+02
 /// 2021-04-13T19:46:26.266051969+00:00
 ///
 /// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
 /// We delegate rejecting such invalid dates to the logical AST computation code
-/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
+/// which invokes `time::OffsetDateTime::parse(..., &Rfc3339)` on the value to actually parse
 /// it (instead of merely extracting the datetime value as string as done here).
 fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
     let two_digits = || recognize::<String, _, _>((digit(), digit()));
@@ -181,8 +196,8 @@ fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
 fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
     let range_term_val = || {
         attempt(date_time())
-            .or(word())
             .or(negative_number())
+            .or(relaxed_word())
             .or(char('*').with(value("*".to_string())))
     };
 
@@ -250,6 +265,17 @@ fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
     })
 }
 
+/// Function that parses a set out of a Stream.
+/// Supports sets like: `IN [val1 val2 val3]`
+fn set<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
+    let term_list = between(char('['), char(']'), sep_by(term_val(), spaces()));
+
+    let set_content = ((string("IN"), spaces()), term_list).map(|(_, elements)| elements);
+
+    (optional(attempt(field_name().skip(spaces()))), set_content)
+        .map(|(field, elements)| UserInputLeaf::Set { field, elements })
+}
+
 fn negate(expr: UserInputAst) -> UserInputAst {
     expr.unary(Occur::MustNot)
 }
@@ -264,6 +290,7 @@ fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
         string("NOT").skip(spaces1()).with(leaf()).map(negate),
     ))
     .or(attempt(range().map(UserInputAst::from)))
+    .or(attempt(set().map(UserInputAst::from)))
     .or(literal().map(UserInputAst::from))
     .parse_stream(input)
     .into_result()
@@ -649,6 +676,34 @@ mod test {
             .expect("Cannot parse date range")
             .0;
         assert_eq!(res6, expected_flexible_dates);
+        // IP Range Unbounded
+        let expected_weight = UserInputLeaf::Range {
+            field: Some("ip".to_string()),
+            lower: UserInputBound::Inclusive("::1".to_string()),
+            upper: UserInputBound::Unbounded,
+        };
+        let res1 = range()
+            .parse("ip: >=::1")
+            .expect("Cannot parse ip v6 format")
+            .0;
+        let res2 = range()
+            .parse("ip:[::1 TO *}")
+            .expect("Cannot parse ip v6 format")
+            .0;
+        assert_eq!(res1, expected_weight);
+        assert_eq!(res2, expected_weight);
+
+        // IP Range Bounded
+        let expected_weight = UserInputLeaf::Range {
+            field: Some("ip".to_string()),
+            lower: UserInputBound::Inclusive("::0.0.0.50".to_string()),
+            upper: UserInputBound::Exclusive("::0.0.0.52".to_string()),
+        };
+        let res1 = range()
+            .parse("ip:[::0.0.0.50 TO ::0.0.0.52}")
+            .expect("Cannot parse ip v6 format")
+            .0;
+        assert_eq!(res1, expected_weight);
     }
 
     #[test]
@@ -705,6 +760,14 @@ mod test {
         test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
     }
 
+    #[test]
+    fn test_parse_test_query_set() {
+        test_parse_query_to_ast_helper("abc: IN [a b c]", r#""abc": IN ["a" "b" "c"]"#);
+        test_parse_query_to_ast_helper("abc: IN [1]", r#""abc": IN ["1"]"#);
+        test_parse_query_to_ast_helper("abc: IN []", r#""abc": IN []"#);
+        test_parse_query_to_ast_helper("IN [1 2]", r#"IN ["1" "2"]"#);
+    }
+
     #[test]
     fn test_parse_test_query_other() {
         test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
|||||||
@@ -12,6 +12,10 @@ pub enum UserInputLeaf {
|
|||||||
lower: UserInputBound,
|
lower: UserInputBound,
|
||||||
upper: UserInputBound,
|
upper: UserInputBound,
|
||||||
},
|
},
|
||||||
|
Set {
|
||||||
|
field: Option<String>,
|
||||||
|
elements: Vec<String>,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Debug for UserInputLeaf {
|
impl Debug for UserInputLeaf {
|
||||||
@@ -31,6 +35,19 @@ impl Debug for UserInputLeaf {
|
|||||||
upper.display_upper(formatter)?;
|
upper.display_upper(formatter)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
UserInputLeaf::Set { field, elements } => {
|
||||||
|
if let Some(ref field) = field {
|
||||||
|
write!(formatter, "\"{}\": ", field)?;
|
||||||
|
}
|
||||||
|
write!(formatter, "IN [")?;
|
||||||
|
for (i, element) in elements.iter().enumerate() {
|
||||||
|
if i != 0 {
|
||||||
|
write!(formatter, " ")?;
|
||||||
|
}
|
||||||
|
write!(formatter, "\"{}\"", element)?;
|
||||||
|
}
|
||||||
|
write!(formatter, "]")
|
||||||
|
}
|
||||||
UserInputLeaf::All => write!(formatter, "*"),
|
UserInputLeaf::All => write!(formatter, "*"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
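A standalone sketch of what the new `Set` arm renders, built with a plain `String` instead of a `fmt::Formatter`; it reproduces the exact output asserted by `test_parse_test_query_set` above:

```rust
// Mirrors the Debug formatting of `UserInputLeaf::Set` from the diff:
// optional quoted field, then `IN [...]` with space-separated quoted elements.
fn render_set(field: Option<&str>, elements: &[&str]) -> String {
    let mut out = String::new();
    if let Some(field) = field {
        out.push_str(&format!("\"{}\": ", field));
    }
    out.push_str("IN [");
    for (i, element) in elements.iter().enumerate() {
        if i != 0 {
            out.push(' ');
        }
        out.push_str(&format!("\"{}\"", element));
    }
    out.push(']');
    out
}

fn main() {
    assert_eq!(
        render_set(Some("abc"), &["a", "b", "c"]),
        r#""abc": IN ["a" "b" "c"]"#
    );
    assert_eq!(render_set(None, &[]), "IN []");
}
```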
@@ -1,7 +1,7 @@
 //! Contains the aggregation request tree. Used to build an
-//! [AggregationCollector](super::AggregationCollector).
+//! [`AggregationCollector`](super::AggregationCollector).
 //!
-//! [Aggregations] is the top level entry point to create a request, which is a `HashMap<String,
+//! [`Aggregations`] is the top level entry point to create a request, which is a `HashMap<String,
 //! Aggregation>`.
 //!
 //! Requests are compatible with the json format of elasticsearch.

@@ -54,8 +54,8 @@ use super::bucket::{HistogramAggregation, TermsAggregation};
 use super::metric::{AverageAggregation, StatsAggregation};
 use super::VecWithNames;

-/// The top-level aggregation request structure, which contains [Aggregation] and their user defined
-/// names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
+/// The top-level aggregation request structure, which contains [`Aggregation`] and their user
+/// defined names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
 ///
 /// The key is the user defined name of the aggregation.
 pub type Aggregations = HashMap<String, Aggregation>;

@@ -139,15 +139,15 @@ pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
     fast_field_names
 }

-/// Aggregation request of [BucketAggregation] or [MetricAggregation].
+/// Aggregation request of [`BucketAggregation`] or [`MetricAggregation`].
 ///
 /// An aggregation is either a bucket or a metric.
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(untagged)]
 pub enum Aggregation {
-    /// Bucket aggregation, see [BucketAggregation] for details.
+    /// Bucket aggregation, see [`BucketAggregation`] for details.
     Bucket(BucketAggregation),
-    /// Metric aggregation, see [MetricAggregation] for details.
+    /// Metric aggregation, see [`MetricAggregation`] for details.
     Metric(MetricAggregation),
 }

@@ -4,14 +4,14 @@ use std::rc::Rc;
 use std::sync::atomic::AtomicU32;
 use std::sync::Arc;

+use fastfield_codecs::Column;
+
 use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
 use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
 use super::metric::{AverageAggregation, StatsAggregation};
 use super::segment_agg_result::BucketCount;
 use super::VecWithNames;
-use crate::fastfield::{
-    type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,
-};
+use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
 use crate::schema::{Cardinality, Type};
 use crate::{InvertedIndexReader, SegmentReader, TantivyError};

@@ -37,10 +37,16 @@ impl AggregationsWithAccessor {
 #[derive(Clone)]
 pub(crate) enum FastFieldAccessor {
     Multi(MultiValuedFastFieldReader<u64>),
-    Single(DynamicFastFieldReader<u64>),
+    Single(Arc<dyn Column<u64>>),
 }
 impl FastFieldAccessor {
-    pub fn as_single(&self) -> Option<&DynamicFastFieldReader<u64>> {
+    pub fn as_single(&self) -> Option<&dyn Column<u64>> {
+        match self {
+            FastFieldAccessor::Multi(_) => None,
+            FastFieldAccessor::Single(reader) => Some(&**reader),
+        }
+    }
+    pub fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
         match self {
             FastFieldAccessor::Multi(_) => None,
             FastFieldAccessor::Single(reader) => Some(reader),

@@ -118,7 +124,7 @@ impl BucketAggregationWithAccessor {
 pub struct MetricAggregationWithAccessor {
     pub metric: MetricAggregation,
     pub field_type: Type,
-    pub accessor: DynamicFastFieldReader<u64>,
+    pub accessor: Arc<dyn Column>,
 }

 impl MetricAggregationWithAccessor {

@@ -134,9 +140,8 @@ impl MetricAggregationWithAccessor {

         Ok(MetricAggregationWithAccessor {
             accessor: accessor
-                .as_single()
-                .expect("unexpected fast field cardinality")
-                .clone(),
+                .into_single()
+                .expect("unexpected fast field cardinality"),
             field_type,
             metric: metric.clone(),
         })
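The switch from a concrete `DynamicFastFieldReader<u64>` to `Arc<dyn Column<u64>>` is what makes the new `into_single` useful: the metric accessor can now take ownership of a cheaply movable `Arc` instead of cloning a reader. A self-contained sketch of the borrow-vs-consume pattern, with a stand-in `Column` trait (tantivy's real one lives in the `fastfield_codecs` crate) and only the `Single` variant kept for brevity:

```rust
use std::sync::Arc;

// Minimal stand-in for fastfield_codecs::Column, just for this sketch.
trait Column<T = u64> {
    fn get_val(&self, idx: u64) -> T;
}

struct VecColumn(Vec<u64>);
impl Column for VecColumn {
    fn get_val(&self, idx: u64) -> u64 {
        self.0[idx as usize]
    }
}

enum FastFieldAccessor {
    Single(Arc<dyn Column<u64>>),
}

impl FastFieldAccessor {
    // Borrow for reading...
    fn as_single(&self) -> Option<&dyn Column<u64>> {
        match self {
            FastFieldAccessor::Single(reader) => Some(&**reader),
        }
    }
    // ...or take ownership of the Arc to store it, which is what
    // `MetricAggregationWithAccessor` now does instead of `.clone()`.
    fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
        match self {
            FastFieldAccessor::Single(reader) => Some(reader),
        }
    }
}

fn main() {
    let accessor = FastFieldAccessor::Single(Arc::new(VecColumn(vec![7, 9])));
    assert_eq!(accessor.as_single().unwrap().get_val(1), 9);
    let owned: Arc<dyn Column<u64>> = accessor.into_single().unwrap();
    assert_eq!(owned.get_val(0), 7);
}
```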
@@ -189,13 +194,7 @@ fn get_ff_reader_and_validate(
         .ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
     let field_type = reader.schema().get_field_entry(field).field_type();

-    if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
-        if ff_type == FastType::Date {
-            return Err(TantivyError::InvalidArgument(
-                "Unsupported field type date in aggregation".to_string(),
-            ));
-        }
-
+    if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
         if cardinality != field_cardinality {
             return Err(TantivyError::InvalidArgument(format!(
                 "Invalid field cardinality on field {} expected {:?}, but got {:?}",

@@ -4,9 +4,7 @@
 //! intermediate average results, which is the sum and the number of values. The actual average is
 //! calculated on the step from intermediate to final aggregation result tree.

-use std::collections::HashMap;
-
-use fnv::FnvHashMap;
+use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};

 use super::agg_req::BucketAggregationInternal;

@@ -14,11 +12,12 @@ use super::bucket::GetDocCount;
 use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
 use super::metric::{SingleMetricResult, Stats};
 use super::Key;
+use crate::schema::Schema;
 use crate::TantivyError;

 #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
 /// The final aggegation result.
-pub struct AggregationResults(pub HashMap<String, AggregationResult>);
+pub struct AggregationResults(pub FxHashMap<String, AggregationResult>);

 impl AggregationResults {
     pub(crate) fn get_value_from_aggregation(

@@ -113,14 +112,14 @@ pub enum BucketResult {
         ///
         /// If there are holes depends on the request, if min_doc_count is 0, then there are no
         /// holes between the first and last bucket.
-        /// See [HistogramAggregation](super::bucket::HistogramAggregation)
+        /// See [`HistogramAggregation`](super::bucket::HistogramAggregation)
         buckets: BucketEntries<BucketEntry>,
     },
     /// This is the term result
     Terms {
         /// The buckets.
         ///
-        /// See [TermsAggregation](super::bucket::TermsAggregation)
+        /// See [`TermsAggregation`](super::bucket::TermsAggregation)
         buckets: Vec<BucketEntry>,
         /// The number of documents that didn’t make it into to TOP N due to shard_size or size
         sum_other_doc_count: u64,

@@ -131,9 +130,12 @@ pub enum BucketResult {
 }

 impl BucketResult {
-    pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
+    pub(crate) fn empty_from_req(
+        req: &BucketAggregationInternal,
+        schema: &Schema,
+    ) -> crate::Result<Self> {
         let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
-        empty_bucket.into_final_bucket_result(req)
+        empty_bucket.into_final_bucket_result(req, schema)
     }
 }

@@ -145,7 +147,7 @@ pub enum BucketEntries<T> {
     /// Vector format bucket entries
     Vec(Vec<T>),
     /// HashMap format bucket entries
-    HashMap(FnvHashMap<String, T>),
+    HashMap(FxHashMap<String, T>),
 }

 /// This is the default entry for a bucket, which contains a key, count, and optionally

@@ -176,6 +178,9 @@ pub enum BucketEntries<T> {
 /// ```
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct BucketEntry {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    /// The string representation of the bucket.
+    pub key_as_string: Option<String>,
     /// The identifier of the bucket.
     pub key: Key,
     /// Number of documents in the bucket.

@@ -234,10 +239,16 @@ pub struct RangeBucketEntry {
     #[serde(flatten)]
     /// sub-aggregations in this bucket.
     pub sub_aggregation: AggregationResults,
-    /// The from range of the bucket. Equals f64::MIN when None.
+    /// The from range of the bucket. Equals `f64::MIN` when `None`.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub from: Option<f64>,
-    /// The to range of the bucket. Equals f64::MAX when None.
+    /// The to range of the bucket. Equals `f64::MAX` when `None`.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub to: Option<f64>,
+    /// The optional string representation for the `from` range.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub from_as_string: Option<String>,
+    /// The optional string representation for the `to` range.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub to_as_string: Option<String>,
 }

@@ -1,6 +1,7 @@
 use std::cmp::Ordering;
 use std::fmt::Display;

+use fastfield_codecs::Column;
 use itertools::Itertools;
 use serde::{Deserialize, Serialize};

@@ -9,13 +10,12 @@ use crate::aggregation::agg_req_with_accessor::{
     AggregationsWithAccessor, BucketAggregationWithAccessor,
 };
 use crate::aggregation::agg_result::BucketEntry;
-use crate::aggregation::f64_from_fastfield_u64;
 use crate::aggregation::intermediate_agg_result::{
     IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
 };
 use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
-use crate::schema::Type;
+use crate::aggregation::{f64_from_fastfield_u64, format_date};
+use crate::schema::{Schema, Type};
 use crate::{DocId, TantivyError};

 /// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.

@@ -37,14 +37,14 @@ use crate::{DocId, TantivyError};
 /// [hard_bounds](HistogramAggregation::hard_bounds).
 ///
 /// # Result
-/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
-/// [BucketEntry](crate::aggregation::agg_result::BucketEntry) on the
-/// AggregationCollector.
+/// Result type is [`BucketResult`](crate::aggregation::agg_result::BucketResult) with
+/// [`BucketEntry`](crate::aggregation::agg_result::BucketEntry) on the
+/// `AggregationCollector`.
 ///
 /// Result type is
-/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
-/// [crate::aggregation::intermediate_agg_result::IntermediateHistogramBucketEntry] on the
-/// DistributedAggregationCollector.
+/// [`IntermediateBucketResult`](crate::aggregation::intermediate_agg_result::IntermediateBucketResult) with
+/// [`IntermediateHistogramBucketEntry`](crate::aggregation::intermediate_agg_result::IntermediateHistogramBucketEntry) on the
+/// `DistributedAggregationCollector`.
 ///
 /// # Limitations/Compatibility
 ///

@@ -61,7 +61,7 @@ use crate::{DocId, TantivyError};
 /// ```
 ///
 /// Response
-/// See [BucketEntry](crate::aggregation::agg_result::BucketEntry)
+/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)

 #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
 pub struct HistogramAggregation {
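For reference, a request driving the histogram aggregation documented above can be built from its elasticsearch-compatible JSON form. This sketch assumes a `price` fast field and the serde shape implied by the derives on `HistogramAggregation`:

```rust
use tantivy::aggregation::agg_req::Aggregations;

fn main() -> serde_json::Result<()> {
    // "histogram" selects the bucket type; "field" and "interval" are the
    // required HistogramAggregation parameters.
    let agg_req: Aggregations = serde_json::from_str(
        r#"{ "prices": { "histogram": { "field": "price", "interval": 10.0 } } }"#,
    )?;
    assert_eq!(agg_req.len(), 1);
    Ok(())
}
```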
@@ -263,7 +263,7 @@ impl SegmentHistogramCollector {
         req: &HistogramAggregation,
         sub_aggregation: &AggregationsWithAccessor,
         field_type: Type,
-        accessor: &DynamicFastFieldReader<u64>,
+        accessor: &dyn Column<u64>,
     ) -> crate::Result<Self> {
         req.validate()?;
         let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);

@@ -331,10 +331,10 @@ impl SegmentHistogramCollector {
             .expect("unexpected fast field cardinatility");
         let mut iter = doc.chunks_exact(4);
         for docs in iter.by_ref() {
-            let val0 = self.f64_from_fastfield_u64(accessor.get(docs[0]));
-            let val1 = self.f64_from_fastfield_u64(accessor.get(docs[1]));
-            let val2 = self.f64_from_fastfield_u64(accessor.get(docs[2]));
-            let val3 = self.f64_from_fastfield_u64(accessor.get(docs[3]));
+            let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
+            let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
+            let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
+            let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));

             let bucket_pos0 = get_bucket_num(val0);
             let bucket_pos1 = get_bucket_num(val1);

@@ -370,8 +370,8 @@ impl SegmentHistogramCollector {
                 &bucket_with_accessor.sub_aggregation,
             )?;
         }
-        for doc in iter.remainder() {
-            let val = f64_from_fastfield_u64(accessor.get(*doc), &self.field_type);
+        for &doc in iter.remainder() {
+            let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
             if !bounds.contains(val) {
                 continue;
             }

@@ -382,7 +382,7 @@ impl SegmentHistogramCollector {
                 self.buckets[bucket_pos].key,
                 get_bucket_val(val, self.interval, self.offset) as f64
             );
-            self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
+            self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
         }
         if force_flush {
             if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {

@@ -425,7 +425,7 @@ impl SegmentHistogramCollector {
         let bucket = &mut self.buckets[bucket_pos];
         bucket.doc_count += 1;
         if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
-            (&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor)?;
+            sub_aggregation[bucket_pos].collect(doc, bucket_with_accessor)?;
         }
         Ok(())
     }

@@ -451,8 +451,9 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
     buckets: Vec<IntermediateHistogramBucketEntry>,
     histogram_req: &HistogramAggregation,
     sub_aggregation: &AggregationsInternal,
+    schema: &Schema,
 ) -> crate::Result<Vec<BucketEntry>> {
-    // Generate the the full list of buckets without gaps.
+    // Generate the full list of buckets without gaps.
     //
     // The bounds are the min max from the current buckets, optionally extended by
     // extended_bounds from the request

@@ -491,7 +492,9 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
             sub_aggregation: empty_sub_aggregation.clone(),
         },
     })
-    .map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
+    .map(|intermediate_bucket| {
+        intermediate_bucket.into_final_bucket_entry(sub_aggregation, schema)
+    })
     .collect::<crate::Result<Vec<_>>>()
 }

@@ -500,25 +503,48 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
     buckets: Vec<IntermediateHistogramBucketEntry>,
     histogram_req: &HistogramAggregation,
     sub_aggregation: &AggregationsInternal,
+    schema: &Schema,
 ) -> crate::Result<Vec<BucketEntry>> {
-    if histogram_req.min_doc_count() == 0 {
+    let mut buckets = if histogram_req.min_doc_count() == 0 {
         // With min_doc_count != 0, we may need to add buckets, so that there are no
         // gaps, since intermediate result does not contain empty buckets (filtered to
         // reduce serialization size).

-        intermediate_buckets_to_final_buckets_fill_gaps(buckets, histogram_req, sub_aggregation)
+        intermediate_buckets_to_final_buckets_fill_gaps(
+            buckets,
+            histogram_req,
+            sub_aggregation,
+            schema,
+        )?
     } else {
         buckets
             .into_iter()
             .filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
-            .map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
-            .collect::<crate::Result<Vec<_>>>()
+            .map(|histogram_bucket| {
+                histogram_bucket.into_final_bucket_entry(sub_aggregation, schema)
+            })
+            .collect::<crate::Result<Vec<_>>>()?
+    };
+
+    // If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc339
+    let field = schema
+        .get_field(&histogram_req.field)
+        .ok_or_else(|| TantivyError::FieldNotFound(histogram_req.field.to_string()))?;
+    if schema.get_field_entry(field).field_type().is_date() {
+        for bucket in buckets.iter_mut() {
+            if let crate::aggregation::Key::F64(val) = bucket.key {
+                let key_as_string = format_date(val as i64)?;
+                bucket.key_as_string = Some(key_as_string);
+            }
+        }
+    }
+
+    Ok(buckets)
 }

 /// Applies req extended_bounds/hard_bounds on the min_max value
 ///
-/// May return (f64::MAX, f64::MIN), if there is no range.
+/// May return `(f64::MAX, f64::MIN)`, if there is no range.
 fn get_req_min_max(req: &HistogramAggregation, min_max: Option<(f64, f64)>) -> (f64, f64) {
     let (mut min, mut max) = min_max.unwrap_or((f64::MAX, f64::MIN));

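`get_bucket_val`, referenced above, keys each document value to the lower edge of its interval bucket, shifted by the optional offset. A hedged reimplementation of that rule, checking the one-day-in-microseconds arithmetic that the date test below relies on:

```rust
// Hedged sketch of the bucketing rule: the bucket key is the largest
// `offset + k * interval` that is <= val.
fn get_bucket_val(val: f64, interval: f64, offset: f64) -> f64 {
    ((val - offset) / interval).floor() * interval + offset
}

fn main() {
    let one_day_micros = 86_400_000_000.0; // interval used in the date test
    // 2019-01-01T12:00:00Z in microseconds since the epoch...
    let noon = 1_546_344_000_000_000.0;
    // ...buckets to the day boundary 2019-01-01T00:00:00Z.
    assert_eq!(
        get_bucket_val(noon, one_day_micros, 0.0),
        1_546_300_800_000_000.0
    );
}
```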
@@ -1372,6 +1398,63 @@ mod tests {
         Ok(())
     }

+    #[test]
+    fn histogram_date_test_single_segment() -> crate::Result<()> {
+        histogram_date_test_with_opt(true)
+    }
+
+    #[test]
+    fn histogram_date_test_multi_segment() -> crate::Result<()> {
+        histogram_date_test_with_opt(false)
+    }
+
+    fn histogram_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
+        let index = get_test_index_2_segments(merge_segments)?;
+
+        let agg_req: Aggregations = vec![(
+            "histogram".to_string(),
+            Aggregation::Bucket(BucketAggregation {
+                bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
+                    field: "date".to_string(),
+                    interval: 86400000000.0, // one day in microseconds
+                    ..Default::default()
+                }),
+                sub_aggregation: Default::default(),
+            }),
+        )]
+        .into_iter()
+        .collect();
+
+        let agg_res = exec_request(agg_req, &index)?;
+
+        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
+
+        assert_eq!(res["histogram"]["buckets"][0]["key"], 1546300800000000.0);
+        assert_eq!(
+            res["histogram"]["buckets"][0]["key_as_string"],
+            "2019-01-01T00:00:00Z"
+        );
+        assert_eq!(res["histogram"]["buckets"][0]["doc_count"], 1);
+
+        assert_eq!(res["histogram"]["buckets"][1]["key"], 1546387200000000.0);
+        assert_eq!(
+            res["histogram"]["buckets"][1]["key_as_string"],
+            "2019-01-02T00:00:00Z"
+        );
+
+        assert_eq!(res["histogram"]["buckets"][1]["doc_count"], 5);
+
+        assert_eq!(res["histogram"]["buckets"][2]["key"], 1546473600000000.0);
+        assert_eq!(
+            res["histogram"]["buckets"][2]["key_as_string"],
+            "2019-01-03T00:00:00Z"
+        );
+
+        assert_eq!(res["histogram"]["buckets"][3], Value::Null);
+
+        Ok(())
+    }
+
     #[test]
     fn histogram_invalid_request() -> crate::Result<()> {
         let index = get_test_index_2_segments(true)?;

@@ -1,11 +1,11 @@
 //! Module for all bucket aggregations.
 //!
 //! BucketAggregations create buckets of documents
-//! [BucketAggregation](super::agg_req::BucketAggregation).
+//! [`BucketAggregation`](super::agg_req::BucketAggregation).
 //!
-//! Results of final buckets are [BucketResult](super::agg_result::BucketResult).
+//! Results of final buckets are [`BucketResult`](super::agg_result::BucketResult).
 //! Results of intermediate buckets are
-//! [IntermediateBucketResult](super::intermediate_agg_result::IntermediateBucketResult)
+//! [`IntermediateBucketResult`](super::intermediate_agg_result::IntermediateBucketResult)

 mod histogram;
 mod range;

@@ -1,7 +1,8 @@
 use std::fmt::Debug;
 use std::ops::Range;

-use fnv::FnvHashMap;
+use fastfield_codecs::MonotonicallyMappableToU64;
+use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};

 use crate::aggregation::agg_req_with_accessor::{

@@ -11,8 +12,9 @@ use crate::aggregation::intermediate_agg_result::{
     IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
 };
 use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
-use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
-use crate::fastfield::FastFieldReader;
+use crate::aggregation::{
+    f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
+};
 use crate::schema::Type;
 use crate::{DocId, TantivyError};

@@ -23,14 +25,14 @@ use crate::{DocId, TantivyError};
 /// against each bucket range. Note that this aggregation includes the from value and excludes the
 /// to value for each range.
 ///
-/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
-/// [RangeBucketEntry](crate::aggregation::agg_result::RangeBucketEntry) on the
-/// AggregationCollector.
+/// Result type is [`BucketResult`](crate::aggregation::agg_result::BucketResult) with
+/// [`RangeBucketEntry`](crate::aggregation::agg_result::RangeBucketEntry) on the
+/// `AggregationCollector`.
 ///
 /// Result type is
-/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
-/// [crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry] on the
-/// DistributedAggregationCollector.
+/// [`IntermediateBucketResult`](crate::aggregation::intermediate_agg_result::IntermediateBucketResult) with
+/// [`IntermediateRangeBucketEntry`](crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry) on the
+/// `DistributedAggregationCollector`.
 ///
 /// # Limitations/Compatibility
 /// Overlapping ranges are not yet supported.

@@ -68,11 +70,11 @@ pub struct RangeAggregationRange {
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub key: Option<String>,
     /// The from range value, which is inclusive in the range.
-    /// None equals to an open ended interval.
+    /// `None` equals to an open ended interval.
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub from: Option<f64>,
     /// The to range value, which is not inclusive in the range.
-    /// None equals to an open ended interval.
+    /// `None` equals to an open ended interval.
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub to: Option<f64>,
 }
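A request matching the range-aggregation docs above, in the elasticsearch-compatible JSON form: `from` is inclusive, `to` exclusive, and omitting either side leaves the interval open. The `score` fast field here is an assumption for illustration:

```rust
use tantivy::aggregation::agg_req::Aggregations;

fn main() -> serde_json::Result<()> {
    let agg_req: Aggregations = serde_json::from_str(
        r#"{
            "scores": {
                "range": {
                    "field": "score",
                    "ranges": [
                        { "to": 3.0 },
                        { "from": 3.0, "to": 7.0 },
                        { "from": 7.0 }
                    ]
                }
            }
        }"#,
    )?;
    assert_eq!(agg_req.len(), 1);
    Ok(())
}
```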
@@ -102,7 +104,7 @@ impl From<Range<f64>> for RangeAggregationRange {
 pub(crate) struct InternalRangeAggregationRange {
     /// Custom key for the range bucket
     key: Option<String>,
-    /// u64 range value
+    /// `u64` range value
     range: Range<u64>,
 }

@@ -132,9 +134,9 @@ pub(crate) struct SegmentRangeBucketEntry {
     pub key: Key,
     pub doc_count: u64,
     pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
-    /// The from range of the bucket. Equals f64::MIN when None.
+    /// The from range of the bucket. Equals `f64::MIN` when `None`.
     pub from: Option<f64>,
-    /// The to range of the bucket. Equals f64::MAX when None. Open interval, `to` is not
+    /// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not
     /// inclusive.
     pub to: Option<f64>,
 }

@@ -177,12 +179,12 @@ impl SegmentRangeCollector {
     ) -> crate::Result<IntermediateBucketResult> {
         let field_type = self.field_type;

-        let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
+        let buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
             .buckets
             .into_iter()
             .map(move |range_bucket| {
                 Ok((
-                    range_to_string(&range_bucket.range, &field_type),
+                    range_to_string(&range_bucket.range, &field_type)?,
                     range_bucket
                         .bucket
                         .into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,

@@ -210,8 +212,8 @@ impl SegmentRangeCollector {
         let key = range
             .key
             .clone()
-            .map(Key::Str)
-            .unwrap_or_else(|| range_to_key(&range.range, &field_type));
+            .map(|key| Ok(Key::Str(key)))
+            .unwrap_or_else(|| range_to_key(&range.range, &field_type))?;
         let to = if range.range.end == u64::MAX {
             None
         } else {

@@ -229,6 +231,7 @@ impl SegmentRangeCollector {
                 sub_aggregation,
             )?)
         };
+
         Ok(SegmentRangeAndBucketEntry {
             range: range.range.clone(),
             bucket: SegmentRangeBucketEntry {

@@ -262,12 +265,12 @@ impl SegmentRangeCollector {
         let accessor = bucket_with_accessor
             .accessor
             .as_single()
-            .expect("unexpected fast field cardinatility");
+            .expect("unexpected fast field cardinality");
         for docs in iter.by_ref() {
-            let val1 = accessor.get(docs[0]);
-            let val2 = accessor.get(docs[1]);
-            let val3 = accessor.get(docs[2]);
-            let val4 = accessor.get(docs[3]);
+            let val1 = accessor.get_val(docs[0]);
+            let val2 = accessor.get_val(docs[1]);
+            let val3 = accessor.get_val(docs[2]);
+            let val4 = accessor.get_val(docs[3]);
             let bucket_pos1 = self.get_bucket_pos(val1);
             let bucket_pos2 = self.get_bucket_pos(val2);
             let bucket_pos3 = self.get_bucket_pos(val3);

@@ -278,10 +281,10 @@ impl SegmentRangeCollector {
             self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
             self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
         }
-        for doc in iter.remainder() {
-            let val = accessor.get(*doc);
+        for &doc in iter.remainder() {
+            let val = accessor.get_val(doc);
             let bucket_pos = self.get_bucket_pos(val);
-            self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
+            self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
         }
         if force_flush {
             for bucket in &mut self.buckets {

@@ -324,8 +327,8 @@ impl SegmentRangeCollector {
 /// Converts the user provided f64 range value to fast field value space.
 ///
 /// Internally fast field values are always stored as u64.
-/// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
-/// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
+/// If the fast field has u64 `[1, 2, 5]`, these values are stored as is in the fast field.
+/// A fast field with f64 `[1.0, 2.0, 5.0]` is converted to u64 space, using a
 /// monotonic mapping function, so the order is preserved.
 ///
 /// Consequently, a f64 user range 1.0..3.0 needs to be converted to fast field value space using
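The docs above rely on an order-preserving f64-to-u64 mapping so that range bounds can be compared directly in u64 fast-field space. A sketch of the standard bit trick (not necessarily tantivy's exact implementation): flip all bits of negative values, set the sign bit of non-negative ones.

```rust
// Order-preserving f64 -> u64 mapping: if a < b as f64, then
// map(a) < map(b) as u64.
fn f64_to_u64_monotonic(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_negative() {
        !bits // negatives: invert everything, so larger f64 => larger u64
    } else {
        bits ^ (1 << 63) // non-negatives: set the sign bit
    }
}

fn main() {
    let vals = [-2.5f64, -1.0, 0.0, 1.0, 2.0, 5.0];
    let mapped: Vec<u64> = vals.iter().map(|&v| f64_to_u64_monotonic(v)).collect();
    assert!(mapped.windows(2).all(|w| w[0] < w[1])); // order preserved
}
```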
@@ -403,33 +406,45 @@ fn extend_validate_ranges(
     Ok(converted_buckets)
 }

-pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
+pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> crate::Result<String> {
     // is_start is there for malformed requests, e.g. ig the user passes the range u64::MIN..0.0,
     // it should be rendered as "*-0" and not "*-*"
     let to_str = |val: u64, is_start: bool| {
         if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
-            "*".to_string()
+            Ok("*".to_string())
+        } else if *field_type == Type::Date {
+            let val = i64::from_u64(val);
+            format_date(val)
         } else {
-            f64_from_fastfield_u64(val, field_type).to_string()
+            Ok(f64_from_fastfield_u64(val, field_type).to_string())
         }
     };

-    format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
+    Ok(format!(
+        "{}-{}",
+        to_str(range.start, true)?,
+        to_str(range.end, false)?
+    ))
 }

-pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
-    Key::Str(range_to_string(range, field_type))
+pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Result<Key> {
+    Ok(Key::Str(range_to_string(range, field_type)?))
 }

 #[cfg(test)]
 mod tests {
+    use fastfield_codecs::MonotonicallyMappableToU64;
+    use serde_json::Value;

     use super::*;
     use crate::aggregation::agg_req::{
         Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
     };
-    use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
-    use crate::fastfield::FastValue;
+    use crate::aggregation::tests::{
+        exec_request, exec_request_with_query, get_test_index_2_segments,
+        get_test_index_with_num_docs,
+    };

     pub fn get_collector_from_ranges(
         ranges: Vec<RangeAggregationRange>,

@@ -567,6 +582,77 @@ mod tests {
         Ok(())
     }

+    #[test]
+    fn range_date_test_single_segment() -> crate::Result<()> {
+        range_date_test_with_opt(true)
+    }
+
+    #[test]
+    fn range_date_test_multi_segment() -> crate::Result<()> {
+        range_date_test_with_opt(false)
+    }
+
+    fn range_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
+        let index = get_test_index_2_segments(merge_segments)?;
+
+        let agg_req: Aggregations = vec![(
+            "date_ranges".to_string(),
+            Aggregation::Bucket(BucketAggregation {
+                bucket_agg: BucketAggregationType::Range(RangeAggregation {
+                    field: "date".to_string(),
+                    ranges: vec![
+                        RangeAggregationRange {
+                            key: None,
+                            from: None,
+                            to: Some(1546300800000000.0f64),
+                        },
+                        RangeAggregationRange {
+                            key: None,
+                            from: Some(1546300800000000.0f64),
+                            to: Some(1546387200000000.0f64),
+                        },
+                    ],
+                    keyed: false,
+                }),
+                sub_aggregation: Default::default(),
+            }),
+        )]
+        .into_iter()
+        .collect();
+
+        let agg_res = exec_request(agg_req, &index)?;
+
+        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
+
+        assert_eq!(
+            res["date_ranges"]["buckets"][0]["from_as_string"],
+            Value::Null
+        );
+        assert_eq!(
+            res["date_ranges"]["buckets"][0]["key"],
+            "*-2019-01-01T00:00:00Z"
+        );
+        assert_eq!(
+            res["date_ranges"]["buckets"][1]["from_as_string"],
+            "2019-01-01T00:00:00Z"
+        );
+        assert_eq!(
+            res["date_ranges"]["buckets"][1]["to_as_string"],
+            "2019-01-02T00:00:00Z"
+        );
+
+        assert_eq!(
+            res["date_ranges"]["buckets"][2]["from_as_string"],
+            "2019-01-02T00:00:00Z"
+        );
+        assert_eq!(
+            res["date_ranges"]["buckets"][2]["to_as_string"],
+            Value::Null
+        );
+
+        Ok(())
+    }
+
     #[test]
     fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
         let index = get_test_index_with_num_docs(false, 100)?;

@@ -1,7 +1,7 @@
 use std::fmt::Debug;

-use fnv::FnvHashMap;
 use itertools::Itertools;
+use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};

 use super::{CustomOrder, Order, OrderTarget};

@@ -17,7 +17,11 @@ use crate::fastfield::MultiValuedFastFieldReader;
 use crate::schema::Type;
 use crate::{DocId, TantivyError};

-/// Creates a bucket for every unique term
+/// Creates a bucket for every unique term and counts the number of occurences.
+/// Note that doc_count in the response buckets equals term count here.
+///
+/// If the text is untokenized and single value, that means one term per document and therefore it
+/// is in fact doc count.
 ///
 /// ### Terminology
 /// Shard parameters are supposed to be equivalent to elasticsearch shard parameter.

@@ -31,7 +35,7 @@ use crate::{DocId, TantivyError};
 ///
 /// Even with a larger `segment_size` value, doc_count values for a terms aggregation may be
 /// approximate. As a result, any sub-aggregations on the terms aggregation may also be approximate.
-/// `sum_other_doc_count` is the number of documents that didn’t make it into the the top size
+/// `sum_other_doc_count` is the number of documents that didn’t make it into the top size
 /// terms. If this is greater than 0, you can be sure that the terms agg had to throw away some
 /// buckets, either because they didn’t fit into size on the root node or they didn’t fit into
 /// `segment_size` on the segment node.

@@ -42,14 +46,14 @@ use crate::{DocId, TantivyError};
 /// each segment. It’s the sum of the size of the largest bucket on each segment that didn’t fit
 /// into segment_size.
 ///
-/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
-/// [TermBucketEntry](crate::aggregation::agg_result::BucketEntry) on the
-/// AggregationCollector.
+/// Result type is [`BucketResult`](crate::aggregation::agg_result::BucketResult) with
+/// [`TermBucketEntry`](crate::aggregation::agg_result::BucketEntry) on the
+/// `AggregationCollector`.
 ///
 /// Result type is
-/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
-/// [crate::aggregation::intermediate_agg_result::IntermediateTermBucketEntry] on the
-/// DistributedAggregationCollector.
+/// [`IntermediateBucketResult`](crate::aggregation::intermediate_agg_result::IntermediateBucketResult) with
+/// [`IntermediateTermBucketEntry`](crate::aggregation::intermediate_agg_result::IntermediateTermBucketEntry) on the
+/// `DistributedAggregationCollector`.
 ///
 /// # Limitations/Compatibility
 ///

@@ -64,6 +68,25 @@ use crate::{DocId, TantivyError};
 ///     }
 /// }
 /// ```
+///
+/// /// # Response JSON Format
+/// ```json
+/// {
+///     ...
+///     "aggregations": {
+///         "genres": {
+///             "doc_count_error_upper_bound": 0,
+///             "sum_other_doc_count": 0,
+///             "buckets": [
+///                 { "key": "drumnbass", "doc_count": 6 },
+///                 { "key": "raggae", "doc_count": 4 },
+///                 { "key": "jazz", "doc_count": 2 }
+///             ]
+///         }
+///     }
+/// }
+/// ```
 #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
 pub struct TermsAggregation {
     /// The field to aggregate on.
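A request that would produce a response like the JSON example added above, again in the elasticsearch-compatible form (the `genre` field name is an assumption for illustration):

```rust
use tantivy::aggregation::agg_req::Aggregations;

fn main() -> serde_json::Result<()> {
    // "terms" selects the bucket type; "field" names the fast field to
    // bucket on.
    let agg_req: Aggregations =
        serde_json::from_str(r#"{ "genres": { "terms": { "field": "genre" } } }"#)?;
    assert_eq!(agg_req.len(), 1);
    Ok(())
}
```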
@@ -176,7 +199,7 @@ impl TermsAggregationInternal {
 #[derive(Clone, Debug, PartialEq)]
 /// Container to store term_ids and their buckets.
 struct TermBuckets {
-    pub(crate) entries: FnvHashMap<u32, TermBucketEntry>,
+    pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
     blueprint: Option<SegmentAggregationResultsCollector>,
 }

@@ -374,7 +397,7 @@ impl SegmentTermCollector {
             .expect("internal error: inverted index not loaded for term aggregation");
         let term_dict = inverted_index.terms();

-        let mut dict: FnvHashMap<String, IntermediateTermBucketEntry> = Default::default();
+        let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
         let mut buffer = vec![];
         for (term_id, entry) in entries {
             term_dict

@@ -1106,9 +1129,9 @@ mod tests {

         assert_eq!(res["my_texts"]["buckets"][0]["key"], "terma");
         assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 4);
-        assert_eq!(res["my_texts"]["buckets"][1]["key"], "termb");
+        assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
         assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 0);
-        assert_eq!(res["my_texts"]["buckets"][2]["key"], "termc");
+        assert_eq!(res["my_texts"]["buckets"][2]["key"], "termb");
         assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 0);
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
         assert_eq!(res["my_texts"]["doc_count_error_upper_bound"], 0);

@@ -1206,11 +1229,43 @@ mod tests {
         .collect();

         let res = exec_request_with_query(agg_req, &index, None);

         assert!(res.is_err());

         Ok(())
     }

+    #[test]
+    fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
+        let terms = vec!["Hello Hello", "Hallo Hallo"];
+
+        let index = get_test_index_from_terms(true, &[terms])?;
+
+        let agg_req: Aggregations = vec![(
+            "my_texts".to_string(),
+            Aggregation::Bucket(BucketAggregation {
+                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
+                    field: "text_id".to_string(),
+                    min_doc_count: Some(0),
+                    ..Default::default()
+                }),
+                sub_aggregation: Default::default(),
+            }),
+        )]
+        .into_iter()
+        .collect();
+
+        let res = exec_request_with_query(agg_req, &index, None).unwrap();
+
+        assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
+        assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
+
+        assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
+        assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);
+
+        Ok(())
+    }
+
     #[test]
     fn test_json_format() -> crate::Result<()> {
         let agg_req: Aggregations = vec![(

@@ -7,6 +7,7 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
 use super::segment_agg_result::SegmentAggregationResultsCollector;
 use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
 use crate::collector::{Collector, SegmentCollector};
+use crate::schema::Schema;
 use crate::{SegmentReader, TantivyError};

 /// The default max bucket count, before the aggregation fails.

@@ -16,6 +17,7 @@ pub const MAX_BUCKET_COUNT: u32 = 65000;
 ///
 /// The collector collects all aggregations by the underlying aggregation request.
 pub struct AggregationCollector {
+    schema: Schema,
     agg: Aggregations,
     max_bucket_count: u32,
 }

@@ -25,8 +27,9 @@ impl AggregationCollector {
     ///
     /// Aggregation fails when the total bucket count is higher than max_bucket_count.
     /// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
-    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
+    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>, schema: Schema) -> Self {
         Self {
+            schema,
             agg,
             max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
         }
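A hedged end-to-end sketch of the changed constructor: `from_aggs` now takes the index `Schema` as a third argument so date fields can be detected when the final result is built. Paths and signatures are as of this change and may differ in other versions; the `score` field and average aggregation are illustrative assumptions:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, Index};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut schema_builder = Schema::builder();
    let score = schema_builder.add_f64_field("score", FAST);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());
    let mut writer = index.writer(50_000_000)?;
    let _ = writer.add_document(doc!(score => 1.5f64));
    writer.commit()?;

    let agg_req: Aggregations =
        serde_json::from_str(r#"{ "avg_score": { "avg": { "field": "score" } } }"#)?;

    // New: the schema travels with the collector.
    let collector = AggregationCollector::from_aggs(agg_req, None, schema);
    let searcher = index.reader()?.searcher();
    let res: AggregationResults = searcher.search(&AllQuery, &collector)?;
    println!("{}", serde_json::to_string(&res)?);
    Ok(())
}
```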
@@ -113,7 +116,7 @@ impl Collector for AggregationCollector {
|
|||||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||||
) -> crate::Result<Self::Fruit> {
|
) -> crate::Result<Self::Fruit> {
|
||||||
let res = merge_fruits(segment_fruits)?;
|
let res = merge_fruits(segment_fruits)?;
|
||||||
res.into_final_bucket_result(self.agg.clone())
|
res.into_final_bucket_result(self.agg.clone(), &self.schema)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -131,7 +134,7 @@ fn merge_fruits(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// AggregationSegmentCollector does the aggregation collection on a segment.
|
/// `AggregationSegmentCollector` does the aggregation collection on a segment.
|
||||||
pub struct AggregationSegmentCollector {
|
pub struct AggregationSegmentCollector {
|
||||||
aggs_with_accessor: AggregationsWithAccessor,
|
aggs_with_accessor: AggregationsWithAccessor,
|
||||||
result: SegmentAggregationResultsCollector,
|
result: SegmentAggregationResultsCollector,
|
||||||
@@ -139,8 +142,8 @@ pub struct AggregationSegmentCollector {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl AggregationSegmentCollector {
|
impl AggregationSegmentCollector {
|
||||||
/// Creates an AggregationSegmentCollector from an [Aggregations] request and a segment reader.
|
/// Creates an `AggregationSegmentCollector from` an [`Aggregations`] request and a segment
|
||||||
/// Also includes validation, e.g. checking field types and existence.
|
/// reader. Also includes validation, e.g. checking field types and existence.
|
||||||
pub fn from_agg_req_and_reader(
|
pub fn from_agg_req_and_reader(
|
||||||
agg: &Aggregations,
|
agg: &Aggregations,
|
||||||
reader: &SegmentReader,
|
reader: &SegmentReader,
|
||||||
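The visible change for callers is the extra `Schema` argument on `AggregationCollector::from_aggs`. A minimal sketch of the new call shape, assuming the aggregation API paths of this era of the crate (the `score` field and the `average` request name are illustrative, not taken from this diff):

```rust
use tantivy::aggregation::agg_req::{Aggregation, Aggregations, MetricAggregation};
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::metric::AverageAggregation;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Index;

fn avg_score(index: &Index) -> tantivy::Result<AggregationResults> {
    let agg_req: Aggregations = vec![(
        "average".to_string(),
        Aggregation::Metric(MetricAggregation::Average(
            AverageAggregation::from_field_name("score".to_string()),
        )),
    )]
    .into_iter()
    .collect();

    // The schema now travels with the collector, so that date fields can be
    // detected when the final buckets are rendered.
    let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

    let searcher = index.reader()?.searcher();
    searcher.search(&AllQuery, &collector)
}
```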
src/aggregation/date.rs (new file, 18 lines):
@@ -0,0 +1,18 @@
+use time::format_description::well_known::Rfc3339;
+use time::OffsetDateTime;
+
+use crate::TantivyError;
+
+pub(crate) fn format_date(val: i64) -> crate::Result<String> {
+    let datetime =
+        OffsetDateTime::from_unix_timestamp_nanos(1_000 * (val as i128)).map_err(|err| {
+            TantivyError::InvalidArgument(format!(
+                "Could not convert {:?} to OffsetDateTime, err {:?}",
+                val, err
+            ))
+        })?;
+    let key_as_string = datetime
+        .format(&Rfc3339)
+        .map_err(|_err| TantivyError::InvalidArgument("Could not serialize date".to_string()))?;
+    Ok(key_as_string)
+}
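Note the unit convention: `format_date` multiplies by 1,000 before calling `from_unix_timestamp_nanos`, so the `i64` it accepts is a timestamp in microseconds. A sanity check along these lines (our own example values, not part of this change) would read, inside the crate where the `pub(crate)` helper is visible:

```rust
#[test]
fn format_date_renders_rfc3339() -> crate::Result<()> {
    // 1_546_300_800 seconds == 2019-01-01T00:00:00Z;
    // the argument is in microseconds, hence the extra factor of 1_000_000.
    assert_eq!(format_date(1_546_300_800_000_000)?, "2019-01-01T00:00:00Z");
    Ok(())
}
```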
src/aggregation/intermediate_agg_result.rs:
@@ -3,15 +3,14 @@
 //! indices.
 
 use std::cmp::Ordering;
-use std::collections::HashMap;
 
-use fnv::FnvHashMap;
 use itertools::Itertools;
+use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 
 use super::agg_req::{
     Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
-    MetricAggregation,
+    MetricAggregation, RangeAggregation,
 };
 use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
 use super::bucket::{
@@ -20,9 +19,11 @@ use super::bucket::{
 };
 use super::metric::{IntermediateAverage, IntermediateStats};
 use super::segment_agg_result::SegmentMetricResultCollector;
-use super::{Key, SerializedKey, VecWithNames};
+use super::{format_date, Key, SerializedKey, VecWithNames};
 use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
 use crate::aggregation::bucket::TermsAggregationInternal;
+use crate::schema::Schema;
+use crate::TantivyError;
 
 /// Contains the intermediate aggregation result, which is optimized to be merged with other
 /// intermediate results.
@@ -36,8 +37,12 @@ pub struct IntermediateAggregationResults {
 
 impl IntermediateAggregationResults {
     /// Convert intermediate result and its aggregation request to the final result.
-    pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
-        self.into_final_bucket_result_internal(&(req.into()))
+    pub fn into_final_bucket_result(
+        self,
+        req: Aggregations,
+        schema: &Schema,
+    ) -> crate::Result<AggregationResults> {
+        self.into_final_bucket_result_internal(&(req.into()), schema)
     }
 
     /// Convert intermediate result and its aggregation request to the final result.
@@ -47,18 +52,19 @@ impl IntermediateAggregationResults {
     pub(crate) fn into_final_bucket_result_internal(
         self,
         req: &AggregationsInternal,
+        schema: &Schema,
     ) -> crate::Result<AggregationResults> {
         // Important assumption:
         // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
         // request
-        let mut results: HashMap<String, AggregationResult> = HashMap::new();
+        let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();
 
         if let Some(buckets) = self.buckets {
-            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
+            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets, schema)?
         } else {
             // When there are no buckets, we create empty buckets, so that the serialized json
             // format is constant
-            add_empty_final_buckets_to_result(&mut results, &req.buckets)?
+            add_empty_final_buckets_to_result(&mut results, &req.buckets, schema)?
         };
 
         if let Some(metrics) = self.metrics {
@@ -108,10 +114,10 @@ impl IntermediateAggregationResults {
         Self { metrics, buckets }
     }
 
-    /// Merge an other intermediate aggregation result into this result.
+    /// Merge another intermediate aggregation result into this result.
     ///
     /// The order of the values need to be the same on both results. This is ensured when the same
-    /// (key values) are present on the underlying VecWithNames struct.
+    /// (key values) are present on the underlying `VecWithNames` struct.
     pub fn merge_fruits(&mut self, other: IntermediateAggregationResults) {
         if let (Some(buckets_left), Some(buckets_right)) = (&mut self.buckets, other.buckets) {
             for (bucket_left, bucket_right) in
@@ -132,7 +138,7 @@ impl IntermediateAggregationResults {
 }
 
 fn convert_and_add_final_metrics_to_result(
-    results: &mut HashMap<String, AggregationResult>,
+    results: &mut FxHashMap<String, AggregationResult>,
     metrics: VecWithNames<IntermediateMetricResult>,
 ) {
     results.extend(
@@ -143,7 +149,7 @@ fn convert_and_add_final_metrics_to_result(
 }
 
 fn add_empty_final_metrics_to_result(
-    results: &mut HashMap<String, AggregationResult>,
+    results: &mut FxHashMap<String, AggregationResult>,
     req_metrics: &VecWithNames<MetricAggregation>,
 ) -> crate::Result<()> {
     results.extend(req_metrics.iter().map(|(key, req)| {
@@ -157,27 +163,30 @@ fn add_empty_final_metrics_to_result(
 }
 
 fn add_empty_final_buckets_to_result(
-    results: &mut HashMap<String, AggregationResult>,
+    results: &mut FxHashMap<String, AggregationResult>,
     req_buckets: &VecWithNames<BucketAggregationInternal>,
+    schema: &Schema,
 ) -> crate::Result<()> {
     let requested_buckets = req_buckets.iter();
     for (key, req) in requested_buckets {
-        let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
+        let empty_bucket =
+            AggregationResult::BucketResult(BucketResult::empty_from_req(req, schema)?);
         results.insert(key.to_string(), empty_bucket);
     }
     Ok(())
 }
 
 fn convert_and_add_final_buckets_to_result(
-    results: &mut HashMap<String, AggregationResult>,
+    results: &mut FxHashMap<String, AggregationResult>,
     buckets: VecWithNames<IntermediateBucketResult>,
     req_buckets: &VecWithNames<BucketAggregationInternal>,
+    schema: &Schema,
 ) -> crate::Result<()> {
     assert_eq!(buckets.len(), req_buckets.len());
 
     let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
     for ((key, bucket), req) in buckets_with_request {
-        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
+        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req, schema)?);
         results.insert(key, result);
     }
     Ok(())
@@ -267,13 +276,21 @@ impl IntermediateBucketResult {
     pub(crate) fn into_final_bucket_result(
         self,
         req: &BucketAggregationInternal,
+        schema: &Schema,
     ) -> crate::Result<BucketResult> {
         match self {
             IntermediateBucketResult::Range(range_res) => {
                 let mut buckets: Vec<RangeBucketEntry> = range_res
                     .buckets
                     .into_iter()
-                    .map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
+                    .map(|(_, bucket)| {
+                        bucket.into_final_bucket_entry(
+                            &req.sub_aggregation,
+                            schema,
+                            req.as_range()
+                                .expect("unexpected aggregation, expected range aggregation"),
+                        )
+                    })
                     .collect::<crate::Result<Vec<_>>>()?;
 
                 buckets.sort_by(|left, right| {
@@ -288,7 +305,7 @@ impl IntermediateBucketResult {
                     .keyed;
                 let buckets = if is_keyed {
                     let mut bucket_map =
-                        FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
+                        FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
                     for bucket in buckets {
                         bucket_map.insert(bucket.key.to_string(), bucket);
                     }
@@ -304,11 +321,12 @@ impl IntermediateBucketResult {
                     req.as_histogram()
                         .expect("unexpected aggregation, expected histogram aggregation"),
                     &req.sub_aggregation,
+                    schema,
                 )?;
 
                 let buckets = if req.as_histogram().unwrap().keyed {
                     let mut bucket_map =
-                        FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
+                        FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
                     for bucket in buckets {
                         bucket_map.insert(bucket.key.to_string(), bucket);
                     }
@@ -322,6 +340,7 @@ impl IntermediateBucketResult {
                 req.as_term()
                     .expect("unexpected aggregation, expected term aggregation"),
                 &req.sub_aggregation,
+                schema,
             ),
         }
     }
@@ -396,13 +415,13 @@ impl IntermediateBucketResult {
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
 /// Range aggregation including error counts
 pub struct IntermediateRangeBucketResult {
-    pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
+    pub(crate) buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry>,
 }
 
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
 /// Term aggregation including error counts
 pub struct IntermediateTermBucketResult {
-    pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
+    pub(crate) entries: FxHashMap<String, IntermediateTermBucketEntry>,
     pub(crate) sum_other_doc_count: u64,
     pub(crate) doc_count_error_upper_bound: u64,
 }
@@ -412,6 +431,7 @@ impl IntermediateTermBucketResult {
         self,
         req: &TermsAggregation,
         sub_aggregation_req: &AggregationsInternal,
+        schema: &Schema,
     ) -> crate::Result<BucketResult> {
         let req = TermsAggregationInternal::from_req(req);
         let mut buckets: Vec<BucketEntry> = self
@@ -420,11 +440,12 @@ impl IntermediateTermBucketResult {
             .filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
             .map(|(key, entry)| {
                 Ok(BucketEntry {
+                    key_as_string: None,
                     key: Key::Str(key),
                     doc_count: entry.doc_count,
                     sub_aggregation: entry
                         .sub_aggregation
-                        .into_final_bucket_result_internal(sub_aggregation_req)?,
+                        .into_final_bucket_result_internal(sub_aggregation_req, schema)?,
                 })
             })
             .collect::<crate::Result<_>>()?;
@@ -499,8 +520,8 @@ trait MergeFruits {
 }
 
 fn merge_maps<V: MergeFruits + Clone>(
-    entries_left: &mut FnvHashMap<SerializedKey, V>,
-    mut entries_right: FnvHashMap<SerializedKey, V>,
+    entries_left: &mut FxHashMap<SerializedKey, V>,
+    mut entries_right: FxHashMap<SerializedKey, V>,
 ) {
     for (name, entry_left) in entries_left.iter_mut() {
         if let Some(entry_right) = entries_right.remove(name) {
@@ -529,13 +550,15 @@ impl IntermediateHistogramBucketEntry {
     pub(crate) fn into_final_bucket_entry(
         self,
        req: &AggregationsInternal,
+        schema: &Schema,
     ) -> crate::Result<BucketEntry> {
         Ok(BucketEntry {
+            key_as_string: None,
             key: Key::F64(self.key),
             doc_count: self.doc_count,
             sub_aggregation: self
                 .sub_aggregation
-                .into_final_bucket_result_internal(req)?,
+                .into_final_bucket_result_internal(req, schema)?,
         })
     }
 }
@@ -560,10 +583,10 @@ pub struct IntermediateRangeBucketEntry {
     pub doc_count: u64,
     /// The sub_aggregation in this bucket.
     pub sub_aggregation: IntermediateAggregationResults,
-    /// The from range of the bucket. Equals f64::MIN when None.
+    /// The from range of the bucket. Equals `f64::MIN` when `None`.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub from: Option<f64>,
-    /// The to range of the bucket. Equals f64::MAX when None.
+    /// The to range of the bucket. Equals `f64::MAX` when `None`.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub to: Option<f64>,
 }
@@ -572,16 +595,38 @@ impl IntermediateRangeBucketEntry {
     pub(crate) fn into_final_bucket_entry(
         self,
         req: &AggregationsInternal,
+        schema: &Schema,
+        range_req: &RangeAggregation,
     ) -> crate::Result<RangeBucketEntry> {
-        Ok(RangeBucketEntry {
+        let mut range_bucket_entry = RangeBucketEntry {
             key: self.key,
             doc_count: self.doc_count,
             sub_aggregation: self
                 .sub_aggregation
-                .into_final_bucket_result_internal(req)?,
+                .into_final_bucket_result_internal(req, schema)?,
             to: self.to,
             from: self.from,
-        })
+            to_as_string: None,
+            from_as_string: None,
+        };
+
+        // If we have a date type on the range buckets, we add the `*_as_string`
+        // fields as RFC 3339.
+        let field = schema
+            .get_field(&range_req.field)
+            .ok_or_else(|| TantivyError::FieldNotFound(range_req.field.to_string()))?;
+        if schema.get_field_entry(field).field_type().is_date() {
+            if let Some(val) = range_bucket_entry.to {
+                let key_as_string = format_date(val as i64)?;
+                range_bucket_entry.to_as_string = Some(key_as_string);
+            }
+            if let Some(val) = range_bucket_entry.from {
+                let key_as_string = format_date(val as i64)?;
+                range_bucket_entry.from_as_string = Some(key_as_string);
+            }
+        }
+
+        Ok(range_bucket_entry)
     }
 }
 
@@ -626,7 +671,7 @@ mod tests {
 
     fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
         let mut map = HashMap::new();
-        let mut buckets = FnvHashMap::default();
+        let mut buckets = FxHashMap::default();
         for (key, doc_count) in data {
             buckets.insert(
                 key.to_string(),
@@ -653,7 +698,7 @@ mod tests {
         data: &[(String, u64, String, u64)],
     ) -> IntermediateAggregationResults {
         let mut map = HashMap::new();
-        let mut buckets: FnvHashMap<_, _> = Default::default();
+        let mut buckets: FxHashMap<_, _> = Default::default();
         for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
             buckets.insert(
                 key.to_string(),
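With `to_as_string`/`from_as_string` threaded through, a range bucket over a date field can now carry RFC 3339 renderings of its numeric bounds. A plausible shape of the serialized output, purely as an illustration (aggregation name, key rendering, and values are ours, not from this diff):

```json
{
  "my_date_ranges": {
    "buckets": [
      {
        "key": "2019-01-01T00:00:00Z-2019-01-02T00:00:00Z",
        "doc_count": 2,
        "from": 1546300800000000.0,
        "from_as_string": "2019-01-01T00:00:00Z",
        "to": 1546387200000000.0,
        "to_as_string": "2019-01-02T00:00:00Z"
      }
    ]
  }
}
```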
src/aggregation/metric/average.rs:
@@ -1,9 +1,9 @@
 use std::fmt::Debug;
 
+use fastfield_codecs::Column;
 use serde::{Deserialize, Serialize};
 
 use crate::aggregation::f64_from_fastfield_u64;
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
 use crate::schema::Type;
 use crate::DocId;
 
@@ -57,13 +57,13 @@ impl SegmentAverageCollector {
             data: Default::default(),
         }
     }
-    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
+    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
         let mut iter = doc.chunks_exact(4);
         for docs in iter.by_ref() {
-            let val1 = field.get(docs[0]);
-            let val2 = field.get(docs[1]);
-            let val3 = field.get(docs[2]);
-            let val4 = field.get(docs[3]);
+            let val1 = field.get_val(docs[0]);
+            let val2 = field.get_val(docs[1]);
+            let val3 = field.get_val(docs[2]);
+            let val4 = field.get_val(docs[3]);
             let val1 = f64_from_fastfield_u64(val1, &self.field_type);
             let val2 = f64_from_fastfield_u64(val2, &self.field_type);
             let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -73,8 +73,8 @@ impl SegmentAverageCollector {
             self.data.collect(val3);
             self.data.collect(val4);
         }
-        for doc in iter.remainder() {
-            let val = field.get(*doc);
+        for &doc in iter.remainder() {
+            let val = field.get_val(doc);
             let val = f64_from_fastfield_u64(val, &self.field_type);
             self.data.collect(val);
         }
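The `collect_block` loop above is manually unrolled: `chunks_exact(4)` yields full blocks of four doc ids, and `remainder()` yields the up-to-three leftover ids. A standalone sketch of the same iteration pattern, independent of any tantivy types:

```rust
fn sum_vals(docs: &[u32], get_val: impl Fn(u32) -> u64) -> u64 {
    let mut sum = 0u64;
    let mut chunks = docs.chunks_exact(4);
    // Four independent loads per iteration give the compiler room to
    // pipeline or vectorize the per-document accesses.
    for block in chunks.by_ref() {
        sum += get_val(block[0]) + get_val(block[1]) + get_val(block[2]) + get_val(block[3]);
    }
    // `remainder()` returns the final 0..=3 elements that did not fill a block.
    for &doc in chunks.remainder() {
        sum += get_val(doc);
    }
    sum
}
```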
src/aggregation/metric/stats.rs:
@@ -1,14 +1,14 @@
+use fastfield_codecs::Column;
 use serde::{Deserialize, Serialize};
 
 use crate::aggregation::f64_from_fastfield_u64;
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
 use crate::schema::Type;
 use crate::{DocId, TantivyError};
 
 /// A multi-value metric aggregation that computes stats of numeric values that are
 /// extracted from the aggregated documents.
-/// Supported field types are u64, i64, and f64.
-/// See [Stats] for returned statistics.
+/// Supported field types are `u64`, `i64`, and `f64`.
+/// See [`Stats`] for returned statistics.
 ///
 /// # JSON Format
 /// ```json
@@ -43,13 +43,13 @@ pub struct Stats {
     pub count: usize,
     /// The sum of the fast field values.
     pub sum: f64,
-    /// The standard deviation of the fast field values. None for count == 0.
+    /// The standard deviation of the fast field values. `None` for count == 0.
     pub standard_deviation: Option<f64>,
     /// The min value of the fast field values.
     pub min: Option<f64>,
     /// The max value of the fast field values.
     pub max: Option<f64>,
-    /// The average of the values. None for count == 0.
+    /// The average of the values. `None` for count == 0.
     pub avg: Option<f64>,
 }
 
@@ -70,7 +70,7 @@ impl Stats {
     }
 }
 
-/// IntermediateStats contains the mergeable version for stats.
+/// `IntermediateStats` contains the mergeable version for stats.
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct IntermediateStats {
     count: usize,
@@ -163,13 +163,13 @@ impl SegmentStatsCollector {
             stats: IntermediateStats::default(),
         }
     }
-    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
+    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
         let mut iter = doc.chunks_exact(4);
         for docs in iter.by_ref() {
-            let val1 = field.get(docs[0]);
-            let val2 = field.get(docs[1]);
-            let val3 = field.get(docs[2]);
-            let val4 = field.get(docs[3]);
+            let val1 = field.get_val(docs[0]);
+            let val2 = field.get_val(docs[1]);
+            let val3 = field.get_val(docs[2]);
+            let val4 = field.get_val(docs[3]);
             let val1 = f64_from_fastfield_u64(val1, &self.field_type);
             let val2 = f64_from_fastfield_u64(val2, &self.field_type);
             let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -179,8 +179,8 @@ impl SegmentStatsCollector {
             self.stats.collect(val3);
             self.stats.collect(val4);
         }
-        for doc in iter.remainder() {
-            let val = field.get(*doc);
+        for &doc in iter.remainder() {
+            let val = field.get_val(doc);
             let val = f64_from_fastfield_u64(val, &self.field_type);
             self.stats.collect(val);
         }
@@ -222,7 +222,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let reader = index.reader()?;
         let searcher = reader.searcher();
@@ -300,7 +300,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
        let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
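For reference, the serialized `Stats` result referred to by the doc comment above would carry the six struct fields. An illustrative response body (field name and numbers are ours; the standard deviation is rounded):

```json
{
  "score_stats": {
    "count": 4,
    "sum": 50.0,
    "standard_deviation": 18.49,
    "min": 1.0,
    "max": 44.5,
    "avg": 12.5
  }
}
```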
src/aggregation/mod.rs:
@@ -10,20 +10,19 @@
 //!
 //! There are two categories: [Metrics](metric) and [Buckets](bucket).
 //!
-//! # Usage
-//!
+//! ## Prerequisite
+//! Currently aggregations work only on [fast fields](`crate::fastfield`). Single value fast fields
+//! of type `u64`, `f64`, `i64`, `date` and fast fields on text fields.
 //!
+//! ## Usage
 //! To use aggregations, build an aggregation request by constructing
-//! [Aggregations](agg_req::Aggregations).
-//! Create an [AggregationCollector] from this request. AggregationCollector implements the
-//! `Collector` trait and can be passed as collector into `searcher.search()`.
+//! [`Aggregations`](agg_req::Aggregations).
+//! Create an [`AggregationCollector`] from this request. `AggregationCollector` implements the
+//! [`Collector`](crate::collector::Collector) trait and can be passed as collector into
+//! [`Searcher::search()`](crate::Searcher::search).
 //!
-//! #### Limitations
 //!
-//! Currently aggregations work only on single value fast fields of type u64, f64, i64 and
-//! fast fields on text fields.
-//!
-//! # JSON Format
+//! ## JSON Format
 //! Aggregations request and result structures de/serialize into elasticsearch compatible JSON.
 //!
 //! ```verbatim
@@ -34,7 +33,7 @@
 //! let json_response_string: String = &serde_json::to_string(&agg_res)?;
 //! ```
 //!
-//! # Supported Aggregations
+//! ## Supported Aggregations
 //! - [Bucket](bucket)
 //!     - [Histogram](bucket::HistogramAggregation)
 //!     - [Range](bucket::RangeAggregation)
@@ -44,8 +43,8 @@
 //!     - [Stats](metric::StatsAggregation)
 //!
 //! # Example
-//! Compute the average metric, by building [agg_req::Aggregations], which is built from an (String,
-//! [agg_req::Aggregation]) iterator.
+//! Compute the average metric, by building [`agg_req::Aggregations`], which is built from an
+//! `(String, agg_req::Aggregation)` iterator.
 //!
 //! ```
 //! use tantivy::aggregation::agg_req::{Aggregations, Aggregation, MetricAggregation};
@@ -54,9 +53,10 @@
 //! use tantivy::query::AllQuery;
 //! use tantivy::aggregation::agg_result::AggregationResults;
 //! use tantivy::IndexReader;
+//! use tantivy::schema::Schema;
 //!
 //! # #[allow(dead_code)]
-//! fn aggregate_on_index(reader: &IndexReader) {
+//! fn aggregate_on_index(reader: &IndexReader, schema: Schema) {
 //!     let agg_req: Aggregations = vec![
 //!         (
 //!             "average".to_string(),
@@ -68,7 +68,7 @@
 //!     .into_iter()
 //!     .collect();
 //!
-//!     let collector = AggregationCollector::from_aggs(agg_req, None);
+//!     let collector = AggregationCollector::from_aggs(agg_req, None, schema);
 //!
 //!     let searcher = reader.searcher();
 //!     let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
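The `json_request_string` mentioned in the JSON Format section would hold an elasticsearch-style request body. A minimal average-metric request that should deserialize into the `Aggregations` map above, given how the request types derive `Deserialize` (the aggregation and field names are illustrative):

```json
{
  "average_score": {
    "avg": {
      "field": "score"
    }
  }
}
```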
@@ -143,25 +143,25 @@
 //! ```
 //!
 //! # Distributed Aggregation
-//! When the data is distributed on different [crate::Index] instances, the
-//! [DistributedAggregationCollector] provides functionality to merge data between independent
+//! When the data is distributed on different [`Index`](crate::Index) instances, the
+//! [`DistributedAggregationCollector`] provides functionality to merge data between independent
 //! search calls by returning
-//! [IntermediateAggregationResults](intermediate_agg_result::IntermediateAggregationResults).
-//! IntermediateAggregationResults provides the
-//! [merge_fruits](intermediate_agg_result::IntermediateAggregationResults::merge_fruits) method to
-//! merge multiple results. The merged result can then be converted into
-//! [agg_result::AggregationResults] via the
-//! [agg_result::AggregationResults::from_intermediate_and_req] method.
+//! [`IntermediateAggregationResults`](intermediate_agg_result::IntermediateAggregationResults).
+//! `IntermediateAggregationResults` provides the
+//! [`merge_fruits`](intermediate_agg_result::IntermediateAggregationResults::merge_fruits) method
+//! to merge multiple results. The merged result can then be converted into
+//! [`AggregationResults`](agg_result::AggregationResults) via the
+//! [`into_final_bucket_result`](intermediate_agg_result::IntermediateAggregationResults::into_final_bucket_result) method.
 
 pub mod agg_req;
 mod agg_req_with_accessor;
 pub mod agg_result;
 pub mod bucket;
 mod collector;
+mod date;
 pub mod intermediate_agg_result;
 pub mod metric;
 mod segment_agg_result;
 
 use std::collections::HashMap;
 use std::fmt::Display;
 
@@ -169,10 +169,11 @@ pub use collector::{
     AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
     MAX_BUCKET_COUNT,
 };
+pub(crate) use date::format_date;
+use fastfield_codecs::MonotonicallyMappableToU64;
 use itertools::Itertools;
 use serde::{Deserialize, Serialize};
 
-use crate::fastfield::FastValue;
 use crate::schema::Type;
 
 /// Represents an associative array `(key => values)` in a very efficient manner.
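The distributed flow described above, in sketch form: each node returns an `IntermediateAggregationResults`, one node folds them with `merge_fruits`, and only the merged result is finalized. The signatures come straight from this changeset; the surrounding setup is assumed:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use tantivy::schema::Schema;

fn merge_and_finalize(
    mut partials: Vec<IntermediateAggregationResults>,
    agg_req: Aggregations,
    schema: &Schema,
) -> tantivy::Result<AggregationResults> {
    // Fold the per-node results into one...
    let mut merged = partials.pop().expect("at least one partial result");
    for partial in partials {
        merged.merge_fruits(partial);
    }
    // ...then convert once, with the schema now required for date rendering.
    merged.into_final_bucket_result(agg_req, schema)
}
```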
@@ -260,7 +261,7 @@ impl<T: Clone> VecWithNames<T> {
     }
 }
 
-/// The serialized key is used in a HashMap.
+/// The serialized key is used in a `HashMap`.
 pub type SerializedKey = String;
 
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, PartialOrd)]
@@ -269,7 +270,7 @@ pub type SerializedKey = String;
 pub enum Key {
     /// String key
     Str(String),
-    /// f64 key
+    /// `f64` key
     F64(f64),
 }
 
@@ -282,14 +283,14 @@ impl Display for Key {
     }
 }
 
-/// Invert of to_fastfield_u64. Used to convert to f64 for metrics.
+/// Inverse of `to_fastfield_u64`. Used to convert to `f64` for metrics.
 ///
 /// # Panics
-/// Only u64, f64, i64 is supported
+/// Only `u64`, `f64`, `date`, and `i64` are supported.
 pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
     match field_type {
         Type::U64 => val as f64,
-        Type::I64 => i64::from_u64(val) as f64,
+        Type::I64 | Type::Date => i64::from_u64(val) as f64,
         Type::F64 => f64::from_u64(val),
         _ => {
             panic!("unexpected type {:?}. This should not happen", field_type)
@@ -297,20 +298,19 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
     }
 }
 
-/// Converts the f64 value to fast field value space.
+/// Converts the `f64` value to fast field value space, which is always `u64`.
 ///
-/// If the fast field has u64, values are stored as u64 in the fast field.
-/// A f64 value of e.g. 2.0 therefore needs to be converted to 1u64
+/// If the fast field has `u64`, values are stored unchanged as `u64` in the fast field.
 ///
-/// If the fast field has f64 values are converted and stored to u64 using a
+/// If the fast field has `f64` values are converted and stored to `u64` using a
 /// monotonic mapping.
-/// A f64 value of e.g. 2.0 needs to be converted using the same monotonic
-/// conversion function, so that the value matches the u64 value stored in the fast
+/// A `f64` value of e.g. `2.0` needs to be converted using the same monotonic
+/// conversion function, so that the value matches the `u64` value stored in the fast
 /// field.
 pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
     match field_type {
         Type::U64 => Some(val as u64),
-        Type::I64 => Some((val as i64).to_u64()),
+        Type::I64 | Type::Date => Some((val as i64).to_u64()),
         Type::F64 => Some(val.to_u64()),
         _ => None,
     }
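The two helpers are inverses over the supported types, and with `Type::Date` now routed through the `i64` branch a date value survives the round trip through fast-field space. A hypothetical in-crate check of that property (the helpers are `pub(crate)`, and the value is ours):

```rust
#[test]
fn date_roundtrip_through_fastfield_space() {
    // Dates take the i64 path, so whole-valued f64s must survive the
    // round trip through fast-field (u64) space without loss.
    let val: f64 = 1_546_300_800_000_000.0; // a date as microseconds
    let as_u64 = f64_to_fastfield_u64(val, &Type::Date).expect("date is supported");
    assert_eq!(f64_from_fastfield_u64(as_u64, &Type::Date), val);
}
```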
@@ -319,6 +319,7 @@ pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
 #[cfg(test)]
 mod tests {
     use serde_json::Value;
+    use time::OffsetDateTime;
 
     use super::agg_req::{Aggregation, Aggregations, BucketAggregation};
     use super::bucket::RangeAggregation;
@@ -334,7 +335,7 @@ mod tests {
     use crate::aggregation::DistributedAggregationCollector;
     use crate::query::{AllQuery, TermQuery};
     use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
-    use crate::{Index, Term};
+    use crate::{DateTime, Index, Term};
 
     fn get_avg_req(field_name: &str) -> Aggregation {
         Aggregation::Metric(MetricAggregation::Average(
@@ -360,7 +361,7 @@ mod tests {
         index: &Index,
         query: Option<(&str, &str)>,
     ) -> crate::Result<Value> {
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
 
         let reader = index.reader()?;
         let searcher = reader.searcher();
@@ -554,10 +555,10 @@ mod tests {
             let searcher = reader.searcher();
             let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
             intermediate_agg_result
-                .into_final_bucket_result(agg_req)
+                .into_final_bucket_result(agg_req, &index.schema())
                 .unwrap()
         } else {
-            let collector = AggregationCollector::from_aggs(agg_req, None);
+            let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
 
             let searcher = reader.searcher();
             searcher.search(&AllQuery, &collector).unwrap()
@@ -650,6 +651,7 @@ mod tests {
             .set_fast()
            .set_stored();
         let text_field = schema_builder.add_text_field("text", text_fieldtype);
+        let date_field = schema_builder.add_date_field("date", FAST);
         schema_builder.add_text_field("dummy_text", STRING);
         let score_fieldtype =
             crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
@@ -667,6 +669,7 @@ mod tests {
         // writing the segment
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800).unwrap()),
             score_field => 1u64,
             score_field_f64 => 1f64,
             score_field_i64 => 1i64,
@@ -675,6 +678,7 @@ mod tests {
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 3u64,
             score_field_f64 => 3f64,
             score_field_i64 => 3i64,
@@ -683,18 +687,21 @@ mod tests {
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 5u64,
             score_field_f64 => 5f64,
             score_field_i64 => 5i64,
         ))?;
         index_writer.add_document(doc!(
             text_field => "nohit",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 6u64,
             score_field_f64 => 6f64,
             score_field_i64 => 6i64,
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 7u64,
             score_field_f64 => 7f64,
             score_field_i64 => 7i64,
@@ -702,12 +709,14 @@ mod tests {
         index_writer.commit()?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 11u64,
             score_field_f64 => 11f64,
             score_field_i64 => 11i64,
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
             score_field => 14u64,
             score_field_f64 => 14f64,
             score_field_i64 => 14i64,
@@ -715,6 +724,7 @@ mod tests {
 
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
             score_field => 44u64,
             score_field_f64 => 44.5f64,
             score_field_i64 => 44i64,
@@ -725,6 +735,7 @@ mod tests {
         // no hits segment
         index_writer.add_document(doc!(
             text_field => "nohit",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
             score_field => 44u64,
             score_field_f64 => 44.5f64,
             score_field_i64 => 44i64,
@@ -797,7 +808,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -997,9 +1008,10 @@ mod tests {
             // Test de/serialization roundtrip on intermediate_agg_result
            let res: IntermediateAggregationResults =
                 serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
-            res.into_final_bucket_result(agg_req.clone()).unwrap()
+            res.into_final_bucket_result(agg_req.clone(), &index.schema())
+                .unwrap()
         } else {
-            let collector = AggregationCollector::from_aggs(agg_req.clone(), None);
+            let collector = AggregationCollector::from_aggs(agg_req.clone(), None, index.schema());
 
             let searcher = reader.searcher();
             searcher.search(&term_query, &collector).unwrap()
@@ -1057,7 +1069,7 @@ mod tests {
         );
 
         // Test empty result set
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
         let searcher = reader.searcher();
         searcher.search(&query_with_no_hits, &collector).unwrap();
 
@@ -1122,7 +1134,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
 
@@ -1235,7 +1247,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1266,7 +1278,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1297,7 +1309,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1336,7 +1348,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1365,7 +1377,7 @@ mod tests {
        .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1394,7 +1406,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1431,7 +1443,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1466,7 +1478,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1505,7 +1517,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1535,7 +1547,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
@@ -1592,7 +1604,7 @@ mod tests {
         .into_iter()
         .collect();
 
-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
 
         let searcher = reader.searcher();
         let agg_res: AggregationResults =
src/aggregation/segment_agg_result.rs:
@@ -185,10 +185,10 @@ impl SegmentMetricResultCollector {
     pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
         match self {
             SegmentMetricResultCollector::Average(avg_collector) => {
-                avg_collector.collect_block(doc, &metric.accessor);
+                avg_collector.collect_block(doc, &*metric.accessor);
             }
             SegmentMetricResultCollector::Stats(stats_collector) => {
-                stats_collector.collect_block(doc, &metric.accessor);
+                stats_collector.collect_block(doc, &*metric.accessor);
             }
         }
     }
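The `&metric.accessor` to `&*metric.accessor` change re-borrows through the owning pointer, so the callee receives a bare `&dyn Column<u64>` rather than a reference to the container. Assuming the accessor is now an owned trait object (e.g. a `Box`, which this diff does not show), the pattern looks like this, with a stand-in `Column` trait:

```rust
// Stand-in trait, mirroring the `get_val` shape used in this changeset.
trait Column<T> {
    fn get_val(&self, idx: u32) -> T;
}

struct ZeroColumn;
impl Column<u64> for ZeroColumn {
    fn get_val(&self, _idx: u32) -> u64 {
        0
    }
}

fn takes_dyn(col: &dyn Column<u64>) -> u64 {
    col.get_val(0)
}

fn main() {
    let accessor: Box<dyn Column<u64>> = Box::new(ZeroColumn);
    // `&*accessor` dereferences the Box and re-borrows the trait object,
    // yielding `&dyn Column<u64>` instead of `&Box<dyn Column<u64>>`.
    let _ = takes_dyn(&*accessor);
}
```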
@@ -24,7 +24,7 @@ where TScore: Clone + PartialOrd
|
|||||||
/// A custom segment scorer makes it possible to define any kind of score
|
/// A custom segment scorer makes it possible to define any kind of score
|
||||||
/// for a given document belonging to a specific segment.
|
/// for a given document belonging to a specific segment.
|
||||||
///
|
///
|
||||||
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
|
/// It is the segment local version of the [`CustomScorer`].
|
||||||
pub trait CustomSegmentScorer<TScore>: 'static {
|
pub trait CustomSegmentScorer<TScore>: 'static {
|
||||||
/// Computes the score of a specific `doc`.
|
/// Computes the score of a specific `doc`.
|
||||||
fn score(&mut self, doc: DocId) -> TScore;
|
fn score(&mut self, doc: DocId) -> TScore;
|
||||||
@@ -36,9 +36,9 @@ pub trait CustomSegmentScorer<TScore>: 'static {
|
|||||||
/// Instead, it helps constructing `Self::Child` instances that will compute
|
/// Instead, it helps constructing `Self::Child` instances that will compute
|
||||||
/// the score at a segment scale.
|
/// the score at a segment scale.
|
||||||
pub trait CustomScorer<TScore>: Sync {
|
pub trait CustomScorer<TScore>: Sync {
|
||||||
/// Type of the associated [`CustomSegmentScorer`](./trait.CustomSegmentScorer.html).
|
/// Type of the associated [`CustomSegmentScorer`].
|
||||||
type Child: CustomSegmentScorer<TScore>;
|
type Child: CustomSegmentScorer<TScore>;
|
||||||
/// Builds a child scorer for a specific segment. The child scorer is associated to
|
/// Builds a child scorer for a specific segment. The child scorer is associated with
|
||||||
/// a specific segment.
|
/// a specific segment.
|
||||||
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
|
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
|
||||||
}
|
}
|
||||||
@@ -67,10 +67,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// (e.g. `/category/fiction`, `/category/biography`, `/category/personal_development`).
 ///
 /// Once collection is finished, you can harvest its results in the form
-/// of a `FacetCounts` object, and extract your face t counts from it.
+/// of a [`FacetCounts`] object, and extract your facet counts from it.
 ///
 /// This implementation assumes you are working with a number of facets that
-/// is much hundreds of time lower than your number of documents.
+/// is many hundreds of times smaller than your number of documents.
 ///
 ///
 /// ```rust
@@ -91,7 +91,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// let index = Index::create_in_ram(schema);
 /// {
 ///     let mut index_writer = index.writer(3_000_000)?;
-///     // a document can be associated to any number of facets
+///     // a document can be associated with any number of facets
 ///     index_writer.add_document(doc!(
 ///         title => "The Name of the Wind",
 ///         facet => Facet::from("/lang/en"),
@@ -231,7 +231,7 @@ impl FacetCollector {
     ///
     /// Adding two facets within which one is the prefix of the other is forbidden.
     /// If you need the correct number of unique documents for two such facets,
-    /// just add them in separate `FacetCollector`.
+    /// just add them in a separate `FacetCollector`.
     pub fn add_facet<T>(&mut self, facet_from: T)
     where Facet: From<T> {
         let facet = Facet::from(facet_from);
@@ -338,11 +338,7 @@ impl SegmentCollector for FacetSegmentCollector {
         let mut previous_collapsed_ord: usize = usize::MAX;
         for &facet_ord in &self.facet_ords_buf {
             let collapsed_ord = self.collapse_mapping[facet_ord as usize];
-            self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
-                0
-            } else {
-                1
-            };
+            self.counts[collapsed_ord] += u64::from(collapsed_ord != previous_collapsed_ord);
             previous_collapsed_ord = collapsed_ord;
         }
     }
@@ -391,7 +387,7 @@ impl<'a> Iterator for FacetChildIterator<'a> {

 impl FacetCounts {
     /// Returns an iterator over all of the facet count pairs inside this result.
-    /// See the documentation for [FacetCollector] for a usage example.
+    /// See the documentation for [`FacetCollector`] for a usage example.
     pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
     where Facet: From<T> {
         let facet = Facet::from(facet_from);
@@ -410,7 +406,7 @@ impl FacetCounts {
     }

     /// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts.
-    /// See the documentation for [FacetCollector] for a usage example.
+    /// See the documentation for [`FacetCollector`] for a usage example.
     pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
     where Facet: From<T> {
         let mut heap = BinaryHeap::with_capacity(k);
@@ -620,7 +616,7 @@ mod tests {
             .map(|mut doc| {
                 doc.add_facet(
                     facet_field,
-                    &format!("/facet/{}", thread_rng().sample(&uniform)),
+                    &format!("/facet/{}", thread_rng().sample(uniform)),
                 );
                 doc
             })
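The collapsed-ordinal hunk above replaces a four-line `if/else` with `u64::from(bool)`, which maps `true` to 1 and `false` to 0, giving a branch-free increment. A standalone sketch of the same counting trick (the helper name is illustrative):

```rust
/// Count each run of equal ordinals once, branch-free: the increment is 1
/// exactly when the current ordinal differs from the previous one.
fn count_unique_runs(ords: &[usize], num_ords: usize) -> Vec<u64> {
    let mut counts = vec![0u64; num_ords];
    let mut previous = usize::MAX;
    for &ord in ords {
        counts[ord] += u64::from(ord != previous);
        previous = ord;
    }
    counts
}

fn main() {
    // A doc carrying ordinals [0, 0, 2] contributes once to facet 0
    // and once to facet 2, never twice to facet 0.
    assert_eq!(count_unique_runs(&[0, 0, 2], 3), vec![1, 0, 1]);
}
```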
@@ -10,9 +10,12 @@
 // ---
 // Importing tantivy...
 use std::marker::PhantomData;
+use std::sync::Arc;
+
+use fastfield_codecs::Column;

 use crate::collector::{Collector, SegmentCollector};
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
+use crate::fastfield::FastValue;
 use crate::schema::Field;
 use crate::{Score, SegmentReader, TantivyError};

@@ -158,7 +161,7 @@ where
     TPredicate: 'static,
     TPredicateValue: FastValue,
 {
-    fast_field_reader: DynamicFastFieldReader<TPredicateValue>,
+    fast_field_reader: Arc<dyn Column<TPredicateValue>>,
     segment_collector: TSegmentCollector,
     predicate: TPredicate,
     t_predicate_value: PhantomData<TPredicateValue>,
@@ -174,7 +177,7 @@ where
     type Fruit = TSegmentCollector::Fruit;

     fn collect(&mut self, doc: u32, score: Score) {
-        let value = self.fast_field_reader.get(doc);
+        let value = self.fast_field_reader.get_val(doc);
         if (self.predicate)(value) {
             self.segment_collector.collect(doc, score)
         }
@@ -1,7 +1,10 @@
+use std::sync::Arc;
+
 use fastdivide::DividerU64;
+use fastfield_codecs::Column;

 use crate::collector::{Collector, SegmentCollector};
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
+use crate::fastfield::FastValue;
 use crate::schema::{Field, Type};
 use crate::{DocId, Score};

@@ -34,7 +37,7 @@ impl HistogramCollector {
     /// The scale/range of the histogram is not dynamic. It is required to
     /// define it by supplying following parameter:
     /// - `min_value`: the minimum value that can be recorded in the histogram.
-    /// - `bucket_width`: the length of the interval that is associated to each buckets.
+    /// - `bucket_width`: the length of the interval that is associated with each buckets.
     /// - `num_buckets`: The overall number of buckets.
     ///
     /// Together, this parameters define a partition of `[min_value, min_value + num_buckets *
@@ -84,14 +87,14 @@ impl HistogramComputer {
 }
 pub struct SegmentHistogramCollector {
     histogram_computer: HistogramComputer,
-    ff_reader: DynamicFastFieldReader<u64>,
+    ff_reader: Arc<dyn Column<u64>>,
 }

 impl SegmentCollector for SegmentHistogramCollector {
     type Fruit = Vec<u64>;

     fn collect(&mut self, doc: DocId, _score: Score) {
-        let value = self.ff_reader.get(doc);
+        let value = self.ff_reader.get_val(doc);
         self.histogram_computer.add_value(value);
     }

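The doc comment above defines the histogram by `min_value`, `bucket_width`, and `num_buckets`, which together partition `[min_value, min_value + num_buckets * bucket_width)`. A minimal sketch of that bucket mapping — not tantivy's internal code, which additionally uses `fastdivide` to speed up the division:

```rust
/// Maps `value` to its bucket index, or `None` when it falls outside the
/// partition `[min_value, min_value + num_buckets * bucket_width)`.
fn bucket_index(value: u64, min_value: u64, bucket_width: u64, num_buckets: usize) -> Option<usize> {
    if value < min_value {
        return None;
    }
    let idx = ((value - min_value) / bucket_width) as usize;
    (idx < num_buckets).then_some(idx)
}

fn main() {
    // min_value = 10, bucket_width = 5, num_buckets = 2 covers [10, 20).
    assert_eq!(bucket_index(12, 10, 5, 2), Some(0));
    assert_eq!(bucket_index(17, 10, 5, 2), Some(1));
    assert_eq!(bucket_index(20, 10, 5, 2), None);
}
```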
@@ -4,13 +4,13 @@
 //! In tantivy jargon, we call this information your search "fruit".
 //!
 //! Your fruit could for instance be :
-//! - [the count of matching documents](./struct.Count.html)
-//! - [the top 10 documents, by relevancy or by a fast field](./struct.TopDocs.html)
-//! - [facet counts](./struct.FacetCollector.html)
+//! - [the count of matching documents](crate::collector::Count)
+//! - [the top 10 documents, by relevancy or by a fast field](crate::collector::TopDocs)
+//! - [facet counts](FacetCollector)
 //!
-//! At one point in your code, you will trigger the actual search operation by calling
-//! [the `search(...)` method of your `Searcher` object](../struct.Searcher.html#method.search).
-//! This call will look like this.
+//! At some point in your code, you will trigger the actual search operation by calling
+//! [`Searcher::search()`](crate::Searcher::search).
+//! This call will look like this:
 //!
 //! ```verbatim
 //! let fruit = searcher.search(&query, &collector)?;
@@ -64,7 +64,7 @@
 //!
 //! The `Collector` trait is implemented for up to 4 collectors.
 //! If you have more than 4 collectors, you can either group them into
-//! tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html).
+//! tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`].
 //!
 //! # Combining several collectors dynamically
 //!
@@ -74,7 +74,7 @@
 //!
 //! Unfortunately it requires you to know at compile time your collector types.
 //! If on the other hand, the collectors depend on some query parameter,
-//! you can rely on `MultiCollector`'s.
+//! you can rely on [`MultiCollector`]'s.
 //!
 //!
 //! # Implementing your own collectors.
@@ -142,7 +142,7 @@ pub trait Collector: Sync + Send {
     /// e.g. `usize` for the `Count` collector.
     type Fruit: Fruit;

-    /// Type of the `SegmentCollector` associated to this collector.
+    /// Type of the `SegmentCollector` associated with this collector.
     type Child: SegmentCollector;

     /// `set_segment` is called before beginning to enumerate
@@ -156,7 +156,7 @@ pub trait Collector: Sync + Send {
     /// Returns true iff the collector requires to compute scores for documents.
     fn requires_scoring(&self) -> bool;

-    /// Combines the fruit associated to the collection of each segments
+    /// Combines the fruit associated with the collection of each segments
     /// into one fruit.
     fn merge_fruits(
         &self,
@@ -172,17 +172,33 @@ pub trait Collector: Sync + Send {
     ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
         let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;

-        if let Some(alive_bitset) = reader.alive_bitset() {
-            weight.for_each(reader, &mut |doc, score| {
-                if alive_bitset.is_alive(doc) {
-                    segment_collector.collect(doc, score);
-                }
-            })?;
-        } else {
-            weight.for_each(reader, &mut |doc, score| {
-                segment_collector.collect(doc, score);
-            })?;
-        }
+        match (reader.alive_bitset(), self.requires_scoring()) {
+            (Some(alive_bitset), true) => {
+                weight.for_each(reader, &mut |doc, score| {
+                    if alive_bitset.is_alive(doc) {
+                        segment_collector.collect(doc, score);
+                    }
+                })?;
+            }
+            (Some(alive_bitset), false) => {
+                weight.for_each_no_score(reader, &mut |doc| {
+                    if alive_bitset.is_alive(doc) {
+                        segment_collector.collect(doc, 0.0);
+                    }
+                })?;
+            }
+            (None, true) => {
+                weight.for_each(reader, &mut |doc, score| {
+                    segment_collector.collect(doc, score);
+                })?;
+            }
+            (None, false) => {
+                weight.for_each_no_score(reader, &mut |doc| {
+                    segment_collector.collect(doc, 0.0);
+                })?;
+            }
+        }

         Ok(segment_collector.harvest())
     }
 }
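The rewritten hunk above dispatches once per segment on `(alive_bitset, requires_scoring)` instead of re-checking conditions per document, and calls `for_each_no_score` so score computation is skipped entirely when no collector needs it. A generic sketch of the same idea, with every name hypothetical:

```rust
/// Walk `docs`, choosing the iteration strategy once up front rather than
/// branching inside the per-document loop.
fn visit_docs(
    docs: &[u32],
    alive: Option<&dyn Fn(u32) -> bool>,
    needs_score: bool,
    score_of: impl Fn(u32) -> f32,
    mut collect: impl FnMut(u32, f32),
) {
    match (alive, needs_score) {
        (Some(is_alive), true) => {
            for &doc in docs {
                if is_alive(doc) {
                    collect(doc, score_of(doc));
                }
            }
        }
        (Some(is_alive), false) => {
            for &doc in docs {
                if is_alive(doc) {
                    collect(doc, 0.0); // score never computed
                }
            }
        }
        (None, true) => {
            for &doc in docs {
                collect(doc, score_of(doc));
            }
        }
        (None, false) => {
            for &doc in docs {
                collect(doc, 0.0);
            }
        }
    }
}
```

The payoff is that the "needs score" decision is hoisted out of the hot loop, at the cost of spelling out all four combinations.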
@@ -1,7 +1,11 @@
+use std::sync::Arc;
+
+use fastfield_codecs::Column;
+
 use super::*;
 use crate::collector::{Count, FilterCollector, TopDocs};
 use crate::core::SegmentReader;
-use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
+use crate::fastfield::BytesFastFieldReader;
 use crate::query::{AllQuery, QueryParser};
 use crate::schema::{Field, Schema, FAST, TEXT};
 use crate::time::format_description::well_known::Rfc3339;
@@ -156,7 +160,7 @@ pub struct FastFieldTestCollector {

 pub struct FastFieldSegmentCollector {
     vals: Vec<u64>,
-    reader: DynamicFastFieldReader<u64>,
+    reader: Arc<dyn Column<u64>>,
 }

 impl FastFieldTestCollector {
@@ -197,7 +201,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
     type Fruit = Vec<u64>;

     fn collect(&mut self, doc: DocId, _score: Score) {
-        let val = self.reader.get(doc);
+        let val = self.reader.get_val(doc);
         self.vals.push(val);
     }

@@ -1,6 +1,9 @@
 use std::collections::BinaryHeap;
 use std::fmt;
 use std::marker::PhantomData;
+use std::sync::Arc;
+
+use fastfield_codecs::Column;

 use super::Collector;
 use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
@@ -9,7 +12,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
     CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
 };
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
+use crate::fastfield::FastValue;
 use crate::query::Weight;
 use crate::schema::Field;
 use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
@@ -129,12 +132,12 @@ impl fmt::Debug for TopDocs {
 }

 struct ScorerByFastFieldReader {
-    ff_reader: DynamicFastFieldReader<u64>,
+    ff_reader: Arc<dyn Column<u64>>,
 }

 impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
     fn score(&mut self, doc: DocId) -> u64 {
-        self.ff_reader.get(doc)
+        self.ff_reader.get_val(doc)
     }
 }

@@ -284,7 +287,7 @@ impl TopDocs {
     /// # See also
     ///
     /// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
-    /// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
+    /// the [.order_by_fast_field(...)](TopDocs::order_by_fast_field) method.
     pub fn order_by_u64_field(
         self,
         field: Field,
@@ -381,7 +384,7 @@ impl TopDocs {
     ///
     /// This method offers a convenient way to tweak or replace
     /// the documents score. As suggested by the prototype you can
-    /// manually define your own [`ScoreTweaker`](./trait.ScoreTweaker.html)
+    /// manually define your own [`ScoreTweaker`]
     /// and pass it as an argument, but there is a much simpler way to
     /// tweak your score: you can use a closure as in the following
     /// example.
@@ -398,7 +401,7 @@ impl TopDocs {
     /// In the following example will will tweak our ranking a bit by
     /// boosting popular products a notch.
     ///
-    /// In more serious application, this tweaking could involved running a
+    /// In more serious application, this tweaking could involve running a
     /// learning-to-rank model over various features
     ///
     /// ```rust
@@ -407,7 +410,6 @@ impl TopDocs {
     /// # use tantivy::query::QueryParser;
     /// use tantivy::SegmentReader;
     /// use tantivy::collector::TopDocs;
-    /// use tantivy::fastfield::FastFieldReader;
     /// use tantivy::schema::Field;
     ///
     /// fn create_schema() -> Schema {
@@ -456,7 +458,7 @@ impl TopDocs {
     ///
     /// // We can now define our actual scoring function
     /// move |doc: DocId, original_score: Score| {
-    ///     let popularity: u64 = popularity_reader.get(doc);
+    ///     let popularity: u64 = popularity_reader.get_val(doc);
     ///     // Well.. For the sake of the example we use a simple logarithm
     ///     // function.
     ///     let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -472,7 +474,7 @@ impl TopDocs {
     /// ```
     ///
     /// # See also
-    /// [custom_score(...)](#method.custom_score).
+    /// - [custom_score(...)](TopDocs::custom_score)
     pub fn tweak_score<TScore, TScoreSegmentTweaker, TScoreTweaker>(
         self,
         score_tweaker: TScoreTweaker,
@@ -489,8 +491,7 @@ impl TopDocs {
     ///
     /// This method offers a convenient way to use a different score.
     ///
-    /// As suggested by the prototype you can manually define your
-    /// own [`CustomScorer`](./trait.CustomScorer.html)
+    /// As suggested by the prototype you can manually define your own [`CustomScorer`]
     /// and pass it as an argument, but there is a much simpler way to
     /// tweak your score: you can use a closure as in the following
     /// example.
@@ -515,7 +516,6 @@ impl TopDocs {
     /// use tantivy::SegmentReader;
     /// use tantivy::collector::TopDocs;
     /// use tantivy::schema::Field;
-    /// use tantivy::fastfield::FastFieldReader;
     ///
     /// # fn create_schema() -> Schema {
     /// #   let mut schema_builder = Schema::builder();
@@ -567,8 +567,8 @@ impl TopDocs {
     ///
     /// // We can now define our actual scoring function
     /// move |doc: DocId| {
-    ///     let popularity: u64 = popularity_reader.get(doc);
-    ///     let boosted: u64 = boosted_reader.get(doc);
+    ///     let popularity: u64 = popularity_reader.get_val(doc);
+    ///     let boosted: u64 = boosted_reader.get_val(doc);
     ///     // Score do not have to be `f64` in tantivy.
     ///     // Here we return a couple to get lexicographical order
     ///     // for free.
@@ -587,7 +587,7 @@ impl TopDocs {
     /// ```
     ///
     /// # See also
-    /// [tweak_score(...)](#method.tweak_score).
+    /// - [tweak_score(...)](TopDocs::tweak_score)
     pub fn custom_score<TScore, TCustomSegmentScorer, TCustomScorer>(
         self,
         custom_score: TCustomScorer,
@@ -693,7 +693,7 @@ impl Collector for TopDocs {
     }
 }

-/// Segment Collector associated to `TopDocs`.
+/// Segment Collector associated with `TopDocs`.
 pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);

 impl SegmentCollector for TopScoreSegmentCollector {
@@ -24,7 +24,7 @@ where TScore: Clone + PartialOrd
 /// A `ScoreSegmentTweaker` makes it possible to modify the default score
 /// for a given document belonging to a specific segment.
 ///
-/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
+/// It is the segment local version of the [`ScoreTweaker`].
 pub trait ScoreSegmentTweaker<TScore>: 'static {
     /// Tweak the given `score` for the document `doc`.
     fn score(&mut self, doc: DocId, score: Score) -> TScore;
@@ -37,10 +37,10 @@ pub trait ScoreSegmentTweaker<TScore>: 'static {
 /// Instead, it helps constructing `Self::Child` instances that will compute
 /// the score at a segment scale.
 pub trait ScoreTweaker<TScore>: Sync {
-    /// Type of the associated [`ScoreSegmentTweaker`](./trait.ScoreSegmentTweaker.html).
+    /// Type of the associated [`ScoreSegmentTweaker`].
     type Child: ScoreSegmentTweaker<TScore>;

-    /// Builds a child tweaker for a specific segment. The child scorer is associated to
+    /// Builds a child tweaker for a specific segment. The child scorer is associated with
     /// a specific segment.
     fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
 }
@@ -7,6 +7,7 @@ use std::sync::Arc;

 use super::segment::Segment;
 use super::IndexSettings;
+use crate::core::single_segment_index_writer::SingleSegmentIndexWriter;
 use crate::core::{
     Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
 };
@@ -16,9 +17,9 @@ use crate::directory::MmapDirectory;
 use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
 use crate::error::{DataCorruption, TantivyError};
 use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
-use crate::indexer::segment_updater::save_new_metas;
+use crate::indexer::segment_updater::save_metas;
 use crate::reader::{IndexReader, IndexReaderBuilder};
-use crate::schema::{Field, FieldType, Schema};
+use crate::schema::{Cardinality, Field, FieldType, Schema};
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::IndexWriter;

@@ -47,10 +48,38 @@ fn load_metas(
         .map_err(From::from)
 }

+/// Save the index meta file.
+/// This operation is atomic :
+/// Either
+/// - it fails, in which case an error is returned,
+///   and the `meta.json` remains untouched,
+/// - it succeeds, and `meta.json` is written
+///   and flushed.
+///
+/// This method is not part of tantivy's public API
+fn save_new_metas(
+    schema: Schema,
+    index_settings: IndexSettings,
+    directory: &dyn Directory,
+) -> crate::Result<()> {
+    save_metas(
+        &IndexMeta {
+            index_settings,
+            segments: Vec::new(),
+            schema,
+            opstamp: 0u64,
+            payload: None,
+        },
+        directory,
+    )?;
+    directory.sync_directory()?;
+    Ok(())
+}
+
 /// IndexBuilder can be used to create an index.
 ///
-/// Use in conjunction with `SchemaBuilder`. Global index settings
-/// can be configured with `IndexSettings`
+/// Use in conjunction with [`SchemaBuilder`][crate::schema::SchemaBuilder].
+/// Global index settings can be configured with [`IndexSettings`].
 ///
 /// # Examples
 ///
@@ -68,7 +97,13 @@ fn load_metas(
 /// );
 ///
 /// let schema = schema_builder.build();
-/// let settings = IndexSettings{sort_by_field: Some(IndexSortByField{field:"number".to_string(), order:Order::Asc}), ..Default::default()};
+/// let settings = IndexSettings{
+///     sort_by_field: Some(IndexSortByField{
+///         field: "number".to_string(),
+///         order: Order::Asc
+///     }),
+///     ..Default::default()
+/// };
 /// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
 /// ```
 pub struct IndexBuilder {
@@ -111,21 +146,21 @@ impl IndexBuilder {
         self
     }

-    /// Creates a new index using the `RAMDirectory`.
+    /// Creates a new index using the [`RamDirectory`].
     ///
     /// The index will be allocated in anonymous memory.
-    /// This should only be used for unit tests.
+    /// This is useful for indexing small set of documents
+    /// for instances like unit test or temporary in memory index.
     pub fn create_in_ram(self) -> Result<Index, TantivyError> {
         let ram_directory = RamDirectory::create();
-        Ok(self
-            .create(ram_directory)
-            .expect("Creating a RAMDirectory should never fail"))
+        self.create(ram_directory)
     }

     /// Creates a new index in a given filepath.
-    /// The index will use the `MMapDirectory`.
+    /// The index will use the [`MmapDirectory`].
     ///
-    /// If a previous index was in this directory, it returns an `IndexAlreadyExists` error.
+    /// If a previous index was in this directory, it returns an
+    /// [`TantivyError::IndexAlreadyExists`] error.
     #[cfg(feature = "mmap")]
     pub fn create_in_dir<P: AsRef<Path>>(self, directory_path: P) -> crate::Result<Index> {
         let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::open(directory_path)?);
@@ -135,14 +170,34 @@ impl IndexBuilder {
         self.create(mmap_directory)
     }

+    /// Dragons ahead!!!
+    ///
+    /// The point of this API is to let users create a simple index with a single segment
+    /// and without starting any thread.
+    ///
+    /// Do not use this method if you are not sure what you are doing.
+    ///
+    /// It expects an originally empty directory, and will not run any GC operation.
+    #[doc(hidden)]
+    pub fn single_segment_index_writer(
+        self,
+        dir: impl Into<Box<dyn Directory>>,
+        mem_budget: usize,
+    ) -> crate::Result<SingleSegmentIndexWriter> {
+        let index = self.create(dir)?;
+        let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
+        Ok(index_simple_writer)
+    }
+
     /// Creates a new index in a temp directory.
     ///
-    /// The index will use the `MMapDirectory` in a newly created directory.
-    /// The temp directory will be destroyed automatically when the `Index` object
+    /// The index will use the [`MmapDirectory`] in a newly created directory.
+    /// The temp directory will be destroyed automatically when the [`Index`] object
     /// is destroyed.
     ///
-    /// The temp directory is only used for testing the `MmapDirectory`.
-    /// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
+    /// The temp directory is only used for testing the [`MmapDirectory`].
+    /// For other unit tests, prefer the [`RamDirectory`], see:
+    /// [`IndexBuilder::create_in_ram()`].
     #[cfg(feature = "mmap")]
     pub fn create_from_tempdir(self) -> crate::Result<Index> {
         let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?);
@@ -172,10 +227,44 @@ impl IndexBuilder {
             ))
         }
     }

+    fn validate(&self) -> crate::Result<()> {
+        if let Some(schema) = self.schema.as_ref() {
+            if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
+                let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
+                    TantivyError::InvalidArgument(format!(
+                        "Field to sort index {} not found in schema",
+                        sort_by_field.field
+                    ))
+                })?;
+                let entry = schema.get_field_entry(schema_field);
+                if !entry.is_fast() {
+                    return Err(TantivyError::InvalidArgument(format!(
+                        "Field {} is no fast field. Field needs to be a single value fast field \
+                         to be used to sort an index",
+                        sort_by_field.field
+                    )));
+                }
+                if entry.field_type().fastfield_cardinality() != Some(Cardinality::SingleValue) {
+                    return Err(TantivyError::InvalidArgument(format!(
+                        "Only single value fast field Cardinality supported for sorting index {}",
+                        sort_by_field.field
+                    )));
+                }
+            }
+            Ok(())
+        } else {
+            Err(TantivyError::InvalidArgument(
+                "no schema passed".to_string(),
+            ))
+        }
+    }
+
     /// Creates a new index given an implementation of the trait `Directory`.
     ///
     /// If a directory previously existed, it will be erased.
     fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
+        self.validate()?;
         let dir = dir.into();
         let directory = ManagedDirectory::wrap(dir)?;
         save_new_metas(
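The new `validate()` above rejects sort settings whose field is missing from the schema, not a fast field, or multi-valued. A hedged sketch of settings that pass this check, reusing the `IndexSettings` shape from the doc example earlier in this diff (the field name `number` is purely illustrative):

```rust
use tantivy::schema::{Schema, FAST};
use tantivy::{Index, IndexSettings, IndexSortByField, Order};

fn create_sorted_index() -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    // A single-value fast field: exactly what `validate()` requires
    // for the sort field.
    schema_builder.add_u64_field("number", FAST);
    let schema = schema_builder.build();
    let settings = IndexSettings {
        sort_by_field: Some(IndexSortByField {
            field: "number".to_string(),
            order: Order::Asc,
        }),
        ..Default::default()
    };
    Index::builder().schema(schema).settings(settings).create_in_ram()
}
```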
@@ -238,7 +327,7 @@ impl Index {
         self.set_multithread_executor(default_num_threads)
     }

-    /// Creates a new index using the `RamDirectory`.
+    /// Creates a new index using the [`RamDirectory`].
     ///
     /// The index will be allocated in anonymous memory.
     /// This is useful for indexing small set of documents
@@ -248,9 +337,10 @@ impl Index {
     }

     /// Creates a new index in a given filepath.
-    /// The index will use the `MMapDirectory`.
+    /// The index will use the [`MmapDirectory`].
     ///
-    /// If a previous index was in this directory, then it returns an `IndexAlreadyExists` error.
+    /// If a previous index was in this directory, then it returns
+    /// a [`TantivyError::IndexAlreadyExists`] error.
     #[cfg(feature = "mmap")]
     pub fn create_in_dir<P: AsRef<Path>>(
         directory_path: P,
@@ -272,12 +362,13 @@ impl Index {

     /// Creates a new index in a temp directory.
     ///
-    /// The index will use the `MMapDirectory` in a newly created directory.
-    /// The temp directory will be destroyed automatically when the `Index` object
+    /// The index will use the [`MmapDirectory`] in a newly created directory.
+    /// The temp directory will be destroyed automatically when the [`Index`] object
     /// is destroyed.
     ///
-    /// The temp directory is only used for testing the `MmapDirectory`.
-    /// For other unit tests, prefer the `RamDirectory`, see: `create_in_ram`.
+    /// The temp directory is only used for testing the [`MmapDirectory`].
+    /// For other unit tests, prefer the [`RamDirectory`],
+    /// see: [`IndexBuilder::create_in_ram()`].
     #[cfg(feature = "mmap")]
     pub fn create_from_tempdir(schema: Schema) -> crate::Result<Index> {
         IndexBuilder::new().schema(schema).create_from_tempdir()
@@ -297,7 +388,7 @@ impl Index {
         builder.create(dir)
     }

-    /// Creates a new index given a directory and an `IndexMeta`.
+    /// Creates a new index given a directory and an [`IndexMeta`].
     fn open_from_metas(
         directory: ManagedDirectory,
         metas: &IndexMeta,
@@ -324,7 +415,7 @@ impl Index {
         &self.tokenizers
     }

-    /// Helper to access the tokenizer associated to a specific field.
+    /// Get the tokenizer associated with a specific field.
     pub fn tokenizer_for_field(&self, field: Field) -> crate::Result<TextAnalyzer> {
         let field_entry = self.schema.get_field_entry(field);
         let field_type = field_entry.field_type();
@@ -356,14 +447,14 @@ impl Index {
         })
     }

-    /// Create a default `IndexReader` for the given index.
+    /// Create a default [`IndexReader`] for the given index.
     ///
-    /// See [`Index.reader_builder()`](#method.reader_builder).
+    /// See [`Index.reader_builder()`].
     pub fn reader(&self) -> crate::Result<IndexReader> {
         self.reader_builder().try_into()
     }

-    /// Create a `IndexReader` for the given index.
+    /// Create a [`IndexReader`] for the given index.
     ///
     /// Most project should create at most one reader for a given index.
     /// This method is typically called only once per `Index` instance.
@@ -580,10 +671,12 @@ impl fmt::Debug for Index {

 #[cfg(test)]
 mod tests {
+    use crate::collector::Count;
     use crate::directory::{RamDirectory, WatchCallback};
-    use crate::schema::{Field, Schema, INDEXED, TEXT};
+    use crate::query::TermQuery;
+    use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, TEXT};
     use crate::tokenizer::TokenizerManager;
-    use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy};
+    use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy, Term};

     #[test]
     fn test_indexer_for_field() {
@@ -849,4 +942,28 @@ mod tests {
         );
         Ok(())
     }

+    #[test]
+    fn test_single_segment_index_writer() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let directory = RamDirectory::default();
+        let mut single_segment_index_writer = Index::builder()
+            .schema(schema)
+            .single_segment_index_writer(directory, 10_000_000)?;
+        for _ in 0..10 {
+            let doc = doc!(text_field=>"hello");
+            single_segment_index_writer.add_document(doc)?;
+        }
+        let index = single_segment_index_writer.finalize()?;
+        let searcher = index.reader()?.searcher();
+        let term_query = TermQuery::new(
+            Term::from_field_text(text_field, "hello"),
+            IndexRecordOption::Basic,
+        );
+        let count = searcher.search(&term_query, &Count)?;
+        assert_eq!(count, 10);
+        Ok(())
+    }
 }
@@ -130,10 +130,10 @@ impl SegmentMeta {
     /// Returns the relative path of a component of our segment.
     ///
     /// It just joins the segment id with the extension
-    /// associated to a segment component.
+    /// associated with a segment component.
     pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
         let mut path = self.id().uuid_string();
-        path.push_str(&*match component {
+        path.push_str(&match component {
             SegmentComponent::Postings => ".idx".to_string(),
             SegmentComponent::Positions => ".pos".to_string(),
             SegmentComponent::Terms => ".term".to_string(),
@@ -235,6 +235,14 @@ impl InnerSegmentMeta {
     }
 }

+fn return_true() -> bool {
+    true
+}
+
+fn is_true(val: &bool) -> bool {
+    *val
+}
+
 /// Search Index Settings.
 ///
 /// Contains settings which are applied on the whole
@@ -248,6 +256,12 @@ pub struct IndexSettings {
     /// The `Compressor` used to compress the doc store.
     #[serde(default)]
     pub docstore_compression: Compressor,
+    /// If set to true, docstore compression will happen on a dedicated thread.
+    /// (defaults: true)
+    #[doc(hidden)]
+    #[serde(default = "return_true")]
+    #[serde(skip_serializing_if = "is_true")]
+    pub docstore_compress_dedicated_thread: bool,
     #[serde(default = "default_docstore_blocksize")]
     /// The size of each block that will be compressed and written to disk
     pub docstore_blocksize: usize,
@@ -264,6 +278,7 @@ impl Default for IndexSettings {
             sort_by_field: None,
             docstore_compression: Compressor::default(),
             docstore_blocksize: default_docstore_blocksize(),
+            docstore_compress_dedicated_thread: true,
         }
     }
 }
@@ -311,13 +326,13 @@ pub struct IndexMeta {
     /// `IndexSettings` to configure index options.
     #[serde(default)]
     pub index_settings: IndexSettings,
-    /// List of `SegmentMeta` information associated to each finalized segment of the index.
+    /// List of `SegmentMeta` information associated with each finalized segment of the index.
     pub segments: Vec<SegmentMeta>,
     /// Index `Schema`
     pub schema: Schema,
-    /// Opstamp associated to the last `commit` operation.
+    /// Opstamp associated with the last `commit` operation.
     pub opstamp: Opstamp,
-    /// Payload associated to the last commit.
+    /// Payload associated with the last commit.
     ///
     /// Upon commit, clients can optionally add a small `String` payload to their commit
     /// to help identify this commit.
@@ -395,7 +410,7 @@ mod tests {
     use super::IndexMeta;
     use crate::core::index_meta::UntrackedIndexMeta;
     use crate::schema::{Schema, TEXT};
-    use crate::store::ZstdCompressor;
+    use crate::store::{Compressor, ZstdCompressor};
     use crate::{IndexSettings, IndexSortByField, Order};

     #[test]
@@ -447,6 +462,7 @@ mod tests {
                 compression_level: Some(4),
             }),
             docstore_blocksize: 1_000_000,
+            docstore_compress_dedicated_thread: true,
         },
         segments: Vec::new(),
         schema,
@@ -485,4 +501,47 @@ mod tests {
             "unknown zstd option \"bla\" at line 1 column 103".to_string()
         );
     }

+    #[test]
+    #[cfg(feature = "lz4-compression")]
+    fn test_index_settings_default() {
+        let mut index_settings = IndexSettings::default();
+        assert_eq!(
+            index_settings,
+            IndexSettings {
+                sort_by_field: None,
+                docstore_compression: Compressor::default(),
+                docstore_compress_dedicated_thread: true,
+                docstore_blocksize: 16_384
+            }
+        );
+        {
+            let index_settings_json = serde_json::to_value(&index_settings).unwrap();
+            assert_eq!(
+                index_settings_json,
+                serde_json::json!({
+                    "docstore_compression": "lz4",
+                    "docstore_blocksize": 16384
+                })
+            );
+            let index_settings_deser: IndexSettings =
+                serde_json::from_value(index_settings_json).unwrap();
+            assert_eq!(index_settings_deser, index_settings);
+        }
+        {
+            index_settings.docstore_compress_dedicated_thread = false;
+            let index_settings_json = serde_json::to_value(&index_settings).unwrap();
+            assert_eq!(
+                index_settings_json,
+                serde_json::json!({
+                    "docstore_compression": "lz4",
+                    "docstore_blocksize": 16384,
+                    "docstore_compress_dedicated_thread": false,
+                })
+            );
+            let index_settings_deser: IndexSettings =
+                serde_json::from_value(index_settings_json).unwrap();
+            assert_eq!(index_settings_deser, index_settings);
+        }
+    }
 }
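The `return_true`/`is_true` pair above exists purely for serde: the new bool field defaults to `true` when absent and is skipped during serialization while it still equals that default, which is what keeps old `meta.json` files round-tripping byte-for-byte (as the new `test_index_settings_default` verifies). A standalone sketch of the pattern, independent of tantivy:

```rust
use serde::{Deserialize, Serialize};

fn return_true() -> bool {
    true
}

fn is_true(val: &bool) -> bool {
    *val
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Settings {
    /// Absent in old JSON documents: defaults to `true` on read and is
    /// omitted on write as long as it still holds the default value.
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    dedicated_thread: bool,
}

fn main() {
    // Old payloads without the field still deserialize...
    let s: Settings = serde_json::from_str("{}").unwrap();
    assert!(s.dedicated_thread);
    // ...and serializing the default produces the old payload again.
    assert_eq!(serde_json::to_string(&s).unwrap(), "{}");
}
```

This keeps a newly added setting fully backward- and forward-compatible across versions that do and do not know about it.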
@@ -9,18 +9,17 @@ use crate::schema::{IndexRecordOption, Term};
 use crate::termdict::TermDictionary;

 /// The inverted index reader is in charge of accessing
-/// the inverted index associated to a specific field.
+/// the inverted index associated with a specific field.
 ///
 /// # Note
 ///
-/// It is safe to delete the segment associated to
+/// It is safe to delete the segment associated with
 /// an `InvertedIndexReader`. As long as it is open,
-/// the `FileSlice` it is relying on should
+/// the [`FileSlice`] it is relying on should
 /// stay available.
 ///
-///
 /// `InvertedIndexReader` are created by calling
-/// the `SegmentReader`'s [`.inverted_index(...)`] method
+/// [`SegmentReader::inverted_index()`](crate::SegmentReader::inverted_index).
 pub struct InvertedIndexReader {
     termdict: TermDictionary,
     postings_file_slice: FileSlice,
@@ -30,7 +29,7 @@ pub struct InvertedIndexReader {
 }

 impl InvertedIndexReader {
-    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
+    #[allow(clippy::needless_pass_by_value)] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
         postings_file_slice: FileSlice,
@@ -75,7 +74,7 @@ impl InvertedIndexReader {
     ///
     /// This is useful for enumerating through a list of terms,
     /// and consuming the associated posting lists while avoiding
-    /// reallocating a `BlockSegmentPostings`.
+    /// reallocating a [`BlockSegmentPostings`].
     ///
     /// # Warning
     ///
@@ -96,7 +95,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `Term`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub fn read_block_postings(
         &self,
         term: &Term,
@@ -110,7 +109,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `term_info`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub fn read_block_postings_from_terminfo(
         &self,
         term_info: &TermInfo,
@@ -130,7 +129,7 @@ impl InvertedIndexReader {
     /// Returns a posting object given a `term_info`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub fn read_postings_from_terminfo(
         &self,
         term_info: &TermInfo,
@@ -164,12 +163,12 @@ impl InvertedIndexReader {
     /// or `None` if the term has never been encountered and indexed.
     ///
     /// If the field was not indexed with the indexing options that cover
-    /// the requested options, the returned `SegmentPostings` the method does not fail
+    /// the requested options, the returned [`SegmentPostings`] the method does not fail
     /// and returns a `SegmentPostings` with as much information as possible.
     ///
-    /// For instance, requesting `IndexRecordOption::Freq` for a
-    /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
-    /// with `DocId`s and frequencies.
+    /// For instance, requesting [`IndexRecordOption::WithFreqs`] for a
+    /// [`TextOptions`](crate::schema::TextOptions) that does not index position
+    /// will return a [`SegmentPostings`] with `DocId`s and frequencies.
     pub fn read_postings(
         &self,
         term: &Term,
@@ -211,7 +210,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `Term`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub async fn warm_postings(
         &self,
         term: &Term,
@@ -231,6 +230,18 @@ impl InvertedIndexReader {
         Ok(())
     }

+    /// Read the block postings for all terms.
+    /// This method is for an advanced usage only.
+    ///
+    /// If you know which terms to pre-load, prefer using [`Self::warm_postings`] instead.
+    pub async fn warm_postings_full(&self, with_positions: bool) -> crate::AsyncIoResult<()> {
+        self.postings_file_slice.read_bytes_async().await?;
+        if with_positions {
+            self.positions_file_slice.read_bytes_async().await?;
+        }
+        Ok(())
+    }
+
     /// Returns the number of documents containing the term asynchronously.
     pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
         Ok(self
|
|||||||
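The `warm_postings_full` method added above pulls an entire postings file (and optionally the positions file) into memory. A minimal warm-up sketch of how it might be driven, assuming an async runtime and that `searcher` and `field` name an existing `Searcher` and an indexed field (both hypothetical here, not part of the diff):

```rust
use tantivy::schema::Field;
use tantivy::Searcher;

// Hypothetical helper: pre-load all postings of `field` for every segment
// visible to `searcher`, so the first query does not pay the I/O cost.
async fn warm_up(searcher: &Searcher, field: Field) {
    for segment_reader in searcher.segment_readers() {
        let inverted_index = segment_reader
            .inverted_index(field)
            .expect("field should be indexed");
        // `false` skips the positions file; pass `true` if phrase queries matter.
        inverted_index
            .warm_postings_full(false)
            .await
            .expect("warm-up I/O failed");
    }
}
```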
@@ -7,6 +7,7 @@ mod segment;
 mod segment_component;
 mod segment_id;
 mod segment_reader;
+mod single_segment_index_writer;

 use std::path::Path;

@@ -23,6 +24,7 @@ pub use self::segment::Segment;
 pub use self::segment_component::SegmentComponent;
 pub use self::segment_id::SegmentId;
 pub use self::segment_reader::SegmentReader;
+pub use self::single_segment_index_writer::SingleSegmentIndexWriter;

 /// The meta file contains all the information about the list of segments and the schema
 /// of the index.
@@ -4,18 +4,18 @@ use std::{fmt, io};

 use crate::collector::Collector;
 use crate::core::{Executor, SegmentReader};
-use crate::query::Query;
+use crate::query::{EnableScoring, Query};
 use crate::schema::{Document, Schema, Term};
 use crate::space_usage::SearcherSpaceUsage;
 use crate::store::{CacheStats, StoreReader};
 use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};

-/// Identifies the searcher generation accessed by a [Searcher].
+/// Identifies the searcher generation accessed by a [`Searcher`].
 ///
-/// While this might seem redundant, a [SearcherGeneration] contains
+/// While this might seem redundant, a [`SearcherGeneration`] contains
 /// both a `generation_id` AND a list of `(SegmentId, DeleteOpstamp)`.
 ///
-/// This is on purpose. This object is used by the `Warmer` API.
+/// This is on purpose. This object is used by the [`Warmer`](crate::reader::Warmer) API.
 /// Having both information makes it possible to identify which
 /// artifact should be refreshed or garbage collected.
 ///
@@ -69,20 +69,20 @@ pub struct Searcher {
 }

 impl Searcher {
-    /// Returns the `Index` associated to the `Searcher`
+    /// Returns the `Index` associated with the `Searcher`
     pub fn index(&self) -> &Index {
         &self.inner.index
     }

-    /// [SearcherGeneration] which identifies the version of the snapshot held by this `Searcher`.
+    /// [`SearcherGeneration`] which identifies the version of the snapshot held by this `Searcher`.
     pub fn generation(&self) -> &SearcherGeneration {
         self.inner.generation.as_ref()
     }

-    /// Fetches a document from tantivy's store given a `DocAddress`.
+    /// Fetches a document from tantivy's store given a [`DocAddress`].
     ///
     /// The searcher uses the segment ordinal to route the
-    /// the request to the right `Segment`.
+    /// request to the right `Segment`.
     pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
         let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
         store_reader.get(doc_address.doc_id)
@@ -108,7 +108,7 @@ impl Searcher {
         store_reader.get_async(doc_address.doc_id).await
     }

-    /// Access the schema associated to the index of this searcher.
+    /// Access the schema associated with the index of this searcher.
     pub fn schema(&self) -> &Schema {
         &self.inner.schema
     }
@@ -161,11 +161,11 @@ impl Searcher {
     ///
     /// Search works as follows :
     ///
-    /// First the weight object associated to the query is created.
+    /// First the weight object associated with the query is created.
     ///
     /// Then, the query loops over the segments and for each segment :
     /// - setup the collector and informs it that the segment being processed has changed.
-    /// - creates a SegmentCollector for collecting documents associated to the segment
+    /// - creates a SegmentCollector for collecting documents associated with the segment
     /// - creates a `Scorer` object associated for this segment
     /// - iterate through the matched documents and push them to the segment collector.
     ///
@@ -180,7 +180,7 @@ impl Searcher {
         self.search_with_executor(query, collector, executor)
     }

-    /// Same as [`search(...)`](#method.search) but multithreaded.
+    /// Same as [`search(...)`](Searcher::search) but multithreaded.
     ///
     /// The current implementation is rather naive :
     /// multithreading is by splitting search into as many task
@@ -199,7 +199,12 @@ impl Searcher {
         executor: &Executor,
     ) -> crate::Result<C::Fruit> {
         let scoring_enabled = collector.requires_scoring();
-        let weight = query.weight(self, scoring_enabled)?;
+        let enabled_scoring = if scoring_enabled {
+            EnableScoring::Enabled(self)
+        } else {
+            EnableScoring::Disabled(self.schema())
+        };
+        let weight = query.weight(enabled_scoring)?;
         let segment_readers = self.segment_readers();
         let fruits = executor.map(
             |(segment_ord, segment_reader)| {
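The `search_with_executor` hunk above carries the central API change of this series: `Query::weight` no longer takes `(searcher, bool)` but a single `EnableScoring` argument. A sketch of the new calling convention, assuming `query`, `searcher`, and `collector` are already in scope (hypothetical names):

```rust
use tantivy::query::EnableScoring;

// Scored searches need the full Searcher (for corpus statistics);
// unscored searches only need the Schema to build their Weight.
let enable_scoring = if collector.requires_scoring() {
    EnableScoring::Enabled(&searcher)
} else {
    EnableScoring::Disabled(searcher.schema())
};
let weight = query.weight(enable_scoring)?;
```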
@@ -70,7 +70,7 @@ impl Segment {
     /// Returns the relative path of a component of our segment.
     ///
     /// It just joins the segment id with the extension
-    /// associated to a segment component.
+    /// associated with a segment component.
     pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
         self.meta.relative_path(component)
     }
@@ -6,7 +6,7 @@ use std::slice;
 /// except the delete component that takes an `segment_uuid`.`delete_opstamp`.`component_extension`
 #[derive(Copy, Clone, Eq, PartialEq)]
 pub enum SegmentComponent {
-    /// Postings (or inverted list). Sorted lists of document ids, associated to terms
+    /// Postings (or inverted list). Sorted lists of document ids, associated with terms
     Postings,
     /// Positions of terms in each document.
     Positions,
@@ -57,7 +57,7 @@ impl SegmentId {
     /// Picking the first 8 chars is ok to identify
     /// segments in a display message (e.g. a5c4dfcb).
     pub fn short_uuid_string(&self) -> String {
-        (&self.0.as_simple().to_string()[..8]).to_string()
+        self.0.as_simple().to_string()[..8].to_string()
     }

     /// Returns a segment uuid string.
@@ -89,7 +89,7 @@ impl SegmentReader {
         &self.fast_fields_readers
     }

-    /// Accessor to the `FacetReader` associated to a given `Field`.
+    /// Accessor to the `FacetReader` associated with a given `Field`.
     pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
         let field_entry = self.schema.get_field_entry(field);

@@ -208,18 +208,18 @@ impl SegmentReader {
         })
     }

-    /// Returns a field reader associated to the field given in argument.
+    /// Returns a field reader associated with the field given in argument.
     /// If the field was not present in the index during indexing time,
     /// the InvertedIndexReader is empty.
     ///
     /// The field reader is in charge of iterating through the
-    /// term dictionary associated to a specific field,
-    /// and opening the posting list associated to any term.
+    /// term dictionary associated with a specific field,
+    /// and opening the posting list associated with any term.
     ///
-    /// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader`
+    /// If the field is not marked as index, a warning is logged and an empty `InvertedIndexReader`
     /// is returned.
-    /// Similarly if the field is marked as indexed but no term has been indexed for the given
-    /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
+    /// Similarly, if the field is marked as indexed but no term has been indexed for the given
+    /// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
     pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
         if let Some(inv_idx_reader) = self
             .inv_idx_reader_cache
@@ -241,7 +241,7 @@ impl SegmentReader {

         if postings_file_opt.is_none() || record_option_opt.is_none() {
             // no documents in the segment contained this field.
-            // As a result, no data is associated to the inverted index.
+            // As a result, no data is associated with the inverted index.
             //
             // Returns an empty inverted index.
             let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
src/core/single_segment_index_writer.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
+use crate::indexer::operation::AddOperation;
+use crate::indexer::segment_updater::save_metas;
+use crate::indexer::SegmentWriter;
+use crate::{Directory, Document, Index, IndexMeta, Opstamp, Segment};
+
+#[doc(hidden)]
+pub struct SingleSegmentIndexWriter {
+    segment_writer: SegmentWriter,
+    segment: Segment,
+    opstamp: Opstamp,
+}
+
+impl SingleSegmentIndexWriter {
+    pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
+        let segment = index.new_segment();
+        let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
+        Ok(Self {
+            segment_writer,
+            segment,
+            opstamp: 0,
+        })
+    }
+
+    pub fn mem_usage(&self) -> usize {
+        self.segment_writer.mem_usage()
+    }
+
+    pub fn add_document(&mut self, document: Document) -> crate::Result<()> {
+        let opstamp = self.opstamp;
+        self.opstamp += 1;
+        self.segment_writer
+            .add_document(AddOperation { opstamp, document })
+    }
+
+    pub fn finalize(self) -> crate::Result<Index> {
+        let max_doc = self.segment_writer.max_doc();
+        self.segment_writer.finalize()?;
+        let segment: Segment = self.segment.with_max_doc(max_doc);
+        let index = segment.index();
+        let index_meta = IndexMeta {
+            index_settings: index.settings().clone(),
+            segments: vec![segment.meta().clone()],
+            schema: index.schema(),
+            opstamp: 0,
+            payload: None,
+        };
+        save_metas(&index_meta, index.directory())?;
+        index.directory().sync_directory()?;
+        Ok(segment.index().clone())
+    }
+}
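A minimal sketch of driving this new (still `#[doc(hidden)]`) writer, assuming it is re-exported at the crate root and that the schema has one text field named `title` (both assumptions, not shown in the diff). Unlike `IndexWriter`, everything lands in a single segment, no merge threads are spawned, and `finalize` saves the metas before handing the `Index` back:

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, SingleSegmentIndexWriter};

fn build_single_segment_index() -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // 15 MB memory budget for the single segment.
    let mut writer = SingleSegmentIndexWriter::new(index, 15_000_000)?;
    writer.add_document(doc!(title => "of mice and men"))?;
    writer.finalize()
}
```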
@@ -154,14 +154,14 @@ impl CompositeFile {
         }
     }

-    /// Returns the `FileSlice` associated
-    /// to a given `Field` and stored in a `CompositeFile`.
+    /// Returns the `FileSlice` associated with
+    /// a given `Field` and stored in a `CompositeFile`.
     pub fn open_read(&self, field: Field) -> Option<FileSlice> {
         self.open_read_with_idx(field, 0)
     }

-    /// Returns the `FileSlice` associated
-    /// to a given `Field` and stored in a `CompositeFile`.
+    /// Returns the `FileSlice` associated with
+    /// a given `Field` and stored in a `CompositeFile`.
     pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
         self.offsets_index
             .get(&FileAddr { field, idx })
@@ -39,7 +39,7 @@ impl RetryPolicy {

 /// The `DirectoryLock` is an object that represents a file lock.
 ///
-/// It is associated to a lock file, that gets deleted on `Drop.`
+/// It is associated with a lock file, that gets deleted on `Drop.`
 pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);

 struct DirectoryLockGuard {
@@ -55,7 +55,7 @@ impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {

 impl Drop for DirectoryLockGuard {
     fn drop(&mut self) {
-        if let Err(e) = self.directory.delete(&*self.path) {
+        if let Err(e) = self.directory.delete(&self.path) {
             error!("Failed to remove the lock file. {:?}", e);
         }
     }
@@ -117,9 +117,9 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// change.
     ///
     /// Specifically, subsequent writes or flushes should
-    /// have no effect on the returned `FileSlice` object.
+    /// have no effect on the returned [`FileSlice`] object.
     ///
-    /// You should only use this to read files create with [Directory::open_write].
+    /// You should only use this to read files create with [`Directory::open_write()`].
     fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
         let file_handle = self.get_file_handle(path)?;
         Ok(FileSlice::new(file_handle))
@@ -128,27 +128,28 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Removes a file
     ///
     /// Removing a file will not affect an eventual
-    /// existing FileSlice pointing to it.
+    /// existing [`FileSlice`] pointing to it.
     ///
-    /// Removing a nonexistent file, yields a
-    /// `DeleteError::DoesNotExist`.
+    /// Removing a nonexistent file, returns a
+    /// [`DeleteError::FileDoesNotExist`].
     fn delete(&self, path: &Path) -> Result<(), DeleteError>;

     /// Returns true if and only if the file exists
     fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;

     /// Opens a writer for the *virtual file* associated with
-    /// a Path.
+    /// a [`Path`].
     ///
     /// Right after this call, for the span of the execution of the program
-    /// the file should be created and any subsequent call to `open_read` for the
-    /// same path should return a `FileSlice`.
+    /// the file should be created and any subsequent call to
+    /// [`Directory::open_read()`] for the same path should return
+    /// a [`FileSlice`].
     ///
     /// However, depending on the directory implementation,
-    /// it might be required to call `sync_directory` to ensure
+    /// it might be required to call [`Directory::sync_directory()`] to ensure
     /// that the file is durably created.
     /// (The semantics here are the same when dealing with
-    /// a posix filesystem.)
+    /// a POSIX filesystem.)
     ///
     /// Write operations may be aggressively buffered.
     /// The client of this trait is responsible for calling flush
@@ -157,19 +158,19 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     ///
     /// Flush operation should also be persistent.
     ///
-    /// The user shall not rely on `Drop` triggering `flush`.
-    /// Note that `RamDirectory` will panic! if `flush`
-    /// was not called.
+    /// The user shall not rely on [`Drop`] triggering `flush`.
+    /// Note that [`RamDirectory`][crate::directory::RamDirectory] will
+    /// panic! if `flush` was not called.
     ///
     /// The file may not previously exist.
     fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;

     /// Reads the full content file that has been written using
-    /// atomic_write.
+    /// [`Directory::atomic_write()`].
     ///
     /// This should only be used for small files.
     ///
-    /// You should only use this to read files create with [Directory::atomic_write].
+    /// You should only use this to read files create with [`Directory::atomic_write()`].
     fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;

     /// Atomically replace the content of a file with data.
@@ -186,9 +187,9 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// effectively stored durably.
     fn sync_directory(&self) -> io::Result<()>;

-    /// Acquire a lock in the given directory.
+    /// Acquire a lock in the directory given in the [`Lock`].
     ///
-    /// The method is blocking or not depending on the `Lock` object.
+    /// The method is blocking or not depending on the [`Lock`] object.
     fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
         let mut box_directory = self.box_clone();
         let mut retry_policy = retry_policy(lock.is_blocking);
@@ -210,15 +211,15 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     }

     /// Registers a callback that will be called whenever a change on the `meta.json`
-    /// using the `atomic_write` API is detected.
+    /// using the [`Directory::atomic_write()`] API is detected.
     ///
-    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
-    /// hand, undefined.
+    /// The behavior when using `.watch()` on a file using [`Directory::open_write()`] is, on the
+    /// other hand, undefined.
     ///
     /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
     /// required to keep it.
     /// It does not override previous callbacks. When the file is modified, all callback that are
-    /// registered (and whose `WatchHandle` is still alive) are triggered.
+    /// registered (and whose [`WatchHandle`] is still alive) are triggered.
     ///
     /// Internally, tantivy only uses this API to detect new commits to implement the
     /// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the
@@ -4,12 +4,14 @@ use once_cell::sync::Lazy;

 /// A directory lock.
 ///
-/// A lock is associated to a specific path and some
-/// [`LockParams`](./enum.LockParams.html).
+/// A lock is associated with a specific path.
+///
+/// The lock will be passed to [`Directory::acquire_lock`](crate::Directory::acquire_lock).
+///
 /// Tantivy itself uses only two locks but client application
 /// can use the directory facility to define their own locks.
-/// - [INDEX_WRITER_LOCK]
-/// - [META_LOCK]
+/// - [`INDEX_WRITER_LOCK`]
+/// - [`META_LOCK`]
 ///
 /// Check out these locks documentation for more information.
 #[derive(Debug)]
@@ -18,19 +20,21 @@ pub struct Lock {
     /// Depending on the platform, the lock might rely on the creation
     /// and deletion of this filepath.
     pub filepath: PathBuf,
-    /// `lock_params` describes whether acquiring the lock is meant
+    /// `is_blocking` describes whether acquiring the lock is meant
     /// to be a blocking operation or a non-blocking.
     ///
     /// Acquiring a blocking lock blocks until the lock is
     /// available.
-    /// Acquiring a blocking lock returns rapidly, either successfully
+    ///
+    /// Acquiring a non-blocking lock returns rapidly, either successfully
     /// or with an error signifying that someone is already holding
     /// the lock.
     pub is_blocking: bool,
 }

 /// Only one process should be able to write tantivy's index at a time.
-/// This lock file, when present, is in charge of preventing other processes to open an IndexWriter.
+/// This lock file, when present, is in charge of preventing other processes to open an
+/// `IndexWriter`.
 ///
 /// If the process is killed and this file remains, it is safe to remove it manually.
 ///
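For reference, these locks are taken through the `Directory::acquire_lock` method documented in the previous file. A sketch under stated assumptions (the guard-style helper and its names are hypothetical); `INDEX_WRITER_LOCK` is declared non-blocking, so a second writer fails fast rather than waiting:

```rust
use tantivy::directory::{Directory, INDEX_WRITER_LOCK};

// Hold the writer lock while `f` runs; the lock file is removed when
// the returned DirectoryLock guard is dropped at the end of the scope.
fn with_writer_lock<D: Directory>(dir: &D, f: impl FnOnce()) {
    let _guard = dir
        .acquire_lock(&INDEX_WRITER_LOCK)
        .expect("another process already holds the writer lock");
    f();
}
```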
@@ -4,7 +4,9 @@ use std::{fmt, io};

 use crate::Version;

-/// Error while trying to acquire a directory lock.
+/// Error while trying to acquire a directory [lock](crate::directory::Lock).
+///
+/// This is returned from [`Directory::acquire_lock`](crate::Directory::acquire_lock).
 #[derive(Debug, Clone, Error)]
 pub enum LockError {
     /// Failed to acquired a lock as it is already held by another
@@ -1,5 +1,5 @@
 use std::ops::{Deref, Range};
-use std::sync::{Arc, Weak};
+use std::sync::Arc;
 use std::{fmt, io};

 use async_trait::async_trait;
@@ -8,16 +8,13 @@ use stable_deref_trait::StableDeref;

 use crate::directory::OwnedBytes;

-pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-
 /// Objects that represents files sections in tantivy.
 ///
 /// By contract, whatever happens to the directory file, as long as a FileHandle
 /// is alive, the data associated with it cannot be altered or destroyed.
 ///
-/// The underlying behavior is therefore specific to the `Directory` that created it.
-/// Despite its name, a `FileSlice` may or may not directly map to an actual file
+/// The underlying behavior is therefore specific to the [`Directory`](crate::Directory) that
+/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
 /// on the filesystem.

 #[async_trait]
@@ -9,7 +9,7 @@ use crc32fast::Hasher;

 use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};

-pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
+const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });

 // Watches a file and executes registered callbacks when the file is modified.
 pub struct FileWatcher {
@@ -3,7 +3,7 @@ use std::fs::{self, File, OpenOptions};
 use std::io::{self, BufWriter, Read, Seek, Write};
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, RwLock, Weak};
 use std::{fmt, result};

 use fs2::FileExt;
@@ -18,16 +18,19 @@ use crate::directory::error::{
 };
 use crate::directory::file_watcher::FileWatcher;
 use crate::directory::{
-    AntiCallToken, ArcBytes, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes,
-    TerminatingWrite, WatchCallback, WatchHandle, WeakArcBytes, WritePtr,
+    AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
+    WatchCallback, WatchHandle, WritePtr,
 };

+pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+
 /// Create a default io error given a string.
 pub(crate) fn make_io_err(msg: String) -> io::Error {
     io::Error::new(io::ErrorKind::Other, msg)
 }

-/// Returns None iff the file exists, can be read, but is empty (and hence
+/// Returns `None` iff the file exists, can be read, but is empty (and hence
 /// cannot be mmapped)
 fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
     let file = File::open(full_path).map_err(|io_err| {
@@ -56,10 +59,10 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {

 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct CacheCounters {
-    // Number of time the cache prevents to call `mmap`
+    /// Number of time the cache prevents to call `mmap`
     pub hit: usize,
-    // Number of time tantivy had to call `mmap`
-    // as no entry was in the cache.
+    /// Number of time tantivy had to call `mmap`
+    /// as no entry was in the cache.
     pub miss: usize,
 }

@@ -301,7 +304,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
             "Path {:?} does not have parent directory.",
         )
     })?;
-    let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
+    let mut tempfile = tempfile::Builder::new().tempfile_in(parent_path)?;
     tempfile.write_all(content)?;
     tempfile.flush()?;
     tempfile.as_file_mut().sync_data()?;
@@ -334,7 +337,7 @@ impl Directory for MmapDirectory {
         Ok(Arc::new(owned_bytes))
     }

-    /// Any entry associated to the path in the mmap will be
+    /// Any entry associated with the path in the mmap will be
     /// removed before the file is deleted.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
         let full_path = self.resolve_path(path);
@@ -472,6 +475,8 @@ mod tests {
     // There are more tests in directory/mod.rs
     // The following tests are specific to the MmapDirectory

+    use std::time::Duration;
+
     use common::HasLen;

     use super::*;
@@ -566,9 +571,21 @@ mod tests {
         assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
     }

+    fn assert_eventually<P: Fn() -> Option<String>>(predicate: P) {
+        for _ in 0..30 {
+            if predicate().is_none() {
+                break;
+            }
+            std::thread::sleep(Duration::from_millis(200));
+        }
+        if let Some(error_msg) = predicate() {
+            panic!("{}", error_msg);
+        }
+    }
+
     #[test]
-    fn test_mmap_released() -> crate::Result<()> {
-        let mmap_directory = MmapDirectory::create_from_tempdir()?;
+    fn test_mmap_released() {
+        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
         let mut schema_builder: SchemaBuilder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -577,40 +594,56 @@ mod tests {
             let index =
                 Index::create(mmap_directory.clone(), schema, IndexSettings::default()).unwrap();

-            let mut index_writer = index.writer_for_tests()?;
+            let mut index_writer = index.writer_for_tests().unwrap();
             let mut log_merge_policy = LogMergePolicy::default();
             log_merge_policy.set_min_num_segments(3);
             index_writer.set_merge_policy(Box::new(log_merge_policy));
             for _num_commits in 0..10 {
                 for _ in 0..10 {
-                    index_writer.add_document(doc!(text_field=>"abc"))?;
+                    index_writer.add_document(doc!(text_field=>"abc")).unwrap();
                 }
-                index_writer.commit()?;
+                index_writer.commit().unwrap();
             }

             let reader = index
                 .reader_builder()
                 .reload_policy(ReloadPolicy::Manual)
-                .try_into()?;
+                .try_into()
+                .unwrap();

             for _ in 0..4 {
-                index_writer.add_document(doc!(text_field=>"abc"))?;
-                index_writer.commit()?;
-                reader.reload()?;
+                index_writer.add_document(doc!(text_field=>"abc")).unwrap();
+                index_writer.commit().unwrap();
+                reader.reload().unwrap();
             }
-            index_writer.wait_merging_threads()?;
+            index_writer.wait_merging_threads().unwrap();

-            reader.reload()?;
+            reader.reload().unwrap();
             let num_segments = reader.searcher().segment_readers().len();
             assert!(num_segments <= 4);
             let num_components_except_deletes_and_tempstore =
                 crate::core::SegmentComponent::iterator().len() - 2;
-            assert_eq!(
-                num_segments * num_components_except_deletes_and_tempstore,
-                mmap_directory.get_cache_info().mmapped.len()
-            );
+            let max_num_mmapped = num_components_except_deletes_and_tempstore * num_segments;
+            assert_eventually(|| {
+                let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
+                if num_mmapped > max_num_mmapped {
+                    Some(format!(
+                        "Expected at most {max_num_mmapped} mmapped files, got {num_mmapped}"
+                    ))
+                } else {
+                    None
+                }
+            });
         }
-        assert!(mmap_directory.get_cache_info().mmapped.is_empty());
-        Ok(())
+        // This test failed on CI. The last Mmap is dropped from the merging thread so there might
+        // be a race condition indeed.
+        assert_eventually(|| {
+            let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
+            if num_mmapped > 0 {
+                Some(format!("Expected no mmapped files, got {num_mmapped}"))
+            } else {
+                None
+            }
+        });
     }
 }
@@ -26,7 +26,6 @@ pub use ownedbytes::OwnedBytes;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
-pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
 pub use self::file_slice::{FileHandle, FileSlice};
 pub use self::ram_directory::RamDirectory;
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
@@ -15,7 +15,7 @@ use crate::directory::{
     WatchHandle, WritePtr,
 };

-/// Writer associated with the `RamDirectory`
+/// Writer associated with the [`RamDirectory`].
 ///
 /// The Writer just writes a buffer.
 struct VecWriter {
@@ -136,18 +136,32 @@ impl RamDirectory {
         Self::default()
     }

+    /// Deep clones the directory.
+    ///
+    /// Ulterior writes on one of the copy
+    /// will not affect the other copy.
+    pub fn deep_clone(&self) -> RamDirectory {
+        let inner_clone = InnerDirectory {
+            fs: self.fs.read().unwrap().fs.clone(),
+            watch_router: Default::default(),
+        };
+        RamDirectory {
+            fs: Arc::new(RwLock::new(inner_clone)),
+        }
+    }
+
     /// Returns the sum of the size of the different files
-    /// in the RamDirectory.
+    /// in the [`RamDirectory`].
     pub fn total_mem_usage(&self) -> usize {
         self.fs.read().unwrap().total_mem_usage()
     }

-    /// Write a copy of all of the files saved in the RamDirectory in the target `Directory`.
+    /// Write a copy of all of the files saved in the [`RamDirectory`] in the target [`Directory`].
     ///
-    /// Files are all written using the `Directory::write` meaning, even if they were
-    /// written using the `atomic_write` api.
+    /// Files are all written using the [`Directory::open_write()`] meaning, even if they were
+    /// written using the [`Directory::atomic_write()`] api.
     ///
-    /// If an error is encounterred, files may be persisted partially.
+    /// If an error is encountered, files may be persisted partially.
     pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> {
         let wlock = self.fs.write().unwrap();
         for (path, file) in wlock.fs.iter() {
@@ -256,4 +270,23 @@ mod tests {
         assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
         assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
     }
+
+    #[test]
+    fn test_ram_directory_deep_clone() {
+        let dir = RamDirectory::default();
+        let test = Path::new("test");
+        let test2 = Path::new("test2");
+        dir.atomic_write(test, b"firstwrite").unwrap();
+        let dir_clone = dir.deep_clone();
+        assert_eq!(
+            dir_clone.atomic_read(test).unwrap(),
+            dir.atomic_read(test).unwrap()
+        );
+        dir.atomic_write(test, b"original").unwrap();
+        dir_clone.atomic_write(test, b"clone").unwrap();
+        dir_clone.atomic_write(test2, b"clone2").unwrap();
+        assert_eq!(dir.atomic_read(test).unwrap(), b"original");
+        assert_eq!(&dir_clone.atomic_read(test).unwrap(), b"clone");
+        assert_eq!(&dir_clone.atomic_read(test2).unwrap(), b"clone2");
+    }
 }
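The distinction between `deep_clone` and the ordinary `Clone` impl matters here: a `RamDirectory` is an `Arc` around its file map, so `clone()` shares state, while `deep_clone` forks it, as the new test demonstrates. A condensed sketch of the same behavior:

```rust
use std::path::Path;
use tantivy::directory::{Directory, RamDirectory};

fn main() {
    let dir = RamDirectory::create();
    dir.atomic_write(Path::new("data"), b"v1").unwrap();

    let fork = dir.deep_clone();
    fork.atomic_write(Path::new("data"), b"v2").unwrap();

    // The fork diverged; the original copy is untouched.
    assert_eq!(dir.atomic_read(Path::new("data")).unwrap(), b"v1");
}
```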
@@ -3,10 +3,10 @@ use std::borrow::{Borrow, BorrowMut};
 use crate::fastfield::AliveBitSet;
 use crate::DocId;

-/// Sentinel value returned when a DocSet has been entirely consumed.
+/// Sentinel value returned when a [`DocSet`] has been entirely consumed.
 ///
-/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
-/// to compare [u32; 4].
+/// This is not `u32::MAX` as one would have expected, due to the lack of SSE2 instructions
+/// to compare `[u32; 4]`.
 pub const TERMINATED: DocId = i32::MAX as u32;

 /// Represents an iterable set of sorted doc ids.
@@ -20,21 +20,21 @@ pub trait DocSet: Send {
     /// assert_eq!(doc, docset.doc());
     /// ```
     ///
-    /// If we reached the end of the DocSet, TERMINATED should be returned.
+    /// If we reached the end of the `DocSet`, [`TERMINATED`] should be returned.
     ///
-    /// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
+    /// Calling `.advance()` on a terminated `DocSet` should be supported, and [`TERMINATED`] should
     /// be returned.
     fn advance(&mut self) -> DocId;

-    /// Advances the DocSet forward until reaching the target, or going to the
-    /// lowest DocId greater than the target.
+    /// Advances the `DocSet` forward until reaching the target, or going to the
+    /// lowest [`DocId`] greater than the target.
     ///
-    /// If the end of the DocSet is reached, TERMINATED is returned.
+    /// If the end of the `DocSet` is reached, [`TERMINATED`] is returned.
     ///
-    /// Calling `.seek(target)` on a terminated DocSet is legal. Implementation
-    /// of DocSet should support it.
+    /// Calling `.seek(target)` on a terminated `DocSet` is legal. Implementation
+    /// of `DocSet` should support it.
     ///
-    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
+    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a `DocSet`.
     fn seek(&mut self, target: DocId) -> DocId {
         let mut doc = self.doc();
         debug_assert!(doc <= target);
@@ -73,9 +73,9 @@ pub trait DocSet: Send {
     }

     /// Returns the current document
-    /// Right after creating a new DocSet, the docset points to the first document.
+    /// Right after creating a new `DocSet`, the docset points to the first document.
     ///
-    /// If the DocSet is empty, .doc() should return `TERMINATED`.
+    /// If the `DocSet` is empty, `.doc()` should return [`TERMINATED`].
     fn doc(&self) -> DocId;

     /// Returns a best-effort hint of the
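The `advance`/`TERMINATED` contract spelled out above implies the canonical consumption loop; a generic sketch (not part of the diff):

```rust
use tantivy::{DocId, DocSet, TERMINATED};

// Drain a DocSet into a vector of matching doc ids.
fn collect_docs<D: DocSet>(docset: &mut D) -> Vec<DocId> {
    let mut docs = Vec::new();
    // A freshly created DocSet already points at its first document.
    let mut doc = docset.doc();
    while doc != TERMINATED {
        docs.push(doc);
        doc = docset.advance();
    }
    docs
}
```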
@@ -6,7 +6,7 @@ pub use self::writer::BytesFastFieldWriter;

 #[cfg(test)]
 mod tests {
-    use crate::query::TermQuery;
+    use crate::query::{EnableScoring, TermQuery};
     use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value, FAST, INDEXED, STORED};
     use crate::{DocAddress, DocSet, Index, Searcher, Term};

@@ -82,7 +82,7 @@ mod tests {
         let field = searcher.schema().get_field("string_bytes").unwrap();
         let term = Term::from_field_bytes(field, b"lucene".as_ref());
         let term_query = TermQuery::new(term, IndexRecordOption::Basic);
-        let term_weight = term_query.specialized_weight(&searcher, true)?;
+        let term_weight = term_query.specialized_weight(EnableScoring::Enabled(&searcher))?;
         let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
         assert_eq!(term_scorer.doc(), 0u32);
         Ok(())
@@ -95,7 +95,8 @@ mod tests {
         let field = searcher.schema().get_field("string_bytes").unwrap();
         let term = Term::from_field_bytes(field, b"lucene".as_ref());
         let term_query = TermQuery::new(term, IndexRecordOption::Basic);
-        let term_weight_err = term_query.specialized_weight(&searcher, false);
+        let term_weight_err =
+            term_query.specialized_weight(EnableScoring::Disabled(searcher.schema()));
         assert!(matches!(
             term_weight_err,
             Err(crate::TantivyError::SchemaError(_))
@@ -1,5 +1,9 @@
+use std::sync::Arc;
+
+use fastfield_codecs::Column;
+
 use crate::directory::{FileSlice, OwnedBytes};
-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, MultiValueLength};
+use crate::fastfield::MultiValueIndex;
 use crate::DocId;

 /// Reader for byte array fast fields
@@ -14,48 +18,41 @@ use crate::DocId;
 /// and the start index for the next document, and keeping the bytes in between.
 #[derive(Clone)]
 pub struct BytesFastFieldReader {
-    idx_reader: DynamicFastFieldReader<u64>,
+    idx_reader: MultiValueIndex,
     values: OwnedBytes,
 }

 impl BytesFastFieldReader {
     pub(crate) fn open(
-        idx_reader: DynamicFastFieldReader<u64>,
+        idx_reader: Arc<dyn Column<u64>>,
         values_file: FileSlice,
     ) -> crate::Result<BytesFastFieldReader> {
         let values = values_file.read_bytes()?;
-        Ok(BytesFastFieldReader { idx_reader, values })
+        Ok(BytesFastFieldReader {
+            idx_reader: MultiValueIndex::new(idx_reader),
+            values,
+        })
     }

-    fn range(&self, doc: DocId) -> (usize, usize) {
-        let start = self.idx_reader.get(doc) as usize;
-        let stop = self.idx_reader.get(doc + 1) as usize;
-        (start, stop)
+    /// returns the multivalue index
+    pub fn get_index_reader(&self) -> &MultiValueIndex {
+        &self.idx_reader
     }

-    /// Returns the bytes associated to the given `doc`
+    /// Returns the bytes associated with the given `doc`
     pub fn get_bytes(&self, doc: DocId) -> &[u8] {
-        let (start, stop) = self.range(doc);
-        &self.values.as_slice()[start..stop]
+        let range = self.idx_reader.range(doc);
+        &self.values.as_slice()[range.start as usize..range.end as usize]
     }

-    /// Returns the length of the bytes associated to the given `doc`
-    pub fn num_bytes(&self, doc: DocId) -> usize {
-        let (start, stop) = self.range(doc);
-        stop - start
+    /// Returns the length of the bytes associated with the given `doc`
+    pub fn num_bytes(&self, doc: DocId) -> u64 {
+        let range = self.idx_reader.range(doc);
+        (range.end - range.start) as u64
     }

     /// Returns the overall number of bytes in this bytes fast field.
-    pub fn total_num_bytes(&self) -> usize {
-        self.values.len()
-    }
-}
-
-impl MultiValueLength for BytesFastFieldReader {
-    fn get_len(&self, doc_id: DocId) -> u64 {
-        self.num_bytes(doc_id) as u64
-    }
-    fn get_total_len(&self) -> u64 {
-        self.total_num_bytes() as u64
+    pub fn total_num_bytes(&self) -> u32 {
+        self.values.len() as u32
     }
 }
|||||||
@@ -1,6 +1,9 @@
|
|||||||
use std::io;
|
use std::io::{self, Write};
|
||||||
|
|
||||||
|
use fastfield_codecs::VecColumn;
|
||||||
|
|
||||||
use crate::fastfield::serializer::CompositeFastFieldSerializer;
|
use crate::fastfield::serializer::CompositeFastFieldSerializer;
|
||||||
|
use crate::fastfield::MultivalueStartIndex;
|
||||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||||
use crate::schema::{Document, Field, Value};
|
use crate::schema::{Document, Field, Value};
|
||||||
use crate::DocId;
|
use crate::DocId;
|
||||||
@@ -10,16 +13,18 @@ use crate::DocId;
|
|||||||
/// This `BytesFastFieldWriter` is only useful for advanced users.
|
/// This `BytesFastFieldWriter` is only useful for advanced users.
|
||||||
/// The normal way to get your associated bytes in your index
|
/// The normal way to get your associated bytes in your index
|
||||||
/// is to
|
/// is to
|
||||||
/// - declare your field with fast set to `Cardinality::SingleValue`
|
/// - declare your field with fast set to
|
||||||
|
/// [`Cardinality::SingleValue`](crate::schema::Cardinality::SingleValue)
|
||||||
/// in your schema
|
/// in your schema
|
||||||
/// - add your document simply by calling `.add_document(...)` with associating bytes to the field.
|
/// - add your document simply by calling `.add_document(...)` with associating bytes to the field.
|
||||||
///
|
///
|
||||||
/// The `BytesFastFieldWriter` can be acquired from the
|
/// The `BytesFastFieldWriter` can be acquired from the
|
||||||
/// fast field writer by calling
|
/// fast field writer by calling
|
||||||
/// [`.get_bytes_writer(...)`](./struct.FastFieldsWriter.html#method.get_bytes_writer).
|
/// [`.get_bytes_writer_mut(...)`](crate::fastfield::FastFieldsWriter::get_bytes_writer_mut).
|
||||||
///
|
///
|
||||||
/// Once acquired, writing is done by calling `.add_document_val(&[u8])`
|
/// Once acquired, writing is done by calling
|
||||||
/// once per document, even if there are no bytes associated to it.
|
/// [`.add_document_val(&[u8])`](BytesFastFieldWriter::add_document_val)
|
||||||
|
/// once per document, even if there are no bytes associated with it.
|
||||||
pub struct BytesFastFieldWriter {
|
pub struct BytesFastFieldWriter {
|
||||||
field: Field,
|
field: Field,
|
||||||
vals: Vec<u8>,
|
vals: Vec<u8>,
|
||||||
@@ -40,7 +45,7 @@ impl BytesFastFieldWriter {
     pub fn mem_usage(&self) -> usize {
         self.vals.capacity() + self.doc_index.capacity() * std::mem::size_of::<u64>()
     }
-    /// Access the field associated to the `BytesFastFieldWriter`
+    /// Access the field associated with the `BytesFastFieldWriter`
     pub fn field(&self) -> Field {
         self.field
     }
@@ -52,17 +57,18 @@ impl BytesFastFieldWriter {
 
     /// Shift to the next document and add all of the
     /// matching field values present in the document.
-    pub fn add_document(&mut self, doc: &Document) {
+    pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
         self.next_doc();
         for field_value in doc.get_all(self.field) {
             if let Value::Bytes(ref bytes) = field_value {
                 self.vals.extend_from_slice(bytes);
-                return;
+                return Ok(());
             }
         }
+        Ok(())
     }
 
-    /// Register the bytes associated to a document.
+    /// Register the bytes associated with a document.
     ///
     /// The method returns the `DocId` of the document that was
     /// just written.
@@ -104,22 +110,27 @@ impl BytesFastFieldWriter {
 
     /// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
     pub fn serialize(
-        &self,
+        mut self,
        serializer: &mut CompositeFastFieldSerializer,
        doc_id_map: Option<&DocIdMapping>,
     ) -> io::Result<()> {
         // writing the offset index
-        let mut doc_index_serializer =
-            serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
-        let mut offset = 0;
-        for vals in self.get_ordered_values(doc_id_map) {
-            doc_index_serializer.add_val(offset)?;
-            offset += vals.len() as u64;
+        {
+            self.doc_index.push(self.vals.len() as u64);
+            let col = VecColumn::from(&self.doc_index[..]);
+            if let Some(doc_id_map) = doc_id_map {
+                let multi_value_start_index = MultivalueStartIndex::new(&col, doc_id_map);
+                serializer.create_auto_detect_u64_fast_field_with_idx(
+                    self.field,
+                    multi_value_start_index,
+                    0,
+                )?;
+            } else {
+                serializer.create_auto_detect_u64_fast_field_with_idx(self.field, col, 0)?;
+            }
         }
-        doc_index_serializer.add_val(self.vals.len() as u64)?;
-        doc_index_serializer.close_field()?;
         // writing the values themselves
-        let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1);
+        let mut value_serializer = serializer.new_bytes_fast_field(self.field);
         // the else could be removed, but this is faster (difference not benchmarked)
         if let Some(doc_id_map) = doc_id_map {
             for vals in self.get_ordered_values(Some(doc_id_map)) {
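Both the old and the new version of `serialize` emit one trailing offset equal to `self.vals.len()`; the rewrite just pushes it into `doc_index` before handing the whole column to the codec-autodetecting serializer. That trailing entry is what makes per-document byte ranges recoverable. An illustrative sketch (not code from this diff):

    // With offsets [0, 3, 3, 7] for three documents, the bytes of
    // document `d` live at offsets[d]..offsets[d + 1].
    fn doc_byte_range(offsets: &[u64], d: usize) -> std::ops::Range<u64> {
        offsets[d]..offsets[d + 1]
    }
    assert_eq!(doc_byte_range(&[0, 3, 3, 7], 2), 3..7);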
@@ -7,7 +7,7 @@ use crate::termdict::{TermDictionary, TermOrdinal};
 use crate::DocId;
 
 /// The facet reader makes it possible to access the list of
-/// facets associated to a given document in a specific
+/// facets associated with a given document in a specific
 /// segment.
 ///
 /// Rather than manipulating `Facet` object directly, the API
@@ -58,7 +58,7 @@ impl FacetReader {
         &self.term_dict
     }
 
-    /// Given a term ordinal returns the term associated to it.
+    /// Given a term ordinal returns the term associated with it.
     pub fn facet_from_ord(
         &mut self,
         facet_ord: TermOrdinal,
@@ -74,7 +74,7 @@ impl FacetReader {
         Ok(())
     }
 
-    /// Return the list of facet ordinals associated to a document.
+    /// Return the list of facet ordinals associated with a document.
     pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
         self.term_ords.get_vals(doc, output);
     }
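The ordinal-based access pattern these doc comments describe looks roughly like this (a sketch; `facet_reader` and `doc_id` are hypothetical, and error handling is elided):

    let mut facet_ords: Vec<u64> = Vec::new();
    facet_reader.facet_ords(doc_id, &mut facet_ords);
    let mut facet = Facet::root();
    for &ord in &facet_ords {
        // Resolve each ordinal back to its facet path.
        facet_reader.facet_from_ord(ord, &mut facet)?;
    }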
@@ -1,340 +0,0 @@
-use std::io::{self, Write};
-use std::num::NonZeroU64;
-
-use common::BinarySerializable;
-use fastdivide::DividerU64;
-use fastfield_codecs::{FastFieldCodecDeserializer, FastFieldDataAccess};
-use ownedbytes::OwnedBytes;
-
-pub const GCD_DEFAULT: u64 = 1;
-
-/// Wrapper for accessing a fastfield.
-///
-/// Holds the data and the codec to the read the data.
-#[derive(Clone)]
-pub struct GCDFastFieldCodec<CodecReader> {
-    gcd: u64,
-    min_value: u64,
-    num_vals: u64,
-    reader: CodecReader,
-}
-
-impl<C: FastFieldDataAccess + FastFieldCodecDeserializer + Clone> FastFieldCodecDeserializer
-    for GCDFastFieldCodec<C>
-{
-    fn open_from_bytes(bytes: OwnedBytes) -> std::io::Result<Self> {
-        let footer_offset = bytes.len() - 24;
-        let (body, mut footer) = bytes.split(footer_offset);
-        let gcd = u64::deserialize(&mut footer)?;
-        let min_value = u64::deserialize(&mut footer)?;
-        let num_vals = u64::deserialize(&mut footer)?;
-        let reader = C::open_from_bytes(body)?;
-        Ok(GCDFastFieldCodec {
-            gcd,
-            min_value,
-            num_vals,
-            reader,
-        })
-    }
-}
-
-impl<C: FastFieldDataAccess + Clone> FastFieldDataAccess for GCDFastFieldCodec<C> {
-    #[inline]
-    fn get_val(&self, doc: u64) -> u64 {
-        let mut data = self.reader.get_val(doc);
-        data *= self.gcd;
-        data += self.min_value;
-        data
-    }
-
-    fn min_value(&self) -> u64 {
-        self.min_value + self.reader.min_value() * self.gcd
-    }
-
-    fn max_value(&self) -> u64 {
-        self.min_value + self.reader.max_value() * self.gcd
-    }
-    fn num_vals(&self) -> u64 {
-        self.num_vals
-    }
-}
-
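The deleted `get_val` above is a plain affine decode: `stored * gcd + min_value`. Worked through with invented numbers:

    // If the raw values were [4000, 7000, 13000], the writer strips
    // min_value = 4000 and gcd = 3000 and bit-packs only [0, 1, 3].
    let (gcd, min_value) = (3000u64, 4000u64);
    assert_eq!(0 * gcd + min_value, 4000);
    assert_eq!(1 * gcd + min_value, 7000);
    assert_eq!(3 * gcd + min_value, 13000);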
-pub fn write_gcd_header<W: Write>(
-    field_write: &mut W,
-    min_value: u64,
-    gcd: u64,
-    num_vals: u64,
-) -> io::Result<()> {
-    gcd.serialize(field_write)?;
-    min_value.serialize(field_write)?;
-    num_vals.serialize(field_write)?;
-    Ok(())
-}
-
-/// Compute the gcd of two non null numbers.
-///
-/// It is recommended, but not required, to feed values such that `large >= small`.
-fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
-    loop {
-        let rem: u64 = large.get() % small;
-        if let Some(new_small) = NonZeroU64::new(rem) {
-            (large, small) = (small, new_small);
-        } else {
-            return small;
-        }
-    }
-}
-
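`compute_gcd` is Euclid's algorithm on `NonZeroU64`, so a zero divisor is ruled out by the type. The same loop restated on plain integers, traced on one of the pairs from the deleted tests below:

    // 25 % 10 = 5 -> (large, small) = (10, 5); 10 % 5 = 0 -> answer 5.
    fn gcd(mut large: u64, mut small: u64) -> u64 {
        while small != 0 {
            (large, small) = (small, large % small);
        }
        large
    }
    assert_eq!(gcd(25, 10), 5);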
-// Find GCD for iterator of numbers
-pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
-    let mut numbers = numbers.flat_map(NonZeroU64::new);
-    let mut gcd: NonZeroU64 = numbers.next()?;
-    if gcd.get() == 1 {
-        return Some(gcd);
-    }
-
-    let mut gcd_divider = DividerU64::divide_by(gcd.get());
-    for val in numbers {
-        let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
-        if remainder == 0 {
-            continue;
-        }
-        gcd = compute_gcd(val, gcd);
-        if gcd.get() == 1 {
-            return Some(gcd);
-        }
-
-        gcd_divider = DividerU64::divide_by(gcd.get());
-    }
-    Some(gcd)
-}
-
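`find_gcd` keeps the per-value cost low: divisibility is checked with a precomputed `DividerU64` (multiply-and-shift division), and the Euclid step only runs when the running gcd actually has to shrink. The remainder expression is just `val % gcd` with the division strength-reduced:

    // Spelled out: val - (val / g) * g == val % g for any g > 0.
    let (g, val) = (6u64, 45u64);
    assert_eq!(val - (val / g) * g, val % g); // both equal 3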
-#[cfg(test)]
-mod tests {
-    use std::collections::HashMap;
-    use std::num::NonZeroU64;
-    use std::path::Path;
-    use std::time::{Duration, SystemTime};
-
-    use common::HasLen;
-
-    use crate::directory::{CompositeFile, RamDirectory, WritePtr};
-    use crate::fastfield::gcd::compute_gcd;
-    use crate::fastfield::serializer::FastFieldCodecEnableCheck;
-    use crate::fastfield::tests::{FIELD, FIELDI64, SCHEMA, SCHEMAI64};
-    use crate::fastfield::{
-        find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecType,
-        FastFieldReader, FastFieldsWriter, ALL_CODECS,
-    };
-    use crate::schema::{Cardinality, Schema};
-    use crate::{DateOptions, DatePrecision, DateTime, Directory};
-
-    fn get_index(
-        docs: &[crate::Document],
-        schema: &Schema,
-        codec_enable_checker: FastFieldCodecEnableCheck,
-    ) -> crate::Result<RamDirectory> {
-        let directory: RamDirectory = RamDirectory::create();
-        {
-            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
-            let mut serializer =
-                CompositeFastFieldSerializer::from_write_with_codec(write, codec_enable_checker)
-                    .unwrap();
-            let mut fast_field_writers = FastFieldsWriter::from_schema(schema);
-            for doc in docs {
-                fast_field_writers.add_document(doc);
-            }
-            fast_field_writers
-                .serialize(&mut serializer, &HashMap::new(), None)
-                .unwrap();
-            serializer.close().unwrap();
-        }
-        Ok(directory)
-    }
-
-    fn test_fastfield_gcd_i64_with_codec(
-        code_type: FastFieldCodecType,
-        num_vals: usize,
-    ) -> crate::Result<()> {
-        let path = Path::new("test");
-        let mut docs = vec![];
-        for i in 1..=num_vals {
-            let val = (i as i64 - 5) * 1000i64;
-            docs.push(doc!(*FIELDI64=>val));
-        }
-        let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
-        let file = directory.open_read(path).unwrap();
-        let composite_file = CompositeFile::open(&file)?;
-        let file = composite_file.open_read(*FIELD).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<i64>::open(file)?;
-
-        assert_eq!(fast_field_reader.get(0), -4000i64);
-        assert_eq!(fast_field_reader.get(1), -3000i64);
-        assert_eq!(fast_field_reader.get(2), -2000i64);
-        assert_eq!(fast_field_reader.max_value(), (num_vals as i64 - 5) * 1000);
-        assert_eq!(fast_field_reader.min_value(), -4000i64);
-        let file = directory.open_read(path).unwrap();
-
-        // Can't apply gcd
-        let path = Path::new("test");
-        docs.pop();
-        docs.push(doc!(*FIELDI64=>2001i64));
-        let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
-        let file2 = directory.open_read(path).unwrap();
-        assert!(file2.len() > file.len());
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_fastfield_gcd_i64() -> crate::Result<()> {
-        for &code_type in ALL_CODECS {
-            test_fastfield_gcd_i64_with_codec(code_type, 5005)?;
-        }
-        Ok(())
-    }
-
-    fn test_fastfield_gcd_u64_with_codec(
-        code_type: FastFieldCodecType,
-        num_vals: usize,
-    ) -> crate::Result<()> {
-        let path = Path::new("test");
-        let mut docs = vec![];
-        for i in 1..=num_vals {
-            let val = i as u64 * 1000u64;
-            docs.push(doc!(*FIELD=>val));
-        }
-        let directory = get_index(&docs, &SCHEMA, code_type.into())?;
-        let file = directory.open_read(path).unwrap();
-        let composite_file = CompositeFile::open(&file)?;
-        let file = composite_file.open_read(*FIELD).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
-        assert_eq!(fast_field_reader.get(0), 1000u64);
-        assert_eq!(fast_field_reader.get(1), 2000u64);
-        assert_eq!(fast_field_reader.get(2), 3000u64);
-        assert_eq!(fast_field_reader.max_value(), num_vals as u64 * 1000);
-        assert_eq!(fast_field_reader.min_value(), 1000u64);
-        let file = directory.open_read(path).unwrap();
-
-        // Can't apply gcd
-        let path = Path::new("test");
-        docs.pop();
-        docs.push(doc!(*FIELDI64=>2001u64));
-        let directory = get_index(&docs, &SCHEMA, code_type.into())?;
-        let file2 = directory.open_read(path).unwrap();
-        assert!(file2.len() > file.len());
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_fastfield_gcd_u64() -> crate::Result<()> {
-        for &code_type in ALL_CODECS {
-            test_fastfield_gcd_u64_with_codec(code_type, 5005)?;
-        }
-        Ok(())
-    }
-
-    #[test]
-    pub fn test_fastfield2() {
-        let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
-        assert_eq!(test_fastfield.get(0), 100);
-        assert_eq!(test_fastfield.get(1), 200);
-        assert_eq!(test_fastfield.get(2), 300);
-    }
-
-    #[test]
-    pub fn test_gcd_date() -> crate::Result<()> {
-        let size_prec_sec =
-            test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
-        let size_prec_micro =
-            test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
-        assert!(size_prec_sec < size_prec_micro);
-
-        let size_prec_sec =
-            test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Seconds)?;
-        let size_prec_micro =
-            test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Microseconds)?;
-        assert!(size_prec_sec < size_prec_micro);
-
-        Ok(())
-    }
-
-    fn test_gcd_date_with_codec(
-        codec_type: FastFieldCodecType,
-        precision: DatePrecision,
-    ) -> crate::Result<usize> {
-        let time1 = DateTime::from_timestamp_micros(
-            SystemTime::now()
-                .duration_since(SystemTime::UNIX_EPOCH)
-                .unwrap()
-                .as_secs() as i64,
-        );
-        let time2 = DateTime::from_timestamp_micros(
-            SystemTime::now()
-                .checked_sub(Duration::from_micros(4111))
-                .unwrap()
-                .duration_since(SystemTime::UNIX_EPOCH)
-                .unwrap()
-                .as_secs() as i64,
-        );
-
-        let time3 = DateTime::from_timestamp_micros(
-            SystemTime::now()
-                .checked_sub(Duration::from_millis(2000))
-                .unwrap()
-                .duration_since(SystemTime::UNIX_EPOCH)
-                .unwrap()
-                .as_secs() as i64,
-        );
-
-        let mut schema_builder = Schema::builder();
-        let date_options = DateOptions::default()
-            .set_fast(Cardinality::SingleValue)
-            .set_precision(precision);
-        let field = schema_builder.add_date_field("field", date_options);
-        let schema = schema_builder.build();
-
-        let docs = vec![doc!(field=>time1), doc!(field=>time2), doc!(field=>time3)];
-
-        let directory = get_index(&docs, &schema, codec_type.into())?;
-        let path = Path::new("test");
-        let file = directory.open_read(path).unwrap();
-        let composite_file = CompositeFile::open(&file)?;
-        let file = composite_file.open_read(*FIELD).unwrap();
-        let len = file.len();
-        let test_fastfield = DynamicFastFieldReader::<DateTime>::open(file)?;
-
-        assert_eq!(test_fastfield.get(0), time1.truncate(precision));
-        assert_eq!(test_fastfield.get(1), time2.truncate(precision));
-        assert_eq!(test_fastfield.get(2), time3.truncate(precision));
-        Ok(len)
-    }
-
-    #[test]
-    fn test_compute_gcd() {
-        let test_compute_gcd_aux = |large, small, expected| {
-            let large = NonZeroU64::new(large).unwrap();
-            let small = NonZeroU64::new(small).unwrap();
-            let expected = NonZeroU64::new(expected).unwrap();
-            assert_eq!(compute_gcd(small, large), expected);
-            assert_eq!(compute_gcd(large, small), expected);
-        };
-        test_compute_gcd_aux(1, 4, 1);
-        test_compute_gcd_aux(2, 4, 2);
-        test_compute_gcd_aux(10, 25, 5);
-        test_compute_gcd_aux(25, 25, 25);
-    }
-
-    #[test]
-    fn find_gcd_test() {
-        assert_eq!(find_gcd([0].into_iter()), None);
-        assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
-        assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
-        assert_eq!(find_gcd([].into_iter()), None);
-        assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
-        assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
-        assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
-        assert_eq!(find_gcd([0, 0].into_iter()), None);
-    }
-}
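One non-obvious point in the deleted `test_gcd_date`: with `DatePrecision::Seconds`, every stored microsecond timestamp is truncated to a multiple of 1_000_000, so the GCD codec can divide all values down before bit-packing, which is why the seconds-precision file is asserted to be smaller. Illustrative arithmetic:

    // Second-aligned timestamps stored in microseconds share a GCD of 1_000_000.
    let micros_per_sec = 1_000_000u64;
    let t = 1_656_000_123u64 * micros_per_sec;
    assert_eq!(t % micros_per_sec, 0);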
@@ -7,237 +7,118 @@
 //! It is designed for the fast random access of some document
 //! fields given a document id.
 //!
-//! `FastField` are useful when a field is required for all or most of
-//! the `DocSet` : for instance for scoring, grouping, filtering, or faceting.
+//! Fast fields are useful when a field is required for all or most of
+//! the `DocSet`: for instance for scoring, grouping, aggregation, filtering, or faceting.
 //!
 //!
 //! Fields have to be declared as `FAST` in the schema.
-//! Currently supported fields are: u64, i64, f64 and bytes.
+//! Currently supported fields are: u64, i64, f64, bytes and text.
 //!
-//! u64, i64 and f64 fields are stored in a bit-packed fashion so that
-//! their memory usage is directly linear with the amplitude of the
-//! values stored.
+//! Fast fields are stored in with [different codecs](fastfield_codecs). The best codec is detected
+//! automatically, when serializing.
 //!
 //! Read access performance is comparable to that of an array lookup.
 
-use fastfield_codecs::FastFieldCodecType;
+use fastfield_codecs::MonotonicallyMappableToU64;
 
 pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
 pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
 pub use self::error::{FastFieldNotAvailableError, Result};
 pub use self::facet_reader::FacetReader;
-pub(crate) use self::gcd::{find_gcd, GCDFastFieldCodec, GCD_DEFAULT};
-pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
-pub use self::reader::{DynamicFastFieldReader, FastFieldReader};
+pub(crate) use self::multivalued::{get_fastfield_codecs_for_multivalue, MultivalueStartIndex};
+pub use self::multivalued::{
+    MultiValueIndex, MultiValueU128FastFieldWriter, MultiValuedFastFieldReader,
+    MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
+};
+pub(crate) use self::readers::type_and_cardinality;
 pub use self::readers::FastFieldReaders;
-pub(crate) use self::readers::{type_and_cardinality, FastType};
-pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats};
+pub use self::serializer::{Column, CompositeFastFieldSerializer};
+use self::writer::unexpected_value;
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
-use crate::schema::{Cardinality, FieldType, Type, Value};
-use crate::{DateTime, DocId};
+use crate::schema::{Type, Value};
+use crate::DateTime;
 
 mod alive_bitset;
 mod bytes;
 mod error;
 mod facet_reader;
-mod gcd;
 mod multivalued;
-mod reader;
 mod readers;
 mod serializer;
 mod writer;
 
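The rewritten module doc above leans on automatic codec selection in `fastfield_codecs`. The updated tests later in this diff exercise it through `serialize_and_load`, roughly like this (a sketch mirroring the rewritten `test_fastfield` below, not additional API):

    // Serialize a small column, let the library pick the codec, read it back.
    let column = fastfield_codecs::serialize_and_load(&[100u64, 200u64, 300u64][..]);
    assert_eq!(column.get_val(0), 100);
    assert_eq!(column.get_val(2), 300);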
-pub(crate) const ALL_CODECS: &[FastFieldCodecType; 3] = &[
-    FastFieldCodecType::Bitpacked,
-    FastFieldCodecType::Linear,
-    FastFieldCodecType::BlockwiseLinear,
-];
-
-/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
-/// for a doc_id
-pub trait MultiValueLength {
-    /// returns the num of values associated to a doc_id
-    fn get_len(&self, doc_id: DocId) -> u64;
-    /// returns the sum of num values for all doc_ids
-    fn get_total_len(&self) -> u64;
-}
-
 /// Trait for types that are allowed for fast fields:
 /// (u64, i64 and f64, bool, DateTime).
-pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
-    /// Converts a value from u64
-    ///
-    /// Internally all fast field values are encoded as u64.
-    /// **Note: To be used for converting encoded Term, Posting values.**
-    fn from_u64(val: u64) -> Self;
-
-    /// Converts a value to u64.
-    ///
-    /// Internally all fast field values are encoded as u64.
-    fn to_u64(&self) -> u64;
-
-    /// Returns the fast field cardinality that can be extracted from the given
-    /// `FieldType`.
-    ///
-    /// If the type is not a fast field, `None` is returned.
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality>;
-
-    /// Cast value to `u64`.
-    /// The value is just reinterpreted in memory.
-    fn as_u64(&self) -> u64;
+pub trait FastValue:
+    MonotonicallyMappableToU64 + Copy + Send + Sync + PartialOrd + 'static
+{
+    /// Returns the `schema::Type` for this FastValue.
+    fn to_type() -> Type;
 
     /// Build a default value. This default value is never used, so the value does not
     /// really matter.
     fn make_zero() -> Self {
-        Self::from_u64(0i64.to_u64())
+        Self::from_u64(0u64)
     }
-
-    /// Returns the `schema::Type` for this FastValue.
-    fn to_type() -> Type;
 }
 
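The old `from_u64`/`to_u64`/`as_u64` trio is folded into the `MonotonicallyMappableToU64` bound. The mapping has to preserve ordering so comparisons on the encoded u64s agree with the typed values; for `i64` this is the usual sign-bit flip (stated here as an assumption about what `common::i64_to_u64` does, not copied from it):

    // Order-preserving i64 -> u64 mapping via sign-bit flip.
    fn i64_to_u64(val: i64) -> u64 {
        (val as u64) ^ (1u64 << 63)
    }
    assert_eq!(i64_to_u64(i64::MIN), 0);
    assert!(i64_to_u64(-1) < i64_to_u64(0));
    assert!(i64_to_u64(0) < i64_to_u64(1));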
 impl FastValue for u64 {
-    fn from_u64(val: u64) -> Self {
-        val
-    }
-
-    fn to_u64(&self) -> u64 {
-        *self
-    }
-
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
-        match *field_type {
-            FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
-            FieldType::Facet(_) => Some(Cardinality::MultiValues),
-            _ => None,
-        }
-    }
-
-    fn as_u64(&self) -> u64 {
-        *self
-    }
-
     fn to_type() -> Type {
         Type::U64
     }
 }
 
 impl FastValue for i64 {
-    fn from_u64(val: u64) -> Self {
-        common::u64_to_i64(val)
-    }
-
-    fn to_u64(&self) -> u64 {
-        common::i64_to_u64(*self)
-    }
-
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
-        match *field_type {
-            FieldType::I64(ref integer_options) => integer_options.get_fastfield_cardinality(),
-            _ => None,
-        }
-    }
-
-    fn as_u64(&self) -> u64 {
-        *self as u64
-    }
-
     fn to_type() -> Type {
         Type::I64
     }
 }
 
 impl FastValue for f64 {
-    fn from_u64(val: u64) -> Self {
-        common::u64_to_f64(val)
-    }
-
-    fn to_u64(&self) -> u64 {
-        common::f64_to_u64(*self)
-    }
-
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
-        match *field_type {
-            FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
-            _ => None,
-        }
-    }
-
-    fn as_u64(&self) -> u64 {
-        self.to_bits()
-    }
-
     fn to_type() -> Type {
         Type::F64
     }
 }
 
 impl FastValue for bool {
-    fn from_u64(val: u64) -> Self {
-        val != 0u64
-    }
-
-    fn to_u64(&self) -> u64 {
-        match self {
-            false => 0,
-            true => 1,
-        }
-    }
-
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
-        match *field_type {
-            FieldType::Bool(ref integer_options) => integer_options.get_fastfield_cardinality(),
-            _ => None,
-        }
-    }
-
-    fn as_u64(&self) -> u64 {
-        *self as u64
-    }
-
     fn to_type() -> Type {
         Type::Bool
     }
 }
 
-impl FastValue for DateTime {
-    /// Converts a timestamp microseconds into DateTime.
-    ///
-    /// **Note the timestamps is expected to be in microseconds.**
-    fn from_u64(timestamp_micros_u64: u64) -> Self {
-        let timestamp_micros = i64::from_u64(timestamp_micros_u64);
-        Self::from_timestamp_micros(timestamp_micros)
+impl MonotonicallyMappableToU64 for DateTime {
+    fn to_u64(self) -> u64 {
+        self.timestamp_micros.to_u64()
     }
 
-    fn to_u64(&self) -> u64 {
-        common::i64_to_u64(self.into_timestamp_micros())
-    }
-
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
-        match *field_type {
-            FieldType::Date(ref options) => options.get_fastfield_cardinality(),
-            _ => None,
-        }
-    }
-
-    fn as_u64(&self) -> u64 {
-        self.into_timestamp_micros().as_u64()
-    }
-
-    fn to_type() -> Type {
-        Type::Date
+    fn from_u64(val: u64) -> Self {
+        let timestamp_micros = i64::from_u64(val);
+        DateTime { timestamp_micros }
     }
 }
 
-fn value_to_u64(value: &Value) -> u64 {
-    match value {
+impl FastValue for DateTime {
+    fn to_type() -> Type {
+        Type::Date
+    }
+
+    fn make_zero() -> Self {
+        DateTime {
+            timestamp_micros: 0,
+        }
+    }
+}
+
+fn value_to_u64(value: &Value) -> crate::Result<u64> {
+    let value = match value {
         Value::U64(val) => val.to_u64(),
         Value::I64(val) => val.to_u64(),
         Value::F64(val) => val.to_u64(),
         Value::Bool(val) => val.to_u64(),
         Value::Date(val) => val.to_u64(),
-        _ => panic!("Expected a u64/i64/f64/bool/date field, got {:?} ", value),
-    }
+        _ => return Err(unexpected_value("u64/i64/f64/bool/date", value)),
+    };
+    Ok(value)
 }
 
 /// The fast field type
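`DateTime` reuses the `i64` mapping through its microsecond timestamp, so the encoded u64 order is chronological order and the round trip is lossless. A quick sanity sketch (trait methods assumed in scope):

    let micros: i64 = 1_656_000_000_123;
    let dt = DateTime::from_timestamp_micros(micros);
    assert_eq!(DateTime::from_u64(dt.to_u64()), dt);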
@@ -266,17 +147,19 @@ mod tests {
     use std::collections::HashMap;
     use std::ops::Range;
     use std::path::Path;
+    use std::sync::Arc;
 
     use common::HasLen;
+    use fastfield_codecs::{open, FastFieldCodecType};
     use once_cell::sync::Lazy;
     use rand::prelude::SliceRandom;
     use rand::rngs::StdRng;
-    use rand::SeedableRng;
+    use rand::{Rng, SeedableRng};
 
     use super::*;
     use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
     use crate::merge_policy::NoMergePolicy;
-    use crate::schema::{Document, Field, Schema, FAST, STRING, TEXT};
+    use crate::schema::{Cardinality, Document, Field, Schema, SchemaBuilder, FAST, STRING, TEXT};
     use crate::time::OffsetDateTime;
     use crate::{DateOptions, DatePrecision, Index, SegmentId, SegmentReader};
@@ -285,22 +168,14 @@ mod tests {
         schema_builder.add_u64_field("field", FAST);
         schema_builder.build()
     });
 
-    pub static SCHEMAI64: Lazy<Schema> = Lazy::new(|| {
-        let mut schema_builder = Schema::builder();
-        schema_builder.add_i64_field("field", FAST);
-        schema_builder.build()
-    });
-
     pub static FIELD: Lazy<Field> = Lazy::new(|| SCHEMA.get_field("field").unwrap());
-    pub static FIELDI64: Lazy<Field> = Lazy::new(|| SCHEMAI64.get_field("field").unwrap());
 
     #[test]
     pub fn test_fastfield() {
-        let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
-        assert_eq!(test_fastfield.get(0), 100);
-        assert_eq!(test_fastfield.get(1), 200);
-        assert_eq!(test_fastfield.get(2), 300);
+        let test_fastfield = fastfield_codecs::serialize_and_load(&[100u64, 200u64, 300u64][..]);
+        assert_eq!(test_fastfield.get_val(0), 100);
+        assert_eq!(test_fastfield.get_val(1), 200);
+        assert_eq!(test_fastfield.get_val(2), 300);
     }
 
     #[test]
@@ -317,22 +192,28 @@ mod tests {
         let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-        fast_field_writers.add_document(&doc!(*FIELD=>13u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>14u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>2u64));
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>13u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>14u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>2u64))
+            .unwrap();
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
             .unwrap();
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 45);
+    assert_eq!(file.len(), 34);
     let composite_file = CompositeFile::open(&file)?;
-    let file = composite_file.open_read(*FIELD).unwrap();
-    let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
-    assert_eq!(fast_field_reader.get(0), 13u64);
-    assert_eq!(fast_field_reader.get(1), 14u64);
-    assert_eq!(fast_field_reader.get(2), 2u64);
+    let fast_field_bytes = composite_file.open_read(*FIELD).unwrap().read_bytes()?;
+    let fast_field_reader = open::<u64>(fast_field_bytes)?;
+    assert_eq!(fast_field_reader.get_val(0), 13u64);
+    assert_eq!(fast_field_reader.get_val(1), 14u64);
+    assert_eq!(fast_field_reader.get_val(2), 2u64);
     Ok(())
 }
@@ -344,33 +225,54 @@ mod tests {
         let write: WritePtr = directory.open_write(Path::new("test"))?;
         let mut serializer = CompositeFastFieldSerializer::from_write(write)?;
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-        fast_field_writers.add_document(&doc!(*FIELD=>4u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>3_052u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>9_002u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>15_001u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>777u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
-        fast_field_writers.add_document(&doc!(*FIELD=>215u64));
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>4u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>14_082_001u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>3_052u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>9_002u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>15_001u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>777u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>1_002u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>1_501u64))
+            .unwrap();
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>215u64))
+            .unwrap();
         fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
         serializer.close()?;
     }
     let file = directory.open_read(path)?;
-    assert_eq!(file.len(), 70);
+    assert_eq!(file.len(), 62);
     {
         let fast_fields_composite = CompositeFile::open(&file)?;
-        let data = fast_fields_composite.open_read(*FIELD).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
-        assert_eq!(fast_field_reader.get(0), 4u64);
-        assert_eq!(fast_field_reader.get(1), 14_082_001u64);
-        assert_eq!(fast_field_reader.get(2), 3_052u64);
-        assert_eq!(fast_field_reader.get(3), 9002u64);
-        assert_eq!(fast_field_reader.get(4), 15_001u64);
-        assert_eq!(fast_field_reader.get(5), 777u64);
-        assert_eq!(fast_field_reader.get(6), 1_002u64);
-        assert_eq!(fast_field_reader.get(7), 1_501u64);
-        assert_eq!(fast_field_reader.get(8), 215u64);
+        let data = fast_fields_composite
+            .open_read(*FIELD)
+            .unwrap()
+            .read_bytes()?;
+        let fast_field_reader = open::<u64>(data)?;
+        assert_eq!(fast_field_reader.get_val(0), 4u64);
+        assert_eq!(fast_field_reader.get_val(1), 14_082_001u64);
+        assert_eq!(fast_field_reader.get_val(2), 3_052u64);
+        assert_eq!(fast_field_reader.get_val(3), 9002u64);
+        assert_eq!(fast_field_reader.get_val(4), 15_001u64);
+        assert_eq!(fast_field_reader.get_val(5), 777u64);
+        assert_eq!(fast_field_reader.get_val(6), 1_002u64);
+        assert_eq!(fast_field_reader.get_val(7), 1_501u64);
+        assert_eq!(fast_field_reader.get_val(8), 215u64);
     }
     Ok(())
 }
@@ -385,7 +287,9 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
         for _ in 0..10_000 {
-            fast_field_writers.add_document(&doc!(*FIELD=>100_000u64));
+            fast_field_writers
+                .add_document(&doc!(*FIELD=>100_000u64))
+                .unwrap();
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -393,13 +297,16 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 43);
+    assert_eq!(file.len(), 35);
     {
         let fast_fields_composite = CompositeFile::open(&file).unwrap();
-        let data = fast_fields_composite.open_read(*FIELD).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
+        let data = fast_fields_composite
+            .open_read(*FIELD)
+            .unwrap()
+            .read_bytes()?;
+        let fast_field_reader = open::<u64>(data)?;
         for doc in 0..10_000 {
-            assert_eq!(fast_field_reader.get(doc), 100_000u64);
+            assert_eq!(fast_field_reader.get_val(doc), 100_000u64);
         }
     }
     Ok(())
@@ -415,9 +322,13 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
         // forcing the amplitude to be high
-        fast_field_writers.add_document(&doc!(*FIELD=>0u64));
+        fast_field_writers
+            .add_document(&doc!(*FIELD=>0u64))
+            .unwrap();
         for i in 0u64..10_000u64 {
-            fast_field_writers.add_document(&doc!(*FIELD=>5_000_000_000_000_000_000u64 + i));
+            fast_field_writers
+                .add_document(&doc!(*FIELD=>5_000_000_000_000_000_000u64 + i))
+                .unwrap();
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -425,15 +336,18 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 80051);
+    assert_eq!(file.len(), 80049);
     {
         let fast_fields_composite = CompositeFile::open(&file)?;
-        let data = fast_fields_composite.open_read(*FIELD).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
-        assert_eq!(fast_field_reader.get(0), 0u64);
+        let data = fast_fields_composite
+            .open_read(*FIELD)
+            .unwrap()
+            .read_bytes()?;
+        let fast_field_reader = open::<u64>(data)?;
+        assert_eq!(fast_field_reader.get_val(0), 0u64);
         for doc in 1..10_001 {
             assert_eq!(
-                fast_field_reader.get(doc),
+                fast_field_reader.get_val(doc),
                 5_000_000_000_000_000_000u64 + doc as u64 - 1u64
             );
         }
@@ -456,7 +370,7 @@ mod tests {
         for i in -100i64..10_000i64 {
             let mut doc = Document::default();
             doc.add_i64(i64_field, i);
-            fast_field_writers.add_document(&doc);
+            fast_field_writers.add_document(&doc).unwrap();
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -464,18 +378,20 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    // assert_eq!(file.len(), 17710 as usize); //bitpacked size
-    // assert_eq!(file.len(), 10175_usize); // linear interpol size
-    assert_eq!(file.len(), 75_usize); // linear interpol size after calc improvement
+    assert_eq!(file.len(), 49_usize);
     {
         let fast_fields_composite = CompositeFile::open(&file)?;
-        let data = fast_fields_composite.open_read(i64_field).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
+        let data = fast_fields_composite
+            .open_read(i64_field)
+            .unwrap()
+            .read_bytes()?;
+        let fast_field_reader = open::<i64>(data)?;
 
         assert_eq!(fast_field_reader.min_value(), -100i64);
         assert_eq!(fast_field_reader.max_value(), 9_999i64);
         for (doc, i) in (-100i64..10_000i64).enumerate() {
-            assert_eq!(fast_field_reader.get(doc as u32), i);
+            assert_eq!(fast_field_reader.get_val(doc as u32), i);
         }
         let mut buffer = vec![0i64; 100];
         fast_field_reader.get_range(53, &mut buffer[..]);
@@ -499,7 +415,7 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
         let doc = Document::default();
-        fast_field_writers.add_document(&doc);
+        fast_field_writers.add_document(&doc).unwrap();
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
             .unwrap();
@@ -509,9 +425,12 @@ mod tests {
     let file = directory.open_read(path).unwrap();
     {
         let fast_fields_composite = CompositeFile::open(&file).unwrap();
-        let data = fast_fields_composite.open_read(i64_field).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
-        assert_eq!(fast_field_reader.get(0u32), 0i64);
+        let data = fast_fields_composite
+            .open_read(i64_field)
+            .unwrap()
+            .read_bytes()?;
+        let fast_field_reader = open::<i64>(data)?;
+        assert_eq!(fast_field_reader.get_val(0), 0i64);
     }
     Ok(())
 }
@@ -539,7 +458,7 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write)?;
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
         for &x in &permutation {
-            fast_field_writers.add_document(&doc!(*FIELD=>x));
+            fast_field_writers.add_document(&doc!(*FIELD=>x)).unwrap();
         }
         fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
         serializer.close()?;
@@ -547,11 +466,14 @@ mod tests {
     let file = directory.open_read(path)?;
     {
         let fast_fields_composite = CompositeFile::open(&file)?;
-        let data = fast_fields_composite.open_read(*FIELD).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
+        let data = fast_fields_composite
+            .open_read(*FIELD)
+            .unwrap()
+            .read_bytes()?;
+        let fast_field_reader = open::<u64>(data)?;
 
         for a in 0..n {
-            assert_eq!(fast_field_reader.get(a as u32), permutation[a as usize]);
+            assert_eq!(fast_field_reader.get_val(a as u32), permutation[a as usize]);
         }
     }
     Ok(())
@@ -607,7 +529,7 @@ mod tests {
         let mut all = vec![];
 
         for doc in docs {
-            let mut out = vec![];
+            let mut out: Vec<u64> = vec![];
             ff.get_vals(doc, &mut out);
             all.extend(out);
         }
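The loop in that last hunk is the standard multivalued read pattern: one output buffer per document, flattened into a single Vec. The `Vec<u64>` annotation is added presumably because type inference no longer pins the element type after the codec refactor. Sketch with `ff` and `docs` as in the test:

    let mut all: Vec<u64> = Vec::new();
    for doc in docs {
        let mut out: Vec<u64> = Vec::new();
        ff.get_vals(doc, &mut out);
        all.extend(out);
    }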
@@ -804,7 +726,6 @@ mod tests {
 
     #[test]
     fn test_datefastfield() -> crate::Result<()> {
-        use crate::fastfield::FastValue;
         let mut schema_builder = Schema::builder();
         let date_field = schema_builder.add_date_field(
             "date",
@@ -842,19 +763,19 @@ mod tests {
         let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
         let mut dates = vec![];
         {
-            assert_eq!(date_fast_field.get(0u32).into_timestamp_micros(), 1i64);
+            assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
             dates_fast_field.get_vals(0u32, &mut dates);
             assert_eq!(dates.len(), 2);
             assert_eq!(dates[0].into_timestamp_micros(), 2i64);
             assert_eq!(dates[1].into_timestamp_micros(), 3i64);
         }
         {
-            assert_eq!(date_fast_field.get(1u32).into_timestamp_micros(), 4i64);
+            assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
             dates_fast_field.get_vals(1u32, &mut dates);
             assert!(dates.is_empty());
         }
         {
-            assert_eq!(date_fast_field.get(2u32).into_timestamp_micros(), 0i64);
+            assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
             dates_fast_field.get_vals(2u32, &mut dates);
             assert_eq!(dates.len(), 2);
             assert_eq!(dates[0].into_timestamp_micros(), 5i64);
@@ -865,11 +786,12 @@ mod tests {
 
     #[test]
     pub fn test_fastfield_bool() {
-        let test_fastfield = DynamicFastFieldReader::<bool>::from(vec![true, false, true, false]);
-        assert_eq!(test_fastfield.get(0), true);
-        assert_eq!(test_fastfield.get(1), false);
-        assert_eq!(test_fastfield.get(2), true);
-        assert_eq!(test_fastfield.get(3), false);
+        let test_fastfield: Arc<dyn Column<bool>> =
+            fastfield_codecs::serialize_and_load::<bool>(&[true, false, true, false]);
+        assert_eq!(test_fastfield.get_val(0), true);
+        assert_eq!(test_fastfield.get_val(1), false);
+        assert_eq!(test_fastfield.get_val(2), true);
+        assert_eq!(test_fastfield.get_val(3), false);
     }
 
     #[test]
@@ -886,24 +808,28 @@ mod tests {
         let write: WritePtr = directory.open_write(path).unwrap();
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
-        fast_field_writers.add_document(&doc!(field=>true));
-        fast_field_writers.add_document(&doc!(field=>false));
-        fast_field_writers.add_document(&doc!(field=>true));
-        fast_field_writers.add_document(&doc!(field=>false));
+        fast_field_writers.add_document(&doc!(field=>true)).unwrap();
+        fast_field_writers
+            .add_document(&doc!(field=>false))
+            .unwrap();
+        fast_field_writers.add_document(&doc!(field=>true)).unwrap();
+        fast_field_writers
+            .add_document(&doc!(field=>false))
+            .unwrap();
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
             .unwrap();
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 44);
+    assert_eq!(file.len(), 33);
     let composite_file = CompositeFile::open(&file)?;
-    let file = composite_file.open_read(field).unwrap();
-    let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
-    assert_eq!(fast_field_reader.get(0), true);
-    assert_eq!(fast_field_reader.get(1), false);
-    assert_eq!(fast_field_reader.get(2), true);
-    assert_eq!(fast_field_reader.get(3), false);
+    let data = composite_file.open_read(field).unwrap().read_bytes()?;
+    let fast_field_reader = open::<bool>(data)?;
+    assert_eq!(fast_field_reader.get_val(0), true);
+    assert_eq!(fast_field_reader.get_val(1), false);
+    assert_eq!(fast_field_reader.get_val(2), true);
+    assert_eq!(fast_field_reader.get_val(3), false);
 
     Ok(())
 }
@@ -923,8 +849,10 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
         for _ in 0..50 {
-            fast_field_writers.add_document(&doc!(field=>true));
-            fast_field_writers.add_document(&doc!(field=>false));
+            fast_field_writers.add_document(&doc!(field=>true)).unwrap();
+            fast_field_writers
+                .add_document(&doc!(field=>false))
+                .unwrap();
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -932,13 +860,13 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 56);
+    assert_eq!(file.len(), 45);
     let composite_file = CompositeFile::open(&file)?;
-    let file = composite_file.open_read(field).unwrap();
-    let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
+    let data = composite_file.open_read(field).unwrap().read_bytes()?;
+    let fast_field_reader = open::<bool>(data)?;
     for i in 0..25 {
-        assert_eq!(fast_field_reader.get(i * 2), true);
-        assert_eq!(fast_field_reader.get(i * 2 + 1), false);
+        assert_eq!(fast_field_reader.get_val(i * 2), true);
+        assert_eq!(fast_field_reader.get_val(i * 2 + 1), false);
     }
 
     Ok(())
@@ -950,168 +878,95 @@ mod tests {
         let directory: RamDirectory = RamDirectory::create();

         let mut schema_builder = Schema::builder();
-        schema_builder.add_bool_field("field_bool", FAST);
+        let field = schema_builder.add_bool_field("field_bool", FAST);
         let schema = schema_builder.build();
-        let field = schema.get_field("field_bool").unwrap();

         {
             let write: WritePtr = directory.open_write(path).unwrap();
-            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
+            let mut serializer = CompositeFastFieldSerializer::from_write(write)?;
             let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
             let doc = Document::default();
-            fast_field_writers.add_document(&doc);
-            fast_field_writers
-                .serialize(&mut serializer, &HashMap::new(), None)
-                .unwrap();
-            serializer.close().unwrap();
+            fast_field_writers.add_document(&doc).unwrap();
+            fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
+            serializer.close()?;
         }
         let file = directory.open_read(path).unwrap();
-        assert_eq!(file.len(), 43);
         let composite_file = CompositeFile::open(&file)?;
-        let file = composite_file.open_read(field).unwrap();
-        let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
-        assert_eq!(fast_field_reader.get(0), false);
+        assert_eq!(file.len(), 32);
+        let data = composite_file.open_read(field).unwrap().read_bytes()?;
+        let fast_field_reader = open::<bool>(data)?;
+        assert_eq!(fast_field_reader.get_val(0), false);

         Ok(())
     }
-}

-#[cfg(all(test, feature = "unstable"))]
-mod bench {
-    use std::collections::HashMap;
-    use std::path::Path;
-
-    use test::{self, Bencher};
-
-    use super::tests::{generate_permutation, FIELD, SCHEMA};
-    use super::*;
-    use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
-    use crate::fastfield::tests::generate_permutation_gcd;
-    use crate::fastfield::FastFieldReader;
-
-    #[bench]
-    fn bench_intfastfield_linear_veclookup(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        b.iter(|| {
-            let n = test::black_box(7000u32);
-            let mut a = 0u64;
-            for i in (0u32..n / 7).map(|v| v * 7) {
-                a ^= permutation[i as usize];
-            }
-            a
-        });
-    }
-
-    #[bench]
-    fn bench_intfastfield_veclookup(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        b.iter(|| {
-            let n = test::black_box(1000u32);
-            let mut a = 0u64;
-            for _ in 0u32..n {
-                a = permutation[a as usize];
-            }
-            a
-        });
-    }
-
-    #[bench]
-    fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
-        let path = Path::new("test");
-        let permutation = generate_permutation();
+    fn get_index(
+        docs: &[crate::Document],
+        schema: &Schema,
+        codec_types: &[FastFieldCodecType],
+    ) -> crate::Result<RamDirectory> {
         let directory: RamDirectory = RamDirectory::create();
         {
             let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
-            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
-            let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-            for &x in &permutation {
-                fast_field_writers.add_document(&doc!(*FIELD=>x));
+            let mut serializer =
+                CompositeFastFieldSerializer::from_write_with_codec(write, codec_types).unwrap();
+            let mut fast_field_writers = FastFieldsWriter::from_schema(schema);
+            for doc in docs {
+                fast_field_writers.add_document(doc).unwrap();
             }
             fast_field_writers
                 .serialize(&mut serializer, &HashMap::new(), None)
                 .unwrap();
             serializer.close().unwrap();
         }
-        let file = directory.open_read(&path).unwrap();
-        {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
-            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
-
-            b.iter(|| {
-                let n = test::black_box(7000u32);
-                let mut a = 0u64;
-                for i in (0u32..n / 7).map(|val| val * 7) {
-                    a ^= fast_field_reader.get(i);
-                }
-                a
-            });
-        }
+        Ok(directory)
     }

-    #[bench]
-    fn bench_intfastfield_fflookup(b: &mut Bencher) {
-        let path = Path::new("test");
-        let permutation = generate_permutation();
-        let directory: RamDirectory = RamDirectory::create();
-        {
-            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
-            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
-            let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-            for &x in &permutation {
-                fast_field_writers.add_document(&doc!(*FIELD=>x));
-            }
-            fast_field_writers
-                .serialize(&mut serializer, &HashMap::new(), None)
-                .unwrap();
-            serializer.close().unwrap();
-        }
-        let file = directory.open_read(&path).unwrap();
-        {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
-            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
-
-            b.iter(|| {
-                let mut a = 0u32;
-                for i in 0u32..permutation.len() as u32 {
-                    a = fast_field_reader.get(i) as u32;
-                }
-                a
-            });
-        }
+    #[test]
+    pub fn test_gcd_date() -> crate::Result<()> {
+        let size_prec_sec =
+            test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
+        assert_eq!(size_prec_sec, 5 + 4 + 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log_2(number of seconds in 2 hours))
+        let size_prec_micro =
+            test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
+        assert_eq!(size_prec_micro, 5 + 4 + 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log_2(number of microseconds in 2 hours))
+        Ok(())
     }

-    #[bench]
-    fn bench_intfastfield_fflookup_gcd(b: &mut Bencher) {
-        let path = Path::new("test");
-        let permutation = generate_permutation_gcd();
-        let directory: RamDirectory = RamDirectory::create();
-        {
-            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
-            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
-            let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-            for &x in &permutation {
-                fast_field_writers.add_document(&doc!(*FIELD=>x));
-            }
-            fast_field_writers
-                .serialize(&mut serializer, &HashMap::new(), None)
-                .unwrap();
-            serializer.close().unwrap();
-        }
-        let file = directory.open_read(&path).unwrap();
-        {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
-            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
-
-            b.iter(|| {
-                let mut a = 0u32;
-                for i in 0u32..permutation.len() as u32 {
-                    a = fast_field_reader.get(i) as u32;
-                }
-                a
-            });
-        }
+    fn test_gcd_date_with_codec(
+        codec_type: FastFieldCodecType,
+        precision: DatePrecision,
+    ) -> crate::Result<usize> {
+        let mut rng = StdRng::seed_from_u64(2u64);
+        const T0: i64 = 1_662_345_825_012_529i64;
+        const ONE_HOUR_IN_MICROSECS: i64 = 3_600 * 1_000_000;
+        let times: Vec<DateTime> = std::iter::repeat_with(|| {
+            // +- One hour.
+            let t = T0 + rng.gen_range(-ONE_HOUR_IN_MICROSECS..ONE_HOUR_IN_MICROSECS);
+            DateTime::from_timestamp_micros(t)
+        })
+        .take(1_000)
+        .collect();
+        let date_options = DateOptions::default()
+            .set_fast(Cardinality::SingleValue)
+            .set_precision(precision);
+        let mut schema_builder = SchemaBuilder::default();
+        let field = schema_builder.add_date_field("field", date_options);
+        let schema = schema_builder.build();
+
+        let docs: Vec<Document> = times.iter().map(|time| doc!(field=>*time)).collect();
+
+        let directory = get_index(&docs[..], &schema, &[codec_type])?;
+        let path = Path::new("test");
+        let file = directory.open_read(path).unwrap();
+        let composite_file = CompositeFile::open(&file)?;
+        let file = composite_file.open_read(*FIELD).unwrap();
+        let len = file.len();
+        let test_fastfield = open::<DateTime>(file.read_bytes()?)?;
+
+        for (i, time) in times.iter().enumerate() {
+            assert_eq!(test_fastfield.get_val(i as u32), time.truncate(precision));
         }
+        Ok(len)
     }
 }
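The two size assertions in test_gcd_date follow from the +/- one hour sampling window, i.e. a 2-hour value range once the GCD is divided out. Below is a quick, self-contained check of that arithmetic; the `bits` closure is illustrative and not part of the diff:

// Quick check of the bit widths asserted above (illustrative only):
// the test draws timestamps from a +/- one hour window, i.e. a 2-hour span.
fn main() {
    let seconds_in_2h: u64 = 2 * 3_600; // 7_200
    let micros_in_2h: u64 = seconds_in_2h * 1_000_000; // 7_200_000_000
    // ceil(log2(x)) for x > 1.
    let bits = |x: u64| 64 - (x - 1).leading_zeros();
    assert_eq!(bits(seconds_in_2h), 13); // 2^12 = 4096 < 7200 <= 2^13
    assert_eq!(bits(micros_in_2h), 33); // 2^32 < 7.2e9 <= 2^33
    // 1_000 docs at 13 bits each -> (1_000 * 13) / 8 bytes of bitpacked payload.
    println!("13-bit payload: {} bytes", (1_000 * 13) / 8);
}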
src/fastfield/multivalued/index.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
+use std::ops::Range;
+use std::sync::Arc;
+
+use fastfield_codecs::Column;
+
+use crate::DocId;
+
+#[derive(Clone)]
+/// Index to resolve the value range for a given doc_id.
+/// Starts at 0.
+pub struct MultiValueIndex {
+    idx: Arc<dyn Column<u64>>,
+}
+
+impl MultiValueIndex {
+    pub(crate) fn new(idx: Arc<dyn Column<u64>>) -> Self {
+        Self { idx }
+    }
+
+    /// Returns `[start, end)`, such that the values associated with
+    /// the given document are `start..end`.
+    #[inline]
+    pub(crate) fn range(&self, doc: DocId) -> Range<u32> {
+        let start = self.idx.get_val(doc) as u32;
+        let end = self.idx.get_val(doc + 1) as u32;
+        start..end
+    }
+
+    /// Given a range of documents, returns the range of value offsets for
+    /// these documents.
+    ///
+    /// For instance, given `start_doc..end_doc`,
+    /// if we assume Document #start_doc and #end_doc both
+    /// have values, this function returns `start..end`
+    /// such that `value_column.get(start_doc)` is the first value of
+    /// `start_doc` (well, if there is one), and `value_column.get(end_doc - 1)`
+    /// is the last value of `end_doc`.
+    ///
+    /// The passed end of the range is allowed to be out of bounds, in which case
+    /// it will be clipped to make it valid.
+    #[inline]
+    pub(crate) fn docid_range_to_position_range(&self, range: Range<DocId>) -> Range<u32> {
+        let end_docid = range.end.min(self.num_docs() - 1) + 1;
+        let start_docid = range.start.min(end_docid);
+
+        let start = self.idx.get_val(start_docid) as u32;
+        let end = self.idx.get_val(end_docid) as u32;
+        assert!(start <= end);
+
+        start..end
+    }
+
+    /// Returns the number of values associated with a doc_id.
+    pub(crate) fn num_vals_for_doc(&self, doc: DocId) -> u32 {
+        let range = self.range(doc);
+        range.end - range.start
+    }
+
+    /// Returns the overall number of values in this field.
+    #[inline]
+    pub fn total_num_vals(&self) -> u32 {
+        self.idx.max_value() as u32
+    }
+
+    /// Returns the number of documents in the index.
+    #[inline]
+    pub fn num_docs(&self) -> u32 {
+        self.idx.num_vals() - 1
+    }
+
+    /// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
+    /// Positions are converted in place to docids.
+    ///
+    /// Since there is no index for value pos -> docid, only docid -> value pos range, we scan the
+    /// index.
+    ///
+    /// Correctness: `positions` needs to be sorted, and the index column needs to contain
+    /// monotonically increasing positions.
+    ///
+    /// TODO: Instead of a linear scan we can employ an exponential search into a binary search to
+    /// match a docid to its value position.
+    pub(crate) fn positions_to_docids(&self, doc_id_range: Range<u32>, positions: &mut Vec<u32>) {
+        if positions.is_empty() {
+            return;
+        }
+        let mut cur_doc = doc_id_range.start;
+        let mut last_doc = None;
+
+        assert!(self.idx.get_val(doc_id_range.start) as u32 <= positions[0]);
+
+        let mut write_doc_pos = 0;
+        for i in 0..positions.len() {
+            let pos = positions[i];
+            loop {
+                let end = self.idx.get_val(cur_doc + 1) as u32;
+                if end > pos {
+                    positions[write_doc_pos] = cur_doc;
+                    write_doc_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
+                    last_doc = Some(cur_doc);
+                    break;
+                }
+                cur_doc += 1;
+            }
+        }
+        positions.truncate(write_doc_pos);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ops::Range;
+    use std::sync::Arc;
+
+    use fastfield_codecs::IterColumn;
+
+    use crate::fastfield::MultiValueIndex;
+
+    fn index_to_pos_helper(
+        index: &MultiValueIndex,
+        doc_id_range: Range<u32>,
+        positions: &[u32],
+    ) -> Vec<u32> {
+        let mut positions = positions.to_vec();
+        index.positions_to_docids(doc_id_range, &mut positions);
+        positions
+    }
+
+    #[test]
+    fn test_positions_to_docid() {
+        let offsets = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
+        let column = IterColumn::from(offsets.into_iter());
+        let index = MultiValueIndex::new(Arc::new(column));
+        assert_eq!(index.num_docs(), 5);
+        {
+            let positions = vec![10u32, 11, 15, 20, 21, 22];
+
+            assert_eq!(index_to_pos_helper(&index, 0..5, &positions), vec![1, 3, 4]);
+            assert_eq!(index_to_pos_helper(&index, 1..5, &positions), vec![1, 3, 4]);
+            assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
+            assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
+            assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
+            assert_eq!(index_to_pos_helper(&index, 2..5, &[12]), vec![2]);
+            assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
+            assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
+        }
+    }
+}
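The core of this new file is the offset column: one `u64` per document plus a trailing sentinel, so a document's values live at `offsets[doc]..offsets[doc + 1]`, and positions_to_docids recovers the owning documents with a single forward scan. Here is a self-contained sketch of that idea, with a plain `Vec<u64>` standing in for the `Column<u64>` trait; the `OffsetIndex` name is illustrative:

// Standalone sketch of the offset-index idea behind MultiValueIndex,
// using a plain Vec<u64> in place of the `Column<u64>` abstraction.
struct OffsetIndex {
    // One entry per document plus a trailing sentinel: doc `d` owns
    // value positions offsets[d]..offsets[d + 1].
    offsets: Vec<u64>,
}

impl OffsetIndex {
    fn range(&self, doc: u32) -> std::ops::Range<u32> {
        self.offsets[doc as usize] as u32..self.offsets[doc as usize + 1] as u32
    }

    // Maps sorted value positions back to the docs that own them by
    // scanning the offsets once, mirroring positions_to_docids.
    fn positions_to_docids(&self, positions: &[u32]) -> Vec<u32> {
        let mut docids = Vec::new();
        let mut cur_doc = 0u32;
        for &pos in positions {
            // Advance until the current doc's value range contains `pos`.
            while self.offsets[cur_doc as usize + 1] as u32 <= pos {
                cur_doc += 1;
            }
            if docids.last() != Some(&cur_doc) {
                docids.push(cur_doc);
            }
        }
        docids
    }
}

fn main() {
    // Same offsets as the unit test above: doc #0 owns positions 0..10,
    // doc #1 owns 10..12, doc #2 owns 12..15, ...
    let index = OffsetIndex { offsets: vec![0, 10, 12, 15, 22, 23] };
    assert_eq!(index.range(1), 10..12);
    assert_eq!(index.positions_to_docids(&[10, 11, 15, 20, 21, 22]), vec![1, 3, 4]);
    println!("ok");
}

The real implementation additionally rewrites the result into `positions` in place, which avoids a second allocation on the query path.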
src/fastfield/multivalued/mod.rs
@@ -1,8 +1,23 @@
+mod index;
 mod reader;
 mod writer;

-pub use self::reader::MultiValuedFastFieldReader;
-pub use self::writer::MultiValuedFastFieldWriter;
+use fastfield_codecs::FastFieldCodecType;
+pub use index::MultiValueIndex;
+
+pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
+pub(crate) use self::writer::MultivalueStartIndex;
+pub use self::writer::{MultiValueU128FastFieldWriter, MultiValuedFastFieldWriter};
+
+/// The valid codecs for multivalue values exclude the linear interpolation codec.
+///
+/// This limitation only applies to the values, not to the offset index of the multivalue index.
+pub(crate) fn get_fastfield_codecs_for_multivalue() -> [FastFieldCodecType; 2] {
+    [
+        FastFieldCodecType::Bitpacked,
+        FastFieldCodecType::BlockwiseLinear,
+    ]
+}
+
 #[cfg(test)]
 mod tests {
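To make the restriction concrete, here is a tiny self-contained model of the codec filter; the `CodecType` enum and the function name are local stand-ins for `fastfield_codecs::FastFieldCodecType` and `get_fastfield_codecs_for_multivalue`, with `Linear` naming the excluded linear interpolation codec:

// Illustrative model only: the value column of a multivalue field may use
// Bitpacked or BlockwiseLinear, while the offset index may use any codec.
#[derive(Debug, PartialEq, Clone, Copy)]
enum CodecType {
    Bitpacked,
    Linear,
    BlockwiseLinear,
}

fn codecs_for_multivalue() -> [CodecType; 2] {
    [CodecType::Bitpacked, CodecType::BlockwiseLinear]
}

fn main() {
    // Linear interpolation is the one codec filtered out for the values.
    assert!(!codecs_for_multivalue().contains(&CodecType::Linear));
    println!("{:?}", codecs_for_multivalue());
}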
@@ -341,11 +356,13 @@ mod tests {
     }

     proptest! {
+        #![proptest_config(proptest::prelude::ProptestConfig::with_cases(5))]
         #[test]
         fn test_multivalued_proptest(ops in proptest::collection::vec(operation_strategy(), 1..10)) {
             assert!(test_multivalued_no_panic(&ops[..]).is_ok());
         }
     }

     #[test]
     fn test_multivalued_proptest_gcd() {
         use IndexingOp::*;
@@ -384,3 +401,219 @@ mod tests {
         Ok(())
     }
 }
+
+#[cfg(all(test, feature = "unstable"))]
+mod bench {
+    use std::collections::HashMap;
+    use std::path::Path;
+
+    use test::{self, Bencher};
+
+    use super::*;
+    use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
+    use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
+    use crate::indexer::doc_id_mapping::DocIdMapping;
+    use crate::schema::{Cardinality, NumericOptions, Schema};
+    use crate::Document;
+
+    fn bench_multi_value_ff_merge_opt(
+        num_docs: usize,
+        segments_every_n_docs: usize,
+        merge_policy: impl crate::indexer::MergePolicy + 'static,
+    ) {
+        let mut builder = crate::schema::SchemaBuilder::new();
+
+        let fast_multi =
+            crate::schema::NumericOptions::default().set_fast(Cardinality::MultiValues);
+        let multi_field = builder.add_f64_field("f64s", fast_multi);
+
+        let index = crate::Index::create_in_ram(builder.build());
+
+        let mut writer = index.writer_for_tests().unwrap();
+        writer.set_merge_policy(Box::new(merge_policy));
+
+        for i in 0..num_docs {
+            let mut doc = crate::Document::new();
+            doc.add_f64(multi_field, 0.24);
+            doc.add_f64(multi_field, 0.27);
+            doc.add_f64(multi_field, 0.37);
+            if i % 3 == 0 {
+                doc.add_f64(multi_field, 0.44);
+            }
+
+            writer.add_document(doc).unwrap();
+            if i % segments_every_n_docs == 0 {
+                writer.commit().unwrap();
+            }
+        }
+
+        {
+            writer.wait_merging_threads().unwrap();
+            let mut writer = index.writer_for_tests().unwrap();
+            let segment_ids = index.searchable_segment_ids().unwrap();
+            writer.merge(&segment_ids).wait().unwrap();
+        }
+
+        // If a merging thread fails, we should end up with more
+        // than one segment here.
+        assert_eq!(1, index.searchable_segments().unwrap().len());
+    }
+
+    #[bench]
+    fn bench_multi_value_ff_merge_many_segments(b: &mut Bencher) {
+        let num_docs = 100_000;
+        b.iter(|| {
+            bench_multi_value_ff_merge_opt(num_docs, 1_000, crate::indexer::NoMergePolicy);
+        });
+    }
+
+    #[bench]
+    fn bench_multi_value_ff_merge_many_segments_log_merge(b: &mut Bencher) {
+        let num_docs = 100_000;
+        b.iter(|| {
+            let merge_policy = crate::indexer::LogMergePolicy::default();
+            bench_multi_value_ff_merge_opt(num_docs, 1_000, merge_policy);
+        });
+    }
+
+    #[bench]
+    fn bench_multi_value_ff_merge_few_segments(b: &mut Bencher) {
+        let num_docs = 100_000;
+        b.iter(|| {
+            bench_multi_value_ff_merge_opt(num_docs, 33_000, crate::indexer::NoMergePolicy);
+        });
+    }
+
+    fn multi_values(num_docs: usize, vals_per_doc: usize) -> Vec<Vec<u64>> {
+        let mut vals = vec![];
+        for _i in 0..num_docs {
+            let mut block = vec![];
+            for j in 0..vals_per_doc {
+                block.push(j as u64);
+            }
+            vals.push(block);
+        }
+
+        vals
+    }
+
+    #[bench]
+    fn bench_multi_value_fflookup(b: &mut Bencher) {
+        let num_docs = 100_000;
+
+        let path = Path::new("test");
+        let directory: RamDirectory = RamDirectory::create();
+        let field = {
+            let options = NumericOptions::default().set_fast(Cardinality::MultiValues);
+            let mut schema_builder = Schema::builder();
+            let field = schema_builder.add_u64_field("field", options);
+            let schema = schema_builder.build();
+
+            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
+            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
+            let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
+            for block in &multi_values(num_docs, 3) {
+                let mut doc = Document::new();
+                for val in block {
+                    doc.add_u64(field, *val);
+                }
+                fast_field_writers.add_document(&doc).unwrap();
+            }
+            fast_field_writers
+                .serialize(&mut serializer, &HashMap::new(), None)
+                .unwrap();
+            serializer.close().unwrap();
+            field
+        };
+        let file = directory.open_read(&path).unwrap();
+        {
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            let data_idx = fast_fields_composite
+                .open_read_with_idx(field, 0)
+                .unwrap()
+                .read_bytes()
+                .unwrap();
+            let idx_reader = fastfield_codecs::open(data_idx).unwrap();
+
+            let data_vals = fast_fields_composite
+                .open_read_with_idx(field, 1)
+                .unwrap()
+                .read_bytes()
+                .unwrap();
+            let vals_reader = fastfield_codecs::open(data_vals).unwrap();
+            let fast_field_reader = MultiValuedFastFieldReader::open(idx_reader, vals_reader);
+            b.iter(|| {
+                let mut sum = 0u64;
+                let mut data = Vec::with_capacity(10);
+                for i in 0u32..num_docs as u32 {
+                    fast_field_reader.get_vals(i, &mut data);
+                    sum += data.iter().sum::<u64>();
+                }
+                sum
+            });
+        }
+    }
+
+    #[bench]
+    fn bench_multi_value_ff_creation(b: &mut Bencher) {
+        // 3 million ff entries
+        let num_docs = 1_000_000;
+        let multi_values = multi_values(num_docs, 3);
+
+        b.iter(|| {
+            let directory: RamDirectory = RamDirectory::create();
+            let options = NumericOptions::default().set_fast(Cardinality::MultiValues);
+            let mut schema_builder = Schema::builder();
+            let field = schema_builder.add_u64_field("field", options);
+            let schema = schema_builder.build();
+
+            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
+            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
+            let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
+            for block in &multi_values {
+                let mut doc = Document::new();
+                for val in block {
+                    doc.add_u64(field, *val);
+                }
+                fast_field_writers.add_document(&doc).unwrap();
+            }
+            fast_field_writers
+                .serialize(&mut serializer, &HashMap::new(), None)
+                .unwrap();
+            serializer.close().unwrap();
+        });
+    }
+
+    #[bench]
+    fn bench_multi_value_ff_creation_with_sorting(b: &mut Bencher) {
+        // 3 million ff entries
+        let num_docs = 1_000_000;
+        let multi_values = multi_values(num_docs, 3);
+
+        let doc_id_mapping =
+            DocIdMapping::from_new_id_to_old_id((0..1_000_000).collect::<Vec<_>>());
+
+        b.iter(|| {
+            let directory: RamDirectory = RamDirectory::create();
+            let options = NumericOptions::default().set_fast(Cardinality::MultiValues);
+            let mut schema_builder = Schema::builder();
+            let field = schema_builder.add_u64_field("field", options);
+            let schema = schema_builder.build();
+
+            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
+            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
+            let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
+            for block in &multi_values {
+                let mut doc = Document::new();
+                for val in block {
+                    doc.add_u64(field, *val);
+                }
+                fast_field_writers.add_document(&doc).unwrap();
+            }
+            fast_field_writers
+                .serialize(&mut serializer, &HashMap::new(), Some(&doc_id_mapping))
+                .unwrap();
+            serializer.close().unwrap();
+        });
+    }
+}
src/fastfield/multivalued/reader.rs
@@ -1,6 +1,10 @@
-use std::ops::Range;
+use std::ops::{Range, RangeInclusive};
+use std::sync::Arc;

-use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue, MultiValueLength};
+use fastfield_codecs::{Column, MonotonicallyMappableToU128};
+
+use super::MultiValueIndex;
+use crate::fastfield::FastValue;
 use crate::DocId;

 /// Reader for a multivalued `u64` fast field.
@@ -10,47 +14,45 @@ use crate::DocId;
 /// The `vals_reader` will access the concatenated list of all
 /// values for all readers.
 /// The `idx_reader` associates, for each document, the index of its first value.
+/// Stores the start position for each document.
 #[derive(Clone)]
 pub struct MultiValuedFastFieldReader<Item: FastValue> {
-    idx_reader: DynamicFastFieldReader<u64>,
-    vals_reader: DynamicFastFieldReader<Item>,
+    idx_reader: MultiValueIndex,
+    vals_reader: Arc<dyn Column<Item>>,
 }

 impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
     pub(crate) fn open(
-        idx_reader: DynamicFastFieldReader<u64>,
-        vals_reader: DynamicFastFieldReader<Item>,
+        idx_reader: Arc<dyn Column<u64>>,
+        vals_reader: Arc<dyn Column<Item>>,
     ) -> MultiValuedFastFieldReader<Item> {
         MultiValuedFastFieldReader {
-            idx_reader,
+            idx_reader: MultiValueIndex::new(idx_reader),
             vals_reader,
         }
     }

-    /// Returns `[start, end)`, such that the values associated
-    /// to the given document are `start..end`.
+    /// Fills `vals` with the values at the given range of positions.
     #[inline]
-    fn range(&self, doc: DocId) -> Range<u64> {
-        let start = self.idx_reader.get(doc);
-        let end = self.idx_reader.get(doc + 1);
-        start..end
-    }
-
-    /// Returns the array of values associated to the given `doc`.
-    #[inline]
-    fn get_vals_for_range(&self, range: Range<u64>, vals: &mut Vec<Item>) {
+    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<Item>) {
         let len = (range.end - range.start) as usize;
         vals.resize(len, Item::make_zero());
-        self.vals_reader.get_range(range.start, &mut vals[..]);
+        self.vals_reader
+            .get_range(range.start as u64, &mut vals[..]);
     }

-    /// Returns the array of values associated to the given `doc`.
+    /// Returns the array of values associated with the given `doc`.
     #[inline]
     pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
-        let range = self.range(doc);
+        let range = self.idx_reader.range(doc);
         self.get_vals_for_range(range, vals);
     }

+    /// Returns the multivalue index.
+    pub fn get_index_reader(&self) -> &MultiValueIndex {
+        &self.idx_reader
+    }
+
     /// Returns the minimum value for this fast field.
     ///
     /// The min value does not take into account possible
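After this reshaping, the reader delegates all docid-to-range logic to MultiValueIndex and keeps only the value lookup. Here is a minimal model of the get_vals path, with plain slices standing in for the two Column readers; the function and variable names are illustrative:

// Minimal model of MultiValuedFastFieldReader::get_vals: the offsets column
// gives a doc's value range, the vals column holds all values back to back.
fn get_vals(offsets: &[u64], vals: &[u64], doc: usize, out: &mut Vec<u64>) {
    let (start, end) = (offsets[doc] as usize, offsets[doc + 1] as usize);
    out.clear();
    out.extend_from_slice(&vals[start..end]);
}

fn main() {
    let offsets = [0u64, 2, 5]; // doc 0 -> 2 values, doc 1 -> 3 values
    let vals = [10u64, 11, 20, 21, 22];
    let mut out = Vec::new();
    get_vals(&offsets, &vals, 1, &mut out);
    assert_eq!(out, vec![20, 21, 22]);
    println!("{out:?}");
}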
@@ -71,27 +73,129 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {

     /// Returns the number of values associated with the document `DocId`.
     #[inline]
-    pub fn num_vals(&self, doc: DocId) -> usize {
-        let range = self.range(doc);
-        (range.end - range.start) as usize
+    pub fn num_vals(&self, doc: DocId) -> u32 {
+        self.idx_reader.num_vals_for_doc(doc)
     }

-    /// Returns the overall number of values in this field .
+    /// Returns the overall number of values in this field.
     #[inline]
-    pub fn total_num_vals(&self) -> u64 {
-        self.idx_reader.max_value()
+    pub fn total_num_vals(&self) -> u32 {
+        self.idx_reader.total_num_vals()
     }
 }

-impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
-    fn get_len(&self, doc_id: DocId) -> u64 {
-        self.num_vals(doc_id) as u64
+/// Reader for a multivalued `u128` fast field.
+///
+/// The reader is implemented as a `u64` fast field for the index and a `u128` fast field.
+///
+/// The `vals_reader` will access the concatenated list of all
+/// values for all readers.
+/// The `idx_reader` associates, for each document, the index of its first value.
+#[derive(Clone)]
+pub struct MultiValuedU128FastFieldReader<T: MonotonicallyMappableToU128> {
+    idx_reader: MultiValueIndex,
+    vals_reader: Arc<dyn Column<T>>,
+}
+
+impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
+    pub(crate) fn open(
+        idx_reader: Arc<dyn Column<u64>>,
+        vals_reader: Arc<dyn Column<T>>,
+    ) -> MultiValuedU128FastFieldReader<T> {
+        Self {
+            idx_reader: MultiValueIndex::new(idx_reader),
+            vals_reader,
+        }
     }

-    fn get_total_len(&self) -> u64 {
-        self.total_num_vals() as u64
+    /// Returns the first value associated with the given `doc`, if any.
+    #[inline]
+    pub fn get_first_val(&self, doc: DocId) -> Option<T> {
+        let range = self.idx_reader.range(doc);
+        if range.is_empty() {
+            return None;
+        }
+        Some(self.vals_reader.get_val(range.start))
+    }
+
+    /// Fills `vals` with the values at the given range of positions.
+    #[inline]
+    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<T>) {
+        let len = (range.end - range.start) as usize;
+        vals.resize(len, T::from_u128(0));
+        self.vals_reader
+            .get_range(range.start as u64, &mut vals[..]);
+    }
+
+    /// Returns the index reader.
+    pub fn get_index_reader(&self) -> &MultiValueIndex {
+        &self.idx_reader
+    }
+
+    /// Returns the array of values associated with the given `doc`.
+    #[inline]
+    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<T>) {
+        let range = self.idx_reader.range(doc);
+        self.get_vals_for_range(range, vals);
+    }
+
+    /// Iterates over all elements in the fast field.
+    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
+        self.vals_reader.iter()
+    }
+
+    /// Returns the minimum value for this fast field.
+    ///
+    /// The min value does not take into account possible
+    /// deleted documents, and should be considered as a lower bound
+    /// of the actual minimum value.
+    pub fn min_value(&self) -> T {
+        self.vals_reader.min_value()
+    }
+
+    /// Returns the maximum value for this fast field.
+    ///
+    /// The max value does not take into account possible
+    /// deleted documents, and should be considered as an upper bound
+    /// of the actual maximum value.
+    pub fn max_value(&self) -> T {
+        self.vals_reader.max_value()
+    }
+
+    /// Returns the number of values associated with the document `DocId`.
+    #[inline]
+    pub fn num_vals(&self, doc: DocId) -> u32 {
+        self.idx_reader.num_vals_for_doc(doc)
+    }
+
+    /// Returns the overall number of values in this field. It does not include deletes.
+    #[inline]
+    pub fn total_num_vals(&self) -> u32 {
+        assert_eq!(
+            self.vals_reader.num_vals(),
+            self.get_index_reader().total_num_vals()
+        );
+        self.idx_reader.total_num_vals()
+    }
+
+    /// Returns the docids matching the given doc_id_range and value_range.
+    #[inline]
+    pub fn get_docids_for_value_range(
+        &self,
+        value_range: RangeInclusive<T>,
+        doc_id_range: Range<u32>,
+        positions: &mut Vec<u32>,
+    ) {
+        let position_range = self
+            .get_index_reader()
+            .docid_range_to_position_range(doc_id_range.clone());
+        self.vals_reader
+            .get_docids_for_value_range(value_range, position_range, positions);
+
+        self.idx_reader.positions_to_docids(doc_id_range, positions);
     }
 }

 #[cfg(test)]
 mod tests {
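get_docids_for_value_range is the interesting new entry point: it widens the docid range into a value position range via the offset index, filters positions by value, then maps the surviving positions back to docids. A self-contained sketch of those three steps, with plain `Vec`s standing in for the column readers (all names are illustrative):

// Sketch of the three-step multivalue range query, under the assumption that
// values are stored back to back and offsets map docs to position ranges.
fn main() {
    let offsets: Vec<u32> = vec![0, 2, 3, 6]; // doc0 -> vals[0..2], doc1 -> [2..3], doc2 -> [3..6]
    let vals: Vec<u64> = vec![5, 9, 7, 1, 8, 9];

    let value_range = 7..=9u64;
    let doc_id_range = 0..3u32;

    // Step 1: docid range -> value position range.
    let pos_range = offsets[doc_id_range.start as usize]..offsets[doc_id_range.end as usize];

    // Step 2: keep positions whose value falls in the range (sorted by construction).
    let positions: Vec<u32> = (pos_range.start..pos_range.end)
        .filter(|&p| value_range.contains(&vals[p as usize]))
        .collect();

    // Step 3: positions -> owning docids, deduplicated by a single forward scan.
    let mut docids: Vec<u32> = Vec::new();
    let mut cur_doc = 0u32;
    for &p in &positions {
        while offsets[cur_doc as usize + 1] <= p {
            cur_doc += 1;
        }
        if docids.last() != Some(&cur_doc) {
            docids.push(cur_doc);
        }
    }
    assert_eq!(docids, vec![0, 1, 2]); // doc0 has a 9, doc1 a 7, doc2 an 8 and a 9
    println!("{docids:?}");
}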
Some files were not shown because too many files have changed in this diff.