Mirror of https://github.com/quickwit-oss/tantivy.git
Synced 2025-12-27 20:42:54 +00:00

Compare commits: python-bin ... columnar-m (1413 commits)
1  .gitattributes  vendored
@@ -1 +0,0 @@
cpp/* linguist-vendored
12  .github/FUNDING.yml  vendored  Normal file
@@ -0,0 +1,12 @@
# These are supported funding model platforms

github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
13  .github/ISSUE_TEMPLATE/actions.md  vendored  Normal file
@@ -0,0 +1,13 @@
---
name: Actions
about: Actions not directly related to producing code.

---

# Actions title

Action description.
e.g.
- benchmark
- investigate and report
- etc.
15  .github/dependabot.yml  vendored  Normal file
@@ -0,0 +1,15 @@
version: 2
updates:
  - package-ecosystem: cargo
    directory: "/"
    schedule:
      interval: daily
      time: "20:00"
    open-pull-requests-limit: 10

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: daily
      time: "20:00"
    open-pull-requests-limit: 10
26  .github/workflows/coverage.yml  vendored  Normal file
@@ -0,0 +1,26 @@
name: Coverage

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
      - uses: Swatinem/rust-cache@v2
      - uses: taiki-e/install-action@cargo-llvm-cov
      - name: Generate code coverage
        run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
          files: lcov.info
          fail_ci_if_error: true
28  .github/workflows/long_running.yml  vendored  Normal file
@@ -0,0 +1,28 @@
name: Long running tests

on:
  push:
    branches: [ main ]

env:
  CARGO_TERM_COLOR: always
  NUM_FUNCTIONAL_TEST_ITERATIONS: 20000

jobs:
  test:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Install stable
      uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        profile: minimal
        override: true

    - name: Run indexing_unsorted
      run: cargo test indexing_unsorted -- --ignored
    - name: Run indexing_sorted
      run: cargo test indexing_sorted -- --ignored
74  .github/workflows/test.yml  vendored  Normal file
@@ -0,0 +1,74 @@
name: Unit tests

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

env:
  CARGO_TERM_COLOR: always

jobs:
  check:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: Install nightly
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          profile: minimal
          components: rustfmt
      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          components: clippy

      - uses: Swatinem/rust-cache@v2

      - name: Check Formatting
        run: cargo +nightly fmt --all -- --check

      - uses: actions-rs/clippy-check@v1
        with:
          toolchain: stable
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --tests

  test:

    runs-on: ubuntu-latest

    strategy:
      matrix:
        features: [
          { label: "all", flags: "mmap,stopwords,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
          { label: "quickwit", flags: "mmap,quickwit,failpoints" }
        ]

    name: test-${{ matrix.features.label}}

    steps:
      - uses: actions/checkout@v3

      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          override: true

      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2

      - name: Run tests
        run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace

      - name: Run doctests
        run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace
4  .gitignore  vendored
@@ -1,4 +1,6 @@
tantivy.iml
.cargo
proptest-regressions
*.swp
target
target/debug
@@ -7,7 +9,7 @@ target/release
Cargo.lock
benchmark
.DS_Store
cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat
cargo-timing*
90  .travis.yml
@@ -1,90 +0,0 @@
# Based on the "trust" template v0.1.2
# https://github.com/japaric/trust/tree/v0.1.2

dist: trusty
language: rust
services: docker
sudo: required

env:
  global:
    - CRATE_NAME=tantivy
    - TRAVIS_CARGO_NIGHTLY_FEATURE=""
    # - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=

addons:
  apt:
    sources:
      - ubuntu-toolchain-r-test
      - kalakris-cmake
    packages:
      - gcc-4.8
      - g++-4.8
      - libcurl4-openssl-dev
      - libelf-dev
      - libdw-dev
      - binutils-dev
      - cmake

matrix:
  include:
    # Android
    - env: TARGET=aarch64-linux-android DISABLE_TESTS=1
    #- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1
    #- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1
    #- env: TARGET=i686-linux-android DISABLE_TESTS=1
    #- env: TARGET=x86_64-linux-android DISABLE_TESTS=1

    # Linux
    #- env: TARGET=aarch64-unknown-linux-gnu
    #- env: TARGET=i686-unknown-linux-gnu
    - env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 #UPLOAD_DOCS=1
    # - env: TARGET=x86_64-unknown-linux-musl CODECOV=1
    # OSX
    #- env: TARGET=x86_64-apple-darwin
    #  os: osx

before_install:
  - set -e
  - rustup self update

install:
  - sh ci/install.sh
  - source ~/.cargo/env || true
  - env | grep "TRAVIS"

before_script:
  - export PATH=$HOME/.cargo/bin:$PATH
  - cargo install cargo-update || echo "cargo-update already installed"
  - cargo install cargo-travis || echo "cargo-travis already installed"

script:
  - bash ci/script.sh

before_deploy:
  - sh ci/before_deploy.sh

after_success:
  # Needs GH_TOKEN env var to be set in travis settings
  - if [[ -v GH_TOKEN ]]; then echo "GH TOKEN IS SET"; else echo "GH TOKEN NOT SET"; fi
  - if [[ -v UPLOAD_DOCS ]]; then cargo doc; cargo doc-upload; else echo "doc upload disabled."; fi

#cache: cargo
#before_cache:
#  # Travis can't cache files that are not readable by "others"
#  - chmod -R a+r $HOME/.cargo
#  - find ./target/debug -type f -maxdepth 1 -delete
#  - rm -f ./target/.rustc_info.json
#  - rm -fr ./target/debug/{deps,.fingerprint}/tantivy*
#  - rm -r target/debug/examples/
#  - ls -1 examples/ | sed -e 's/\.rs$//' | xargs -I "{}" find target/* -name "*{}*" -type f -delete

#branches:
#  only:
#  # release tags
#  - /^v\d+\.\d+\.\d+.*$/
#  - master

notifications:
  email:
    on_success: never
295  ARCHITECTURE.md  Normal file
@@ -0,0 +1,295 @@
# Tantivy

## What is tantivy?

Tantivy is a library meant for building search engines. Although it is by no means a port of Lucene, its architecture is strongly inspired by it. If you are familiar with Lucene, you may be struck by the overlapping vocabulary. This is not fortuitous.

Tantivy's bread and butter is to address the problem of full-text search:

Given a large set of textual documents and a text query, return the K most relevant documents in a very efficient way. To execute these queries rapidly, tantivy needs to build an index beforehand. The relevance score implemented in tantivy is not configurable. Tantivy uses the same score as the default similarity in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).

But tantivy's scope does not stop there. Numerous features are required to power rich search applications. For instance, one may want to:

- compute the count of documents matching a query in the different sections of an e-commerce website,
- display an average price per square meter for a real estate search engine,
- take into account historical user data to rank documents in a specific way,
- or even use tantivy to power an OLAP database.

A more abstract description of the problem space tantivy is trying to address is the following:

Ingest a large set of documents, create an index that makes it possible to rapidly select all documents matching a given predicate (also known as a query) and collect some information about them ([See collector](#collector-define-what-to-do-with-matched-documents)).

Roughly speaking, the design follows these guiding principles:

- Search should be O(1) in memory.
- Indexing should be O(1) in memory. (In practice it is just sublinear.)
- Search should be as fast as possible.

This comes at the cost of the dynamicity of the index: while it is possible to add and delete documents from the corpus, tantivy is designed to handle these updates in large batches.
## [core/](src/core): Index, segments, searchers

Core contains all of the high-level code that makes it possible to create an index, add documents, delete documents and commit.

This is at once the most high-level part of tantivy, the least performance-sensitive one, the seemingly most mundane code... and paradoxically the most complicated part.

### Index and Segments

A tantivy index is a collection of smaller independent immutable segments.
Each segment contains its own independent set of data structures.

A segment is identified by a segment id that is in fact a UUID.
The files of a segment have the format

```segment-id . ext```

The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.

A small `meta.json` file is in charge of keeping track of the list of segments, as well as the schema.

On commit, one segment per indexing thread is written to disk, and the `meta.json` is then updated atomically.

For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).
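
To make the commit flow concrete, here is a minimal sketch of creating an index and committing one batch. It assumes the 0.19-era API; the field name, buffer size and document values are purely illustrative.

```rust
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Illustrative schema: a single stored, indexed text field.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    // Creating the index writes a fresh `meta.json`.
    let index = Index::create_in_ram(schema);
    // 50 MB indexing budget, shared by the indexing threads.
    let mut index_writer = index.writer(50_000_000)?;

    index_writer.add_document(doc!(title => "Of Mice and Men"))?;
    index_writer.add_document(doc!(title => "The Diary of a Young Girl"))?;

    // On commit, one segment per indexing thread is serialized to the
    // directory and `meta.json` is atomically updated to point to it.
    index_writer.commit()?;
    Ok(())
}
```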
### Deletes

Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it were a primary id, and to delete the associated term if they want to delete only one specific document.

On commit, tantivy will find all of the segments with documents matching this existing term and remove them from the [alive bitset file](src/fastfield/alive_bitset.rs) that represents the bitset of the alive document ids.
Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ```segment_id . commit_opstamp . del```.

An opstamp is simply an incremental id that identifies any operation applied to the index, for instance performing a commit or adding a document.
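
As a sketch (the field and id value are illustrative), deleting one specific document therefore means deleting the term carrying its user-provided id:

```rust
use tantivy::schema::Field;
use tantivy::{IndexWriter, Term};

/// Illustrative only: delete the document(s) whose `id_field` equals `doc_id`.
fn delete_by_id(index_writer: &mut IndexWriter, id_field: Field, doc_id: &str) -> tantivy::Result<()> {
    index_writer.delete_term(Term::from_field_text(id_field, doc_id));
    // The deletion only becomes visible to new searchers after a commit.
    index_writer.commit()?;
    Ok(())
}
```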
### DocId

Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`,
where `max_doc` is the number of documents in the segment (deleted or not). Having such a compact `DocId` space is key to the compression of our data structures.

The DocIds are simply allocated in the order documents are added to the index.

### Merges

In separate threads, tantivy's index writer searches for opportunities to merge segments.
The point of a segment merge is to:

- eventually get rid of tombstoned documents
- reduce the otherwise ever-growing number of segments.

Indeed, while having several segments instead of one does not hurt search too much, having hundreds can have a measurable impact on search performance.
### Searcher

The user of the library usually does not need to know about the existence of Segments.
Searching is done through an object called a [`Searcher`](src/core/searcher.rs), which captures a snapshot of the index at one point in time by holding a list of [SegmentReader](src/core/segment_reader.rs)s.

In other words, regardless of the commits, file garbage collection, or segment merges that might happen, as long as the user holds and reuses the same [Searcher](src/core/searcher.rs), search will happen on an immutable snapshot of the index.
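
In practice, a `Searcher` is obtained from an `IndexReader`; a minimal sketch, assuming the 0.19-era API:

```rust
use tantivy::{Index, Searcher};

/// Illustrative only: acquire a point-in-time snapshot of `index`.
fn snapshot(index: &Index) -> tantivy::Result<Searcher> {
    let reader = index.reader()?;
    // The searcher keeps serving the same snapshot even if commits or merges
    // happen concurrently; `reader.reload()` would pick up new commits.
    Ok(reader.searcher())
}
```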
## [directory/](src/directory): Where should the data be stored?

Tantivy, like Lucene, abstracts the place where the data should be stored behind a key trait
called [`Directory`](src/directory/directory.rs).
Contrary to Lucene however, "files" are quite different from some kind of `io::Read` object.
Check out the [`src/directory/directory.rs`](src/directory/directory.rs) trait for more details.

Tantivy ships two main directory implementations: the `MmapDirectory` and the `RamDirectory`,
but users can extend tantivy with their own implementation.
## [schema/](src/schema): What are documents?

Tantivy's documents follow a very strict schema, decided before building any index.

The schema defines all of the fields that the index's [`Document`](src/schema/document.rs)s may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...) as well as how each should be indexed / represented in tantivy.

Depending on the type of the field, you can decide to

- put it in the docstore
- store it as a fast field
- index it

Practically, tantivy will push the values of a field to up to 3 respective data structures.
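
A minimal sketch of such a schema, assuming the 0.19-era API (the field names and options are only illustrative):

```rust
use tantivy::schema::{Schema, FAST, INDEXED, STORED, TEXT};

/// Illustrative only: `title` goes to the inverted index and the docstore,
/// `price` is indexed and also stored as a fast field.
fn build_schema() -> Schema {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_u64_field("price", INDEXED | FAST | STORED);
    schema_builder.build()
}
```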
*Limitations*

As of today, tantivy's schema imposes a 1:1 relationship between a field that is being ingested and a field represented in the search index. In sophisticated search applications, it is fairly common to want to index a field twice using different tokenizers, or to index the concatenation of several fields into one field.

This is not something tantivy supports, and it is up to the user to duplicate / concatenate fields before feeding them to tantivy.
## General information about these data structures

All data structures in tantivy have:

- a writer
- a serializer
- a reader

The writer builds an in-memory representation of a batch of documents. This representation is not searchable. It is just meant as an intermediary mutable representation, to which we can sequentially add the documents of a batch. At the end of the batch (or if a memory limit is reached), this representation is then converted into an on-disk immutable representation that is extremely compact.
This conversion is done by the serializer.

Finally, the reader is in charge of offering an API to read this on-disk read-only representation.
In tantivy, readers are designed to require very little anonymous memory. The data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.
## [store/](src/store): Here is my DocId, Gimme my document

The docstore is a row-oriented storage that, for each document, stores a subset of the fields
that are marked as stored in the schema. The docstore is compressed using a general-purpose algorithm
like LZ4.

**Useful for**

In search engines, it is often used to display search results.
Once the top 10 documents have been identified, we fetch them from the store, and display them or their snippet on the search result page (aka SERP).

**Not useful for**

Fetching a document from the store is typically a "slow" operation. It usually consists of

- searching a compact tree-like data structure to find the position of the right block,
- decompressing a small block,
- returning the document from this block.

It is NOT meant to be called for every document matching a query.

As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.
## [fastfield/](src/fastfield): Here is my DocId, Gimme my value

Fast fields are stored in a column-oriented storage that allows for random access.
The only compression applied is bitpacking. The column comes with two pieces of metadata:
the minimum value in the column and the number of bits per doc.

Fetching a value for a `DocId` is then as simple as computing

```rust
min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
```

This operation requires just one memory fetch.
Also, DocSets are scanned through in order (DocIds are iterated in a sorted manner), which helps locality.

In Lucene's jargon, fast fields are called DocValues.
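
As a toy illustration of the formula above (not tantivy's actual implementation), a bitpacked read can be sketched as follows. It assumes the column buffer carries at least 8 bytes of trailing padding so the unaligned read stays in bounds, and that `num_bits` is at most 56.

```rust
/// Toy sketch of a bitpacked column read.
/// Assumes `data` has at least 8 trailing padding bytes and `num_bits <= 56`.
fn fetch_bits(data: &[u8], num_bits: u64, doc_id: u64) -> u64 {
    let bit_start = num_bits * doc_id;
    let byte_start = (bit_start / 8) as usize;
    // Load the 8 bytes containing the value, then shift and mask it out.
    let mut word_bytes = [0u8; 8];
    word_bytes.copy_from_slice(&data[byte_start..byte_start + 8]);
    let word = u64::from_le_bytes(word_bytes);
    let mask = (1u64 << num_bits) - 1;
    (word >> (bit_start % 8)) & mask
}

fn fast_field_value(data: &[u8], min_value: u64, num_bits: u64, doc_id: u64) -> u64 {
    min_value + fetch_bits(data, num_bits, doc_id)
}
```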
**Useful for**

They are typically integer values that are useful to either rank documents or compute aggregates over
all of the documents matching a query (aka [DocSet](src/docset.rs)).

For instance, one could define a function to combine upvotes with tantivy's internal relevancy score.
This can be done by fetching a fast field during scoring.
One could also compute the mean price of the items matching a query in an e-commerce website.
This can be done by fetching a fast field in a collector.
Finally, one could decide to post-filter a docset to remove documents with a price within a specific range.
If the ratio of filtered-out documents is not too low, an efficient way to do this is to fetch the price and apply the filter on the collector side.

Aside from integer values, it is also possible to store an actual byte payload.
For an advanced search engine, it is possible to store all of the features required for learning-to-rank in a byte payload, access it during search, and apply the learning-to-rank model.

Finally, facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).
# The inverted search index

The inverted index is the core part of full-text search.
When presented with a new document with the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting these strings into tokens, it might also apply different kinds of operations like dropping the punctuation, converting the characters to lowercase, applying stemming, etc. Tantivy makes it possible to configure the operations to be applied in the schema (tokenizer/ is the place where these operations are implemented).

For instance, the default tokenizer of tantivy would break our text into: `[hello, happy, tax, payer]`.
The document will therefore be registered in the inverted index as containing the terms
`[text:hello, text:happy, text:tax, text:payer]`.

The role of the inverted index is, when given a term, to give us in return a very fast iterator over the sorted doc ids that match the term.

Such an iterator is called a posting list. In addition to giving us `DocId`s, it can also optionally give us the number of occurrences of the term for each document, also called term frequency or TF.

These iterators being sorted by DocId, one can create an iterator over the documents containing `text:tax AND text:payer`, `(text:tax AND text:payer) OR (text:contribuable)`, or any boolean expression.

In order to represent the function

```Term ⟶ Posting```

the inverted index actually consists of two data structures chained together.

- [Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term dictionary.
- [TermInfo](src/postings/term_info.rs) ⟶ [Posting](src/postings/postings.rs) is addressed by the posting lists.

Here, [TermInfo](src/postings/term_info.rs) is an object containing some metadata about a term.
## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)

Tantivy's term dictionary is mainly in charge of supplying the function

[Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)

It is itself broken into two parts.

- [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
- [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.
## [postings/](src/postings): Iterate over documents... very fast

A posting list makes it possible to store a sorted list of doc ids, and for each doc, to store
a term frequency as well.

The posting lists are stored in a separate file. The [TermInfo](src/postings/term_info.rs) contains an offset into that file and the number of documents for the given posting list. Both are required and sufficient to read the posting list.

The posting list is organized in blocks of 128 documents.
One block of doc ids is followed by one block of term frequencies.

The doc ids are delta-encoded and bitpacked.
The term frequencies are bitpacked.

Because the number of docs is rarely a multiple of 128, the last block may contain anywhere between 1 and 127 documents. We then use variable int encoding instead of bitpacking.
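
To illustrate the encoding idea (a simplified sketch, not the on-disk format), delta-encoding a block of sorted doc ids and choosing a shared bit width for it looks like this:

```rust
/// Simplified sketch of how a block of sorted doc ids could be prepared for
/// bitpacking: compute the deltas, then the shared bit width for the block.
fn delta_encode_block(sorted_doc_ids: &[u32]) -> (Vec<u32>, u8) {
    let mut deltas = Vec::with_capacity(sorted_doc_ids.len());
    let mut previous = 0u32;
    for &doc_id in sorted_doc_ids {
        deltas.push(doc_id - previous);
        previous = doc_id;
    }
    // All deltas in the block are stored with the same number of bits,
    // enough to hold the largest one.
    let max_delta = deltas.iter().copied().max().unwrap_or(0);
    let num_bits = (32 - max_delta.leading_zeros()) as u8;
    (deltas, num_bits)
}
```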
## [positions/](src/positions): Where are my terms within the documents?

Phrase queries make it possible to search for documents containing a specific sequence of terms.
For instance, the phrase query "the art of war" does not match "the war of art".
To make this possible, one can specify in the schema that a field should store positions in addition to being indexed.

The token positions of all of the terms are then stored in a separate file with the extension `.pos`.
The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in positions this time) into this file. As we iterate through the docset,
we advance the position reader by the term frequency of the current document.
## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?

The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires knowing the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm.
The fieldnorm is therefore compressed. Values up to 40 are encoded unchanged.
## [tokenizer/](src/tokenizer): How should we process text?

Text processing is key to a good search experience.
Split or normalize your text too much, and the search results will have lower precision and higher recall.
Normalize too little, or under-split your text, and you will end up with higher precision and lower recall.

Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./src/tokenizer/tokenizer.rs) or implementing your own to first split the text into tokens, and then chaining different [`TokenFilter`](src/tokenizer/tokenizer.rs)s to it.

Tantivy comes with a few tokenizers, but external crates offer advanced tokenizers, such as [Lindera](https://crates.io/crates/lindera) for Japanese.
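
A sketch of such a chain, assuming the 0.19-era tokenizer API (the analyzer name and filter choices are illustrative):

```rust
use tantivy::tokenizer::{Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, TextAnalyzer};
use tantivy::Index;

/// Illustrative only: register a custom analyzer chain under a name that
/// text fields in the schema can then reference.
fn register_custom_analyzer(index: &Index) {
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(RemoveLongFilter::limit(40)) // drop pathologically long tokens
        .filter(LowerCaser)
        .filter(Stemmer::new(Language::English));
    index.tokenizers().register("custom_en_stem", analyzer);
}
```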
## [query/](src/query): Define and compose queries

The [Query](src/query/query.rs) trait defines what a query is.
Because some queries need to compute statistics over the entire index, and because the
index is composed of several `SegmentReader`s, the path from a `Query` to an iterator over documents is slightly convoluted, but fundamentally, this is what a Query is.

The iterator over documents comes with a scoring function. The resulting trait is called a
[Scorer](src/query/scorer.rs) and is specific to a segment.

Different queries can be combined using the [BooleanQuery](src/query/boolean_query/).
Tantivy comes with different types of queries and can be extended by implementing
the `Query`, `Weight`, and `Scorer` traits.
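
For instance, the `text:tax AND text:payer` query from the earlier example could be built programmatically like this (a sketch assuming the 0.19-era API; `text_field` stands for any indexed text field):

```rust
use tantivy::query::{BooleanQuery, Occur, Query, TermQuery};
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::Term;

/// Illustrative only: build the boolean query `text:tax AND text:payer`.
fn tax_and_payer(text_field: Field) -> BooleanQuery {
    let tax: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(text_field, "tax"),
        IndexRecordOption::Basic,
    ));
    let payer: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(text_field, "payer"),
        IndexRecordOption::Basic,
    ));
    BooleanQuery::new(vec![(Occur::Must, tax), (Occur::Must, payer)])
}
```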
## [collector](src/collector): Define what to do with matched documents

Collectors define how to aggregate the documents matching a query, in the broadest sense possible.
The search will push matched documents one by one, calling their
`fn collect(doc: DocId, score: Score);` method.

Users may implement their own collectors by implementing the [Collector](src/collector/mod.rs) trait.
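
A usage sketch with two off-the-shelf collectors, assuming the 0.19-era API: `TopDocs` keeps the k best hits while `Count` simply counts matches, and they can be combined into a single search pass.

```rust
use tantivy::collector::{Count, TopDocs};
use tantivy::query::Query;
use tantivy::Searcher;

/// Illustrative only: collect the 10 best hits and the total match count.
fn search_top10(searcher: &Searcher, query: &dyn Query) -> tantivy::Result<()> {
    let (top_docs, count) = searcher.search(query, &(TopDocs::with_limit(10), Count))?;
    println!("{count} documents match");
    for (score, doc_address) in top_docs {
        // Hit the docstore only for the few displayed hits, as advised above.
        let doc = searcher.doc(doc_address)?;
        println!("{score}: {}", searcher.schema().to_json(&doc));
    }
    Ok(())
}
```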
## [query-grammar](query-grammar): Defines the grammar of the query parser

While the [QueryParser](src/query/query_parser/query_parser.rs) struct is located in the `query/` directory, the actual parser combinator used to convert user queries into an AST is in an external crate called `query-grammar`. This part was externalized to lighten the work of the compiler.
384
CHANGELOG.md
384
CHANGELOG.md
@@ -1,7 +1,254 @@
|
||||
Tantivy 0.19
|
||||
================================
|
||||
#### Bugfixes
|
||||
- Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
|
||||
- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480) (@PSeitz @fulmicoton)
|
||||
|
||||
#### Features/Improvements
|
||||
- Add support for `IN` in queryparser , e.g. `field: IN [val1 val2 val3]` [#1683](https://github.com/quickwit-oss/tantivy/pull/1683) (@trinity-1686a)
|
||||
- Skip score calculation, when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz)
|
||||
- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
|
||||
- The `DateTime` type has been updated to hold timestamps with microseconds precision.
|
||||
`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used as a hint for fast value compression. Otherwise, seconds precision is used everywhere else (i.e. terms, indexing) [#1396](https://github.com/quickwit-oss/tantivy/pull/1396) (@evanxg852000)
|
||||
- Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
|
||||
- Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
|
||||
- Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
|
||||
- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570) (@PSeitz)
|
||||
- Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
|
||||
- Doc store
|
||||
- use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510) (@PSeitz @fulmicoton)
|
||||
- Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
|
||||
- Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
|
||||
- Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
|
||||
- Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
|
||||
- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
|
||||
- Aggregation
|
||||
- Add aggregation support for date type [#1693](https://github.com/quickwit-oss/tantivy/pull/1693)(@PSeitz)
|
||||
- Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
|
||||
- Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
|
||||
- Faster indexing
|
||||
- [#1610](https://github.com/quickwit-oss/tantivy/pull/1610) (@PSeitz)
|
||||
- [#1594](https://github.com/quickwit-oss/tantivy/pull/1594) (@PSeitz)
|
||||
- [#1582](https://github.com/quickwit-oss/tantivy/pull/1582) (@PSeitz)
|
||||
- [#1611](https://github.com/quickwit-oss/tantivy/pull/1611) (@PSeitz)
|
||||
- Added a pre-configured stop word filter for various languages [#1666](https://github.com/quickwit-oss/tantivy/pull/1666) (@adamreichold)
|
||||
|
||||
Tantivy 0.18
|
||||
================================
|
||||
|
||||
- For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
|
||||
- The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
|
||||
- The type alias `tantivy::DateTime` has been removed.
|
||||
- `Value::Date` wraps `time::PrimitiveDateTime` without time zone information.
|
||||
- Internally date/time values are stored as seconds since UNIX epoch in UTC.
|
||||
- Converting a `time::OffsetDateTime` to `Value::Date` implicitly converts the value into UTC.
|
||||
If this is not desired do the time zone conversion yourself and use `time::PrimitiveDateTime`
|
||||
directly instead.
|
||||
- Add [histogram](https://github.com/quickwit-oss/tantivy/pull/1306) aggregation (@PSeitz)
|
||||
- Add support for fastfield on text fields (@PSeitz)
|
||||
- Add terms aggregation (@PSeitz)
|
||||
- Add support for zstd compression (@kryesh)
|
||||
|
||||
Tantivy 0.18.1
|
||||
================================
|
||||
- Hotfix: positions computation. #1629 (@fmassot, @fulmicoton, @PSeitz)
|
||||
|
||||
Tantivy 0.17
|
||||
================================
|
||||
|
||||
- LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
|
||||
- Adds a searcher Warmer API (@shikhar @fulmicoton)
|
||||
- Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
|
||||
- Facets are now necessarily indexed. Existing indexes with indexed facets should work out of the box. Indexes with facets marked with index: false will be broken (but they were already broken in a sense). (@fulmicoton) #1195
|
||||
- Bugfix for an issue that could, in theory, impact durability on some filesystems [#1224](https://github.com/quickwit-oss/tantivy/issues/1224)
|
||||
- Schema now offers the option to not index fieldnorms (@lpouget) [#922](https://github.com/quickwit-oss/tantivy/issues/922)
|
||||
- Reduce the number of fsync calls [#1225](https://github.com/quickwit-oss/tantivy/issues/1225)
|
||||
- Fix opening bytes index with dynamic codec (@PSeitz) [#1278](https://github.com/quickwit-oss/tantivy/issues/1278)
|
||||
- Added an aggregation collector for range, average and stats compatible with Elasticsearch. (@PSeitz)
|
||||
- Added a JSON schema type @fulmicoton [#1251](https://github.com/quickwit-oss/tantivy/issues/1251)
|
||||
- Added support for slop in phrase queries @halvorboe [#1068](https://github.com/quickwit-oss/tantivy/issues/1068)
|
||||
|
||||
Tantivy 0.16.2
|
||||
================================
|
||||
|
||||
- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)
|
||||
|
||||
Tantivy 0.16.1
|
||||
========================
|
||||
|
||||
- Major Bugfix on multivalued fastfield. #1151
|
||||
- Demux operation (@PSeitz)
|
||||
|
||||
Tantivy 0.16.0
|
||||
=========================
|
||||
|
||||
- Bugfix in the filesum check. (@evanxg852000) #1127
|
||||
- Bugfix in positions when the index is sorted by a field. (@appaquet) #1125
|
||||
|
||||
Tantivy 0.15.3
|
||||
=========================
|
||||
|
||||
- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
|
||||
|
||||
Tantivy 0.15.2
|
||||
========================
|
||||
|
||||
- Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088
|
||||
|
||||
Tantivy 0.15.1
|
||||
=========================
|
||||
|
||||
- Major bugfix. DocStore panics when first block is deleted. (@appaquet) #1077
|
||||
|
||||
Tantivy 0.15.0
|
||||
=========================
|
||||
|
||||
- API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
|
||||
This change is breaking but migration is trivial.
|
||||
- Added an Histogram collector. (@fulmicoton) #994
|
||||
- Added support for Option<TCollector>. (@fulmicoton)
|
||||
- DocAddress is now a struct (@scampi) #987
|
||||
- Bugfix consistent tie break handling in facet's topk (@hardikpnsp) #357
|
||||
- Date field support for range queries (@rihardsk) #516
|
||||
- Added lz4-flex as the default compression scheme in tantivy (@PSeitz) #1009
|
||||
- Renamed a lot of symbols to avoid all uppercasing on acronyms, as per new clippy recommendation. For instance, RAMDirectory -> RamDirectory. (@fulmicoton)
|
||||
- Simplified positions index format (@fulmicoton) #1022
|
||||
- Moved bitpacking to bitpacker subcrate and add BlockedBitpacker, which bitpacks blocks of 128 elements (@PSeitz) #1030
|
||||
- Added support for more-like-this query in tantivy (@evanxg852000) #1011
|
||||
- Added support for sorting an index, e.g. presorting documents in an index by a timestamp field. This can heavily improve performance for certain scenarios by utilizing the sorted data (top-n optimizations) (@PSeitz). #1026
|
||||
- Add iterator over documents in doc store (@PSeitz). #1044
|
||||
- Fix log merge policy (@PSeitz). #1043
|
||||
- Add detection to avoid small doc store blocks on merge (@PSeitz). #1054
|
||||
- Make doc store compression dynamic (@PSeitz). #1060
|
||||
- Switch to json for footer version handling (@PSeitz). #1060
|
||||
- Updated TermMerger implementation to rely on the union feature of the FST (@scampi) #469
|
||||
- Add boolean marking whether position is required in the query_terms API call (@fulmicoton). #1070
|
||||
|
||||
Tantivy 0.14.0
|
||||
=========================
|
||||
|
||||
- Removed dependency on atomicwrites #833. (Implemented by @fulmicoton upon suggestion and research from @asafigan.)
|
||||
- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
|
||||
- API Change. Accessing the typed value of a `Schema::Value` now returns an Option instead of panicking if the type does not match.
|
||||
- Large API Change in the Directory API. Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span the entire file.
|
||||
- Added support for Brotli compression in the DocStore. (@ppodolsky)
|
||||
- Added helper for building intersections and unions in BooleanQuery (@guilload)
|
||||
- Bugfix in `Query::explain`
|
||||
- Removed dependency on `notify` #924. Replaced with `FileWatcher` struct that polls meta file every 500ms in background thread. (@halvorboe @guilload)
|
||||
- Added `FilterCollector`, which wraps another collector and filters docs using a predicate over a fast field (@barrotsteindev)
|
||||
- Simplified the encoding of the skip reader struct. BlockWAND max tf is now encoded over a single byte. (@fulmicoton)
|
||||
- `FilterCollector` now supports all Fast Field value types (@barrotsteindev)
|
||||
- FastField are not all loaded when opening the segment reader. (@fulmicoton)
|
||||
- Added an API to merge segments, see `tantivy::merge_segments` #1005. (@evanxg852000)
|
||||
|
||||
This version breaks compatibility and requires users to reindex everything.
|
||||
|
||||
Tantivy 0.13.2
|
||||
===================
|
||||
|
||||
Bugfix. Acquiring a facet reader on a segment that does not contain any
|
||||
doc with this facet returns `None`. (#896)
|
||||
|
||||
Tantivy 0.13.1
|
||||
===================
|
||||
|
||||
Made `Query` and `Collector` `Send + Sync`.
|
||||
Updated misc dependency versions.
|
||||
|
||||
Tantivy 0.13.0
|
||||
======================
|
||||
|
||||
Tantivy 0.13 introduces a change in the index format that will require
|
||||
you to reindex your index (BlockWAND information is added to the skip list).
|
||||
The index size increase is minor as this information is only added for
|
||||
full blocks.
|
||||
If you have a massive index for which reindexing is not an option, please contact me
|
||||
so that we can discuss possible solutions.
|
||||
|
||||
- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
|
||||
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
|
||||
- `MMapDirectory::open` does not return a `Result` anymore.
|
||||
- Change in the DocSet and Scorer API. (@fulmicoton).
|
||||
A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
|
||||
`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
|
||||
As a result, iterating through DocSet now looks as follows
|
||||
|
||||
```rust
|
||||
let mut doc = docset.doc();
|
||||
while doc != TERMINATED {
|
||||
// ...
|
||||
doc = docset.advance();
|
||||
}
|
||||
```
|
||||
|
||||
The change made it possible to greatly simplify a lot of the docset's code.
|
||||
|
||||
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
|
||||
- Added an offset option to the Top(.*)Collectors. (@robyoung)
|
||||
- Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
|
||||
to the PISA team for answering all my questions!)
|
||||
|
||||
Tantivy 0.12.0
|
||||
======================
|
||||
|
||||
- Removing static dispatch in tokenizers for simplicity. (#762)
|
||||
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
|
||||
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
|
||||
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
|
||||
- Important Bugfix #777, causing tantivy to retain memory mapping. (diagnosed by @poljar)
|
||||
- Added support for field boosting. (#547, @fulmicoton)
|
||||
|
||||
## How to update?
|
||||
|
||||
Crates relying on custom tokenizers, or registering tokenizers in the manager, will require some
|
||||
minor changes. Check <https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs>
|
||||
for a code sample.
|
||||
|
||||
Tantivy 0.11.3
|
||||
=======================
|
||||
|
||||
- Fixed DateTime as a fast field (#735)
|
||||
|
||||
Tantivy 0.11.2
|
||||
=======================
|
||||
|
||||
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
|
||||
- Exposing a constructor for `WatchHandle` (#731)
|
||||
|
||||
Tantivy 0.11.1
|
||||
=====================
|
||||
|
||||
- Bug fix #729
|
||||
|
||||
Tantivy 0.11.0
|
||||
=====================
|
||||
|
||||
- Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
|
||||
- Various bugfixes in the query parser.
|
||||
- Better handling of hyphens in query parser. (#609)
|
||||
- Better handling of whitespaces.
|
||||
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
|
||||
- API change around `Box<BoxableTokenizer>`. See detail in #629
|
||||
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
|
||||
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
|
||||
- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
|
||||
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
|
||||
- Added handling of pre-tokenized text fields (#642), which will enable users to
|
||||
load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
|
||||
- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)
|
||||
|
||||
## How to update?
|
||||
|
||||
- The index format is changed. You are required to reindex your data to use tantivy 0.11.
|
||||
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
|
||||
- Regexes are now compiled when the `RegexQuery` instance is built. As a result, it can now return
|
||||
an error and handling the `Result` is required.
|
||||
- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
|
||||
|
||||
Tantivy 0.10.2
|
||||
=====================
|
||||
|
||||
- Closes #656. Solving memory leak.
|
||||
|
||||
Tantivy 0.10.1
|
||||
=====================
|
||||
@@ -10,32 +257,31 @@ Tantivy 0.10.1
|
||||
Avoid watching the mmap directory until someone effectively creates a reader that uses
|
||||
this functionality.
|
||||
|
||||
|
||||
Tantivy 0.10.0
|
||||
=====================
|
||||
|
||||
*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*
|
||||
|
||||
- Added an API to easily tweak or entirely replace the
|
||||
default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@pmasurel)
|
||||
- Added an API to easily tweak or entirely replace the
|
||||
default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@fulmicoton)
|
||||
- Added an ASCII folding filter (@drusellers)
|
||||
- Bugfix in `query.count` in presence of deletes (@pmasurel)
|
||||
- Added `.explain(...)` in `Query` and `Weight` to (@pmasurel)
|
||||
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
|
||||
- Bugfix in `query.count` in presence of deletes (@fulmicoton)
|
||||
- Added `.explain(...)` in `Query` and `Weight` to (@fulmicoton)
|
||||
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
|
||||
All segments are simply removed.
|
||||
|
||||
Minor
|
||||
---------
|
||||
|
||||
- Switched to Rust 2018 (@uvd)
|
||||
- Small simplification of the code.
|
||||
- Small simplification of the code.
|
||||
Calling .freq() or .doc() when .advance() has never been called
|
||||
on segment postings should panic from now on.
|
||||
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
|
||||
- Fast fields are now preloaded when the `SegmentReader` is created.
|
||||
- `IndexMeta` is now public. (@hntd187)
|
||||
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
|
||||
Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
|
||||
only require a read lock. (@pmasurel)
|
||||
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
|
||||
only require a read lock. (@fulmicoton)
|
||||
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
|
||||
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
|
||||
- Bugfix - Files get deleted slightly earlier
|
||||
@@ -49,25 +295,26 @@ Your program should be usable as is.
|
||||
|
||||
Fast fields used to be accessed directly from the `SegmentReader`.
|
||||
The API changed, you are now required to acquire your fast field reader via the
|
||||
`segment_reader.fast_fields()`, and use one of the typed method:
|
||||
`segment_reader.fast_fields()`, and use one of the typed method:
|
||||
|
||||
- `.u64()`, `.i64()` if your field is single-valued ;
|
||||
- `.u64s()`, `.i64s()` if your field is multi-valued ;
|
||||
- `.bytes()` if your field is bytes fast field.
|
||||
|
||||
|
||||
|
||||
Tantivy 0.9.0
|
||||
=====================
|
||||
*0.9.0 index format is not compatible with the
|
||||
|
||||
*0.9.0 index format is not compatible with the
|
||||
previous index format.*
|
||||
- MAJOR BUGFIX :
|
||||
|
||||
- MAJOR BUGFIX :
|
||||
Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
|
||||
- Removed most unsafe (@fulmicoton)
|
||||
- Indexer memory footprint improved (VInt compression, inlining the first block). (@fulmicoton)
|
||||
- Stemming in other language possible (@pentlander)
|
||||
- Segments with no docs are deleted earlier (@barrotsteindev)
|
||||
- Added grouped add and delete operations.
|
||||
They are guaranteed to happen together (i.e. they cannot be split by a commit).
|
||||
- Added grouped add and delete operations.
|
||||
They are guaranteed to happen together (i.e. they cannot be split by a commit).
|
||||
In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
|
||||
- Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
|
||||
for int fields. (@fulmicoton)
|
||||
@@ -81,59 +328,62 @@ tantivy 0.9 brought some API breaking change.
|
||||
To update from tantivy 0.8, you will need to go through the following steps.
|
||||
|
||||
- `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::STORED`.
|
||||
- The index now does not hold the pool of searcher anymore. You are required to create an intermediary object called
|
||||
`IndexReader` for this.
|
||||
|
||||
- The index now does not hold the pool of searcher anymore. You are required to create an intermediary object called
|
||||
`IndexReader` for this.
|
||||
|
||||
```rust
|
||||
// create the reader. You typically need to create 1 reader for the entire
|
||||
// lifetime of your program.
|
||||
let reader = index.reader()?;
|
||||
|
||||
|
||||
// Acquiring a searcher (previously `index.searcher()`) is now written:
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// With the default setting of the reader, you are not required to
|
||||
|
||||
// With the default setting of the reader, you are not required to
|
||||
// call `index.load_searchers()` anymore.
|
||||
//
|
||||
// The IndexReader will pick up that change automatically, regardless
|
||||
// of whether the update was done in a different process or not.
|
||||
// If this behavior is not wanted, you can create your reader with
|
||||
// If this behavior is not wanted, you can create your reader with
|
||||
// the `ReloadPolicy::Manual`, and manually decide when to reload the index
|
||||
// by calling `reader.reload()?`.
|
||||
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
Tantivy 0.8.2
|
||||
=====================
|
||||
|
||||
Fixing build for x86_64 platforms. (#496)
|
||||
No need to update from 0.8.1 if tantivy
|
||||
is building on your platform.
|
||||
|
||||
|
||||
Tantivy 0.8.1
|
||||
=====================
|
||||
|
||||
Hotfix of #476.
|
||||
|
||||
Merge was reflecting deletes before commit was passed.
|
||||
Merge was reflecting deletes before commit was passed.
|
||||
Thanks @barrotsteindev for reporting the bug.
|
||||
|
||||
|
||||
Tantivy 0.8.0
|
||||
=====================
|
||||
*No change in the index format*
|
||||
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
|
||||
- Multithreaded search (@jwolfe, @fulmicoton)
|
||||
|
||||
*No change in the index format*
|
||||
|
||||
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
|
||||
- Multithreaded search (@jwolfe, @fulmicoton)
|
||||
|
||||
Tantivy 0.7.1
|
||||
=====================
|
||||
|
||||
*No change in the index format*
|
||||
|
||||
- Bugfix: NGramTokenizer panics on non ascii chars
|
||||
- Added a space usage API
|
||||
|
||||
Tantivy 0.7
|
||||
=====================
|
||||
|
||||
- Skip data for doc ids and positions (@fulmicoton),
|
||||
greatly improving performance
|
||||
- Tantivy errors now rely on the failure crate (@drusellers)
|
||||
@@ -143,15 +393,15 @@ Tantivy 0.7
|
||||
|
||||
Tantivy 0.6.1
|
||||
=========================
|
||||
|
||||
- Bugfix #324. GC was removing files that were still in use
|
||||
- Added support for parsing AllQuery and RangeQuery via QueryParser
|
||||
- AllQuery: `*`
|
||||
- RangeQuery:
|
||||
- Inclusive `field:[startIncl to endIncl]`
|
||||
- Exclusive `field:{startExcl to endExcl}`
|
||||
- Mixed `field:[startIncl to endExcl}` and vice versa
|
||||
- Unbounded `field:[start to *]`, `field:[* to end]`
|
||||
|
||||
- AllQuery: `*`
|
||||
- RangeQuery:
|
||||
- Inclusive `field:[startIncl to endIncl]`
|
||||
- Exclusive `field:{startExcl to endExcl}`
|
||||
- Mixed `field:[startIncl to endExcl}` and vice versa
|
||||
- Unbounded `field:[start to *]`, `field:[* to end]`
|
||||
|
||||
Tantivy 0.6
|
||||
==========================
|
||||
@@ -159,63 +409,58 @@ Tantivy 0.6
|
||||
Special thanks to @drusellers and @jason-wolfe for their contributions
|
||||
to this release!
|
||||
|
||||
- Removed C code. Tantivy is now pure Rust. (@pmasurel)
|
||||
- BM25 (@pmasurel)
|
||||
- Approximate field norms encoded over 1 byte. (@pmasurel)
|
||||
- Compiles on stable rust (@pmasurel)
|
||||
- Removed C code. Tantivy is now pure Rust. (@fulmicoton)
|
||||
- BM25 (@fulmicoton)
|
||||
- Approximate field norms encoded over 1 byte. (@fulmicoton)
|
||||
- Compiles on stable rust (@fulmicoton)
|
||||
- Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
|
||||
- Completely uncompressed
|
||||
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
|
||||
- Completely uncompressed
|
||||
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
|
||||
- Add NGram token support (@drusellers)
|
||||
- Add Stopword Filter support (@drusellers)
|
||||
- Add a FuzzyTermQuery (@drusellers)
|
||||
- Add a RegexQuery (@drusellers)
|
||||
- Various performance improvements (@pmasurel)_
|
||||
|
||||
- Various performance improvements (@fulmicoton)_
|
||||
|
||||
Tantivy 0.5.2
|
||||
===========================
|
||||
|
||||
- bugfix #274
|
||||
- bugfix #280
|
||||
- bugfix #289
|
||||
|
||||
|
||||
Tantivy 0.5.1
|
||||
==========================
|
||||
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
|
||||
|
||||
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
|
||||
|
||||
Tantivy 0.5
|
||||
==========================
|
||||
|
||||
- Faceting
|
||||
- RangeQuery
|
||||
- Configurable tokenization pipeline
|
||||
- Bugfix in PhraseQuery
|
||||
- Various query optimisation
|
||||
- Allowing very large indexes
|
||||
- 64 bits file address
|
||||
- Smarter encoding of the `TermInfo` objects
|
||||
|
||||
|
||||
- 64 bits file address
|
||||
- Smarter encoding of the `TermInfo` objects
|
||||
|
||||
Tantivy 0.4.3
|
||||
==========================
|
||||
|
||||
- Bugfix race condition when deleting files. (#198)
|
||||
|
||||
|
||||
Tantivy 0.4.2
|
||||
==========================
|
||||
|
||||
- Prevent usage of AVX2 instructions (#201)
|
||||
|
||||
|
||||
Tantivy 0.4.1
|
||||
==========================
|
||||
|
||||
- Bugfix for non-indexed fields. (#199)
|
||||
|
||||
|
||||
Tantivy 0.4.0
|
||||
==========================
|
||||
|
||||
@@ -230,37 +475,31 @@ Tantivy 0.4.0
|
||||
- Searching for a non-indexed field returns an explicit Error
|
||||
- Phrase query for non-tokenized field are not tokenized by the query parser.
|
||||
- Faster/Better indexing (@fulmicoton)
|
||||
- using murmurhash2
|
||||
- faster merging
|
||||
- more memory efficient fast field writer (@lnicola )
|
||||
- better handling of collisions
|
||||
- lesser memory usage
|
||||
- using murmurhash2
|
||||
- faster merging
|
||||
- more memory efficient fast field writer (@lnicola )
|
||||
- better handling of collisions
|
||||
- lesser memory usage
|
||||
- Added API, most notably to iterate over ranges of terms (@fulmicoton)
|
||||
- Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
|
||||
- Made the doc! macro public (@fulmicoton)
|
||||
- Added an alternative implementation of the streaming dictionary (@fulmicoton)
|
||||
|
||||
|
||||
|
||||
Tantivy 0.3.1
|
||||
==========================
|
||||
|
||||
- Expose a method to trigger files garbage collection
|
||||
|
||||
|
||||
|
||||
Tantivy 0.3
|
||||
==========================
|
||||
|
||||
|
||||
Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
|
||||
for their contribution to this release.
|
||||
|
||||
Thanks also to everyone in tantivy gitter chat
|
||||
for their advice and company :)
|
||||
|
||||
https://gitter.im/tantivy-search/tantivy
|
||||
|
||||
<https://gitter.im/tantivy-search/tantivy>
|
||||
|
||||
Warning:
|
||||
|
||||
@@ -269,19 +508,16 @@ code and index format.
|
||||
You should not expect backward compatibility before
|
||||
tantivy 1.0.
|
||||
|
||||
|
||||
|
||||
New Features
|
||||
------------
|
||||
|
||||
- Delete. You can now delete documents from an index.
|
||||
- Support for windows (Thanks to @lnicola)
|
||||
|
||||
|
||||
Various Bugfixes & small improvements
|
||||
----------------------------------------
|
||||
|
||||
- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
|
||||
- Added CI for Windows (<https://ci.appveyor.com/project/fulmicoton/tantivy>)
|
||||
Thanks to @KodrAus ! (#108)
|
||||
- Various dependency version updates (Thanks to @Ameobea) #76
|
||||
- Fixed several race conditions in `Index.wait_merge_threads`
|
||||
@@ -293,7 +529,3 @@ Thanks to @KodrAus ! (#108)
|
||||
- Building binary targets for tantivy-cli (Thanks to @KodrAus)
|
||||
- Misc invisible bug fixes, and code cleanup.
|
||||
- Use
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
150
Cargo.toml
150
Cargo.toml
@@ -1,68 +1,86 @@
|
||||
[package]
|
||||
name = "tantivy"
|
||||
version = "0.10.1"
|
||||
version = "0.19.0"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
description = """Search engine library"""
|
||||
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
|
||||
homepage = "https://github.com/tantivy-search/tantivy"
|
||||
repository = "https://github.com/tantivy-search/tantivy"
|
||||
documentation = "https://docs.rs/tantivy/"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
readme = "README.md"
|
||||
keywords = ["search", "information", "retrieval"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
rust-version = "1.62"
|
||||
|
||||
[dependencies]
|
||||
base64 = "0.10.0"
|
||||
byteorder = "1.0"
|
||||
once_cell = "0.2"
|
||||
regex = "1.0"
|
||||
tantivy-fst = "0.1"
|
||||
memmap = {version = "0.7", optional=true}
|
||||
lz4 = {version="1.20", optional=true}
|
||||
snap = {version="0.2"}
|
||||
atomicwrites = {version="0.2.2", optional=true}
|
||||
tempfile = "3.0"
|
||||
log = "0.4"
|
||||
combine = ">=3.6.0,<4.0.0"
|
||||
tempdir = "0.3"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
num_cpus = "1.2"
|
||||
fs2={version="0.4", optional=true}
|
||||
itertools = "0.8"
|
||||
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
|
||||
notify = {version="4", optional=true}
|
||||
bit-set = "0.5"
|
||||
uuid = { version = "0.7.2", features = ["v4", "serde"] }
|
||||
crossbeam = "0.5"
|
||||
futures = "0.1"
|
||||
futures-cpupool = "0.1"
|
||||
owning_ref = "0.4"
|
||||
stable_deref_trait = "1.0.0"
|
||||
rust-stemmers = "1.1"
|
||||
downcast-rs = { version="1.0" }
|
||||
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
|
||||
census = "0.2"
|
||||
fnv = "1.0.6"
|
||||
owned-read = "0.4"
|
||||
failure = "0.1"
|
||||
oneshot = "0.1.5"
|
||||
base64 = "0.21.0"
|
||||
byteorder = "1.4.3"
|
||||
crc32fast = "1.3.2"
|
||||
once_cell = "1.10.0"
|
||||
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
|
||||
aho-corasick = "0.7"
|
||||
tantivy-fst = "0.4.0"
|
||||
memmap2 = { version = "0.5.3", optional = true }
|
||||
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
|
||||
brotli = { version = "3.3.4", optional = true }
|
||||
zstd = { version = "0.12", optional = true, default-features = false }
|
||||
snap = { version = "1.0.5", optional = true }
|
||||
tempfile = { version = "3.3.0", optional = true }
|
||||
log = "0.4.16"
|
||||
serde = { version = "1.0.136", features = ["derive"] }
|
||||
serde_json = "1.0.79"
|
||||
num_cpus = "1.13.1"
|
||||
fs2 = { version = "0.4.3", optional = true }
|
||||
levenshtein_automata = "0.2.1"
|
||||
uuid = { version = "1.0.0", features = ["v4", "serde"] }
|
||||
crossbeam-channel = "0.5.4"
|
||||
rust-stemmers = "1.2.0"
|
||||
downcast-rs = "1.2.0"
|
||||
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
|
||||
census = "0.4.0"
|
||||
rustc-hash = "1.1.0"
|
||||
thiserror = "1.0.30"
|
||||
htmlescape = "0.3.1"
|
||||
fail = "0.3"
|
||||
scoped-pool = "1.0"
|
||||
murmurhash32 = "0.2"
|
||||
chrono = "0.4"
|
||||
smallvec = "0.6"
|
||||
fail = "0.5.0"
|
||||
murmurhash32 = "0.2.0"
|
||||
time = { version = "0.3.10", features = ["serde-well-known"] }
|
||||
smallvec = "1.8.0"
|
||||
rayon = "1.5.2"
|
||||
lru = "0.9.0"
|
||||
fastdivide = "0.4.0"
|
||||
itertools = "0.10.3"
|
||||
measure_time = "0.8.2"
|
||||
async-trait = "0.1.53"
|
||||
arc-swap = "1.5.0"
|
||||
|
||||
sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
|
||||
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
|
||||
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
|
||||
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
|
||||
columnar = { version= "0.1", path="./columnar", package="tantivy-columnar" }
|
||||
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
|
||||
tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
winapi = "0.3"
|
||||
winapi = "0.3.9"
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.7"
|
||||
maplit = "1"
|
||||
matches = "0.1.8"
|
||||
time = "0.1.42"
|
||||
rand = "0.8.5"
|
||||
maplit = "1.0.2"
|
||||
matches = "0.1.9"
|
||||
pretty_assertions = "1.2.1"
|
||||
proptest = "1.0.0"
|
||||
criterion = "0.4"
|
||||
test-log = "0.2.10"
|
||||
env_logger = "0.10.0"
|
||||
pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
|
||||
futures = "0.3.21"
|
||||
|
||||
[dev-dependencies.fail]
|
||||
version = "0.5.0"
|
||||
features = ["failpoints"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
@@ -74,19 +92,22 @@ debug-assertions = true
|
||||
overflow-checks = true
|
||||
|
||||
[features]
|
||||
default = ["mmap"]
|
||||
mmap = ["atomicwrites", "fs2", "memmap", "notify"]
|
||||
lz4-compression = ["lz4"]
|
||||
default = ["mmap", "stopwords", "lz4-compression"]
|
||||
mmap = ["fs2", "tempfile", "memmap2"]
|
||||
stopwords = []
|
||||
|
||||
brotli-compression = ["brotli"]
|
||||
lz4-compression = ["lz4_flex"]
|
||||
snappy-compression = ["snap"]
|
||||
zstd-compression = ["zstd"]
|
||||
|
||||
failpoints = ["fail/failpoints"]
|
||||
unstable = [] # useful for benches.
|
||||
wasm-bindgen = ["uuid/wasm-bindgen"]
|
||||
|
||||
[badges]
|
||||
travis-ci = { repository = "tantivy-search/tantivy" }
|
||||
|
||||
[dev-dependencies.fail]
|
||||
features = ["failpoints"]
|
||||
quickwit = ["sstable"]
|
||||
|
||||
[workspace]
|
||||
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
|
||||
|
||||
# Following the "fail" crate best practises, we isolate
|
||||
# tests that define specific behavior in fail check points
|
||||
@@ -98,4 +119,13 @@ features = ["failpoints"]
|
||||
[[test]]
|
||||
name = "failpoints"
|
||||
path = "tests/failpoints/mod.rs"
|
||||
required-features = ["fail/failpoints"]
|
||||
required-features = ["fail/failpoints"]
|
||||
|
||||
[[bench]]
|
||||
name = "analyzer"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "index-bench"
|
||||
harness = false
|
||||
|
||||
|
||||
6
Makefile
Normal file
6
Makefile
Normal file
@@ -0,0 +1,6 @@
|
||||
test:
|
||||
echo "Run test only... No examples."
|
||||
cargo test --tests --lib
|
||||
|
||||
fmt:
|
||||
cargo +nightly fmt --all
|
||||
160
README.md
160
README.md
@@ -1,138 +1,142 @@
|
||||
|
||||
[](https://travis-ci.org/tantivy-search/tantivy)
|
||||
[](https://codecov.io/gh/tantivy-search/tantivy)
|
||||
[](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
[](https://docs.rs/crate/tantivy/)
|
||||
[](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml)
|
||||
[](https://codecov.io/gh/quickwit-oss/tantivy)
|
||||
[](https://discord.gg/MT27AG5EVE)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
|
||||
[](https://crates.io/crates/tantivy)
|
||||
[](https://saythanks.io/to/fulmicoton)
|
||||
|
||||

|
||||
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/0)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/1)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/2)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/3)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/4)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/5)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
|
||||
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)
|
||||
**Tantivy** is a **full-text search engine library** written in Rust.
|
||||
|
||||
[](https://www.patreon.com/fulmicoton)
|
||||
|
||||
|
||||
**Tantivy** is a **full text search engine library** written in rust.
|
||||
|
||||
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
|
||||
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
|
||||
an off-the-shelf search engine server, but rather a crate that can be used
|
||||
to build such a search engine.
|
||||
|
||||
Tantivy is, in fact, strongly inspired by Lucene's design.
|
||||
|
||||
If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
|
||||
|
||||
# Benchmark
|
||||
|
||||
Tantivy is typically faster than Lucene, but the results will depend on
|
||||
the nature of the queries in your workload.
|
||||
The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
|
||||
performance for different types of queries/collections.
|
||||
|
||||
The following [benchmark](https://tantivy-search.github.io/bench/) break downs
|
||||
performance for different type of queries / collection.
|
||||
Your mileage WILL vary depending on the nature of queries and their load.
|
||||
|
||||
<img src="doc/assets/images/searchbenchmark.png">
|
||||
|
||||
# Features
|
||||
|
||||
- Full-text search
|
||||
- Configurable tokenizer. (stemming available for 17 latin languages. Third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)
|
||||
- Configurable tokenizer (stemming available for 17 Latin languages) with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
|
||||
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
|
||||
- Tiny startup time (<10ms), perfect for command line tools
|
||||
- BM25 scoring (the same as lucene)
|
||||
- Natural query language `(michael AND jackson) OR "king of pop"`
|
||||
- Phrase queries search (`"michael jackson"`)
|
||||
- Tiny startup time (<10ms), perfect for command-line tools
|
||||
- BM25 scoring (the same as Lucene)
|
||||
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
|
||||
- Phrase queries search (e.g. `"michael jackson"`)
|
||||
- Incremental indexing
|
||||
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
|
||||
- Mmap directory
|
||||
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
|
||||
- Single valued and multivalued u64, i64 and f64 fast fields (equivalent of doc values in Lucene)
|
||||
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
|
||||
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
|
||||
- `&[u8]` fast fields
|
||||
- Text, i64, u64, f64, dates and hierarchical facet fields
|
||||
- LZ4 compressed document store
|
||||
- Text, i64, u64, f64, dates, ip, bool, and hierarchical facet fields
|
||||
- Compressed document store (LZ4, Zstd, None, Brotli, Snap)
|
||||
- Range queries
|
||||
- Faceted search
|
||||
- Configurable indexing (optional term frequency and position indexing)
|
||||
- JSON Field
|
||||
- Aggregation Collector: histogram, range buckets, average, and stats metrics
|
||||
- LogMergePolicy with deletes
|
||||
- Searcher Warmer API
|
||||
- Cheesy logo with a horse
|
||||
|
||||
# Non-features
|
||||
## Non-features
|
||||
|
||||
- Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
|
||||
library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
|
||||
are within the scope of tantivy.
|
||||
|
||||
# Supported OS and compiler
|
||||
|
||||
Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
|
||||
Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
|
||||
|
||||
# Getting started
|
||||
|
||||
- [tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
|
||||
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
|
||||
`tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
|
||||
index documents and search via the CLI or a small server with a REST API.
|
||||
It will walk you through getting a wikipedia search engine up and running in a few minutes.
|
||||
- [reference doc for the last released version](https://docs.rs/tantivy/)
|
||||
Tantivy works on stable Rust and supports Linux, macOS, and Windows.
|
||||
|
||||
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
|
||||
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
|
||||
index documents, and search via the CLI or a small server with a REST API.
|
||||
It walks you through getting a Wikipedia search engine up and running in a few minutes.
|
||||
- [Reference doc for the last released version](https://docs.rs/tantivy/)
|
||||
|
||||
# How can I support this project?
|
||||
|
||||
There are many ways to support this project.
|
||||
There are many ways to support this project.
|
||||
|
||||
- Use tantivy and tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
|
||||
- Use Tantivy and tell us about your experience on [Discord](https://discord.gg/MT27AG5EVE) or by email (paul.masurel@gmail.com)
|
||||
- Report bugs
|
||||
- Write a blog post
|
||||
- Help with documentation by asking questions or submitting PRs
|
||||
- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
|
||||
- Talk about tantivy around you
|
||||
- Drop a word on on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)
|
||||
- Contribute code (you can join [our Discord server](https://discord.gg/MT27AG5EVE))
|
||||
- Talk about Tantivy around you
|
||||
|
||||
# Contributing code
|
||||
|
||||
We use the GitHub Pull Request workflow - reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
|
||||
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
|
||||
Feel free to update CHANGELOG.md with your contribution.
|
||||
|
||||
## Tokenizer
|
||||
|
||||
When implementing a tokenizer for tantivy, depend on the `tantivy-tokenizer-api` crate.
|
||||
|
||||
## Clone and build locally
|
||||
|
||||
Tantivy compiles on stable rust but requires `Rust >= 1.27`.
|
||||
To check out and run tests, you can simply run :
|
||||
Tantivy compiles on stable Rust.
|
||||
To check out and run tests, you can simply run:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/tantivy-search/tantivy.git
|
||||
cd tantivy
|
||||
cargo build
|
||||
git clone https://github.com/quickwit-oss/tantivy.git
|
||||
cd tantivy
|
||||
cargo test
|
||||
```
|
||||
|
||||
## Run tests
|
||||
# Companies Using Tantivy
|
||||
|
||||
Some tests will not run with just `cargo test` because of `fail-rs`.
|
||||
To run the tests exhaustively, run `./run-tests.sh`
|
||||
<p align="left">
|
||||
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
|
||||
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" />
|
||||
<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
|
||||
<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
|
||||
<img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" />
|
||||
<img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />
|
||||
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
|
||||
</p>
|
||||
|
||||
## Debug
|
||||
# FAQ
|
||||
|
||||
You might find it useful to step through the program with a debugger.
|
||||
### Can I use Tantivy in other languages?
|
||||
|
||||
### A failing test
|
||||
- Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
|
||||
- Ruby → [tantiny](https://github.com/baygeldin/tantiny)
|
||||
|
||||
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build`, so that the `target/` dir exists. Use this bash script to find the name of the most recent debug build of tantivy and run it under rust-gdb.
|
||||
You can also find other bindings on [GitHub](https://github.com/search?q=tantivy) but they may be less maintained.
|
||||
|
||||
```bash
|
||||
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
|
||||
```
|
||||
### What are some examples of Tantivy use?
|
||||
|
||||
Now that you are in rust-gdb, you can set breakpoints on lines and methods that match your source code and run the debug executable with the flags that you normally pass to `cargo test`, like this:
|
||||
- [seshat](https://github.com/matrix-org/seshat/): A matrix message database/indexer
|
||||
- [tantiny](https://github.com/baygeldin/tantiny): Tiny full-text search for Ruby
|
||||
- [lnx](https://github.com/lnx-search/lnx): adaptable, typo tolerant search engine with a REST API
|
||||
- and [more](https://github.com/search?q=tantivy)!
|
||||
|
||||
```bash
|
||||
$gdb run --test-threads 1 --test $NAME_OF_TEST
|
||||
```
|
||||
### On average, how much faster is Tantivy compared to Lucene?
|
||||
|
||||
### An example
|
||||
- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
|
||||
|
||||
By default, rustc compiles everything in the `examples/` dir in debug mode. This makes it easy for you to make examples to reproduce bugs.
|
||||
### Does tantivy support incremental indexing?
|
||||
|
||||
```bash
|
||||
rust-gdb target/debug/examples/$EXAMPLE_NAME
|
||||
$ gdb run
|
||||
```
|
||||
- Yes.
|
||||
|
||||
### How can I edit documents?
|
||||
|
||||
- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.
|
||||
|
||||
### When will my documents be searchable during indexing?
|
||||
|
||||
- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`s. A minimal sketch tying this together is shown below.
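A minimal sketch of editing a document and making the change visible (the writer, reader, field handles, and values below are made up; error handling elided):

```rust
// Delete the old version of a document by a unique term, add the new
// version, and commit; readers pick up the change after a reload.
index_writer.delete_term(Term::from_field_text(id_field, "doc-42"));
index_writer.add_document(doc!(id_field => "doc-42", body_field => "updated text"))?;
index_writer.commit()?;

reader.reload()?; // or let the ReloadPolicy pick up the change automatically
let searcher = reader.searcher(); // this newly acquired searcher sees the edit
```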
|
||||
|
||||
18
TODO.txt
Normal file
18
TODO.txt
Normal file
@@ -0,0 +1,18 @@
|
||||
Make schema_builder API fluent.
|
||||
fix doc serialization and prevent compression problems
|
||||
|
||||
u64, etc. should return Result<Option> now that we support optional values; missing a column is really not an error
|
||||
remove fastfield codecs
|
||||
ditch the first_or_default trick. if it is still useful, improve its implementation.
|
||||
rename FastFieldReaders::open to load
|
||||
|
||||
|
||||
remove fast field reader
|
||||
|
||||
find a way to unify the two DateTime.
|
||||
readd type check in the filter wrapper
|
||||
|
||||
add unit test on columnar list columns.
|
||||
|
||||
make sure sort works
|
||||
|
||||
@@ -18,5 +18,6 @@ install:
|
||||
build: false
|
||||
|
||||
test_script:
|
||||
- REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap
|
||||
- REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features lz4-compression --features mmap
|
||||
- REM SET RUST_LOG=tantivy,test & cargo test test_store --verbose --no-default-features --features lz4-compression --features snappy-compression --features brotli-compression --features mmap
|
||||
- REM SET RUST_BACKTRACE=1 & cargo build --examples
|
||||
|
||||
3774
benches/alice.txt
Normal file
3774
benches/alice.txt
Normal file
File diff suppressed because it is too large
Load Diff
22
benches/analyzer.rs
Normal file
22
benches/analyzer.rs
Normal file
@@ -0,0 +1,22 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use tantivy::tokenizer::TokenizerManager;
|
||||
|
||||
const ALICE_TXT: &str = include_str!("alice.txt");
|
||||
|
||||
pub fn criterion_benchmark(c: &mut Criterion) {
|
||||
let tokenizer_manager = TokenizerManager::default();
|
||||
let tokenizer = tokenizer_manager.get("default").unwrap();
|
||||
c.bench_function("default-tokenize-alice", |b| {
|
||||
b.iter(|| {
|
||||
let mut word_count = 0;
|
||||
let mut token_stream = tokenizer.token_stream(ALICE_TXT);
|
||||
while token_stream.advance() {
|
||||
word_count += 1;
|
||||
}
|
||||
assert_eq!(word_count, 30_731);
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
criterion_main!(benches);
|
||||
100000
benches/hdfs.json
Normal file
100000
benches/hdfs.json
Normal file
File diff suppressed because it is too large
Load Diff
121
benches/index-bench.rs
Normal file
121
benches/index-bench.rs
Normal file
@@ -0,0 +1,121 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use pprof::criterion::{Output, PProfProfiler};
|
||||
use tantivy::schema::{INDEXED, STORED, STRING, TEXT};
|
||||
use tantivy::Index;
|
||||
|
||||
const HDFS_LOGS: &str = include_str!("hdfs.json");
|
||||
const NUM_REPEATS: usize = 2;
|
||||
|
||||
pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
let schema = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_u64_field("timestamp", INDEXED);
|
||||
schema_builder.add_text_field("body", TEXT);
|
||||
schema_builder.add_text_field("severity", STRING);
|
||||
schema_builder.build()
|
||||
};
|
||||
let schema_with_store = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_u64_field("timestamp", INDEXED | STORED);
|
||||
schema_builder.add_text_field("body", TEXT | STORED);
|
||||
schema_builder.add_text_field("severity", STRING | STORED);
|
||||
schema_builder.build()
|
||||
};
|
||||
let dynamic_schema = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_json_field("json", TEXT);
|
||||
schema_builder.build()
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group("index-hdfs");
|
||||
group.sample_size(20);
|
||||
group.bench_function("index-hdfs-no-commit", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split('\n') {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split('\n') {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split('\n') {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split('\n') {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split('\n') {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split('\n') {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group! {
|
||||
name = benches;
|
||||
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
|
||||
targets = hdfs_index_benchmark
|
||||
}
|
||||
criterion_main!(benches);
|
||||
21
bitpacker/Cargo.toml
Normal file
21
bitpacker/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
[package]
name = "tantivy-bitpacker"
version = "0.3.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = []
description = """Tantivy-sub crate: bitpacking"""
repository = "https://github.com/quickwit-oss/tantivy"
keywords = []
documentation = "https://docs.rs/tantivy-bitpacker/latest/tantivy_bitpacker"
homepage = "https://github.com/quickwit-oss/tantivy"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

[dev-dependencies]
rand = "0.8"
proptest = "1"

65  bitpacker/benches/bench.rs  (new file)
@@ -0,0 +1,65 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use rand::seq::IteratorRandom;
|
||||
use rand::thread_rng;
|
||||
use tantivy_bitpacker::{BitPacker, BitUnpacker, BlockedBitpacker};
|
||||
use test::Bencher;
|
||||
|
||||
#[inline(never)]
|
||||
fn create_bitpacked_data(bit_width: u8, num_els: u32) -> Vec<u8> {
|
||||
let mut bitpacker = BitPacker::new();
|
||||
let mut buffer = Vec::new();
|
||||
for _ in 0..num_els {
// the values do not matter.
bitpacker.write(0u64, bit_width, &mut buffer).unwrap();
}
// flush once, after all values have been written, so the data is truly bitpacked
bitpacker.flush(&mut buffer).unwrap();
buffer
}
|
||||
|
||||
#[bench]
|
||||
fn bench_bitpacking_read(b: &mut Bencher) {
|
||||
let bit_width = 3;
|
||||
let num_els = 1_000_000u32;
|
||||
let bit_unpacker = BitUnpacker::new(bit_width);
|
||||
let data = create_bitpacked_data(bit_width, num_els);
|
||||
let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut thread_rng(), 100_000);
|
||||
b.iter(|| {
|
||||
let mut out = 0u64;
|
||||
for &idx in &idxs {
|
||||
out = out.wrapping_add(bit_unpacker.get(idx, &data[..]));
|
||||
}
|
||||
out
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_blockedbitp_read(b: &mut Bencher) {
|
||||
let mut blocked_bitpacker = BlockedBitpacker::new();
|
||||
for val in 0..=21500 {
|
||||
blocked_bitpacker.add(val * val);
|
||||
}
|
||||
b.iter(|| {
|
||||
let mut out = 0u64;
|
||||
for val in 0..=21500 {
|
||||
out = out.wrapping_add(blocked_bitpacker.get(val));
|
||||
}
|
||||
out
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_blockedbitp_create(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let mut blocked_bitpacker = BlockedBitpacker::new();
|
||||
for val in 0..=21500 {
|
||||
blocked_bitpacker.add(val * val);
|
||||
}
|
||||
blocked_bitpacker
|
||||
});
|
||||
}
|
||||
}

203  bitpacker/src/bitpacker.rs  (new file)
@@ -0,0 +1,203 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io;
|
||||
|
||||
pub struct BitPacker {
|
||||
mini_buffer: u64,
|
||||
mini_buffer_written: usize,
|
||||
}
|
||||
impl Default for BitPacker {
|
||||
fn default() -> Self {
|
||||
BitPacker::new()
|
||||
}
|
||||
}
|
||||
impl BitPacker {
|
||||
pub fn new() -> BitPacker {
|
||||
BitPacker {
|
||||
mini_buffer: 0u64,
|
||||
mini_buffer_written: 0,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn write<TWrite: io::Write + ?Sized>(
|
||||
&mut self,
|
||||
val: u64,
|
||||
num_bits: u8,
|
||||
output: &mut TWrite,
|
||||
) -> io::Result<()> {
|
||||
let num_bits = num_bits as usize;
|
||||
if self.mini_buffer_written + num_bits > 64 {
|
||||
self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
|
||||
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
|
||||
self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
|
||||
self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
|
||||
} else {
|
||||
self.mini_buffer |= val << self.mini_buffer_written;
|
||||
self.mini_buffer_written += num_bits;
|
||||
if self.mini_buffer_written == 64 {
|
||||
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
|
||||
self.mini_buffer_written = 0;
|
||||
self.mini_buffer = 0u64;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn flush<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
|
||||
if self.mini_buffer_written > 0 {
|
||||
let num_bytes = (self.mini_buffer_written + 7) / 8;
|
||||
let bytes = self.mini_buffer.to_le_bytes();
|
||||
output.write_all(&bytes[..num_bytes])?;
|
||||
self.mini_buffer_written = 0;
|
||||
self.mini_buffer = 0;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn close<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
|
||||
self.flush(output)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Copy)]
|
||||
pub struct BitUnpacker {
|
||||
num_bits: u32,
|
||||
mask: u64,
|
||||
}
|
||||
|
||||
impl BitUnpacker {
|
||||
/// Creates a bit unpacker that assumes the same bit width for all values.
///
/// The bit unpacker works by doing an unaligned read of 8 bytes.
/// For this reason, values of `num_bits` in the range `57..=63` are forbidden.
|
||||
pub fn new(num_bits: u8) -> BitUnpacker {
|
||||
assert!(num_bits <= 7 * 8 || num_bits == 64);
|
||||
let mask: u64 = if num_bits == 64 {
|
||||
!0u64
|
||||
} else {
|
||||
(1u64 << num_bits) - 1u64
|
||||
};
|
||||
BitUnpacker {
|
||||
num_bits: u32::from(num_bits),
|
||||
mask,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bit_width(&self) -> u8 {
|
||||
self.num_bits as u8
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
|
||||
let addr_in_bits = idx * self.num_bits;
|
||||
let addr = (addr_in_bits >> 3) as usize;
|
||||
if addr + 8 > data.len() {
|
||||
if self.num_bits == 0 {
|
||||
return 0;
|
||||
}
|
||||
let bit_shift = addr_in_bits & 7;
|
||||
return self.get_slow_path(addr, bit_shift, data);
|
||||
}
|
||||
let bit_shift = addr_in_bits & 7;
|
||||
let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
|
||||
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
|
||||
let val_shifted = val_unshifted_unmasked >> bit_shift;
|
||||
val_shifted & self.mask
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn get_slow_path(&self, addr: usize, bit_shift: u32, data: &[u8]) -> u64 {
|
||||
let mut bytes: [u8; 8] = [0u8; 8];
|
||||
let available_bytes = data.len() - addr;
|
||||
// This function is meant to only be called if we did not have 8 bytes to load.
|
||||
debug_assert!(available_bytes < 8);
|
||||
bytes[..available_bytes].copy_from_slice(&data[addr..]);
|
||||
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
|
||||
let val_shifted = val_unshifted_unmasked >> bit_shift;
|
||||
val_shifted & self.mask
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{BitPacker, BitUnpacker};
|
||||
|
||||
fn create_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
|
||||
let mut data = Vec::new();
|
||||
let mut bitpacker = BitPacker::new();
|
||||
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
||||
let vals: Vec<u64> = (0u64..len as u64)
|
||||
.map(|i| if max_val == 0 { 0 } else { i % max_val })
|
||||
.collect();
|
||||
for &val in &vals {
|
||||
bitpacker.write(val, num_bits, &mut data).unwrap();
|
||||
}
|
||||
bitpacker.close(&mut data).unwrap();
|
||||
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8);
|
||||
let bitunpacker = BitUnpacker::new(num_bits);
|
||||
(bitunpacker, vals, data)
|
||||
}
|
||||
|
||||
fn test_bitpacker_util(len: usize, num_bits: u8) {
|
||||
let (bitunpacker, vals, data) = create_bitpacker(len, num_bits);
|
||||
for (i, val) in vals.iter().enumerate() {
|
||||
assert_eq!(bitunpacker.get(i as u32, &data), *val);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitpacker() {
|
||||
test_bitpacker_util(10, 3);
|
||||
test_bitpacker_util(10, 0);
|
||||
test_bitpacker_util(10, 1);
|
||||
test_bitpacker_util(6, 14);
|
||||
test_bitpacker_util(1000, 14);
|
||||
}
|
||||
|
||||
use proptest::prelude::*;
|
||||
|
||||
fn num_bits_strategy() -> impl Strategy<Value = u8> {
|
||||
prop_oneof!(Just(0), Just(1), 2u8..56u8, Just(56), Just(64),)
|
||||
}
|
||||
|
||||
fn vals_strategy() -> impl Strategy<Value = (u8, Vec<u64>)> {
|
||||
(num_bits_strategy(), 0usize..100usize).prop_flat_map(|(num_bits, len)| {
|
||||
let max_val = if num_bits == 64 {
|
||||
u64::MAX
|
||||
} else {
|
||||
(1u64 << num_bits as u32) - 1
|
||||
};
|
||||
let vals = proptest::collection::vec(0..=max_val, len);
|
||||
vals.prop_map(move |vals| (num_bits, vals))
|
||||
})
|
||||
}
|
||||
|
||||
fn test_bitpacker_aux(num_bits: u8, vals: &[u64]) {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
let mut bitpacker = BitPacker::new();
|
||||
for &val in vals {
|
||||
bitpacker.write(val, num_bits, &mut buffer).unwrap();
|
||||
}
|
||||
bitpacker.flush(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), (vals.len() * num_bits as usize + 7) / 8);
|
||||
let bitunpacker = BitUnpacker::new(num_bits);
|
||||
let max_val = if num_bits == 64 {
|
||||
u64::MAX
|
||||
} else {
|
||||
(1u64 << num_bits) - 1
|
||||
};
|
||||
for (i, val) in vals.iter().copied().enumerate() {
|
||||
assert!(val <= max_val);
|
||||
assert_eq!(bitunpacker.get(i as u32, &buffer), val);
|
||||
}
|
||||
}
|
||||
|
||||
proptest::proptest! {
|
||||
#[test]
|
||||
fn test_bitpacker_proptest((num_bits, vals) in vals_strategy()) {
|
||||
test_bitpacker_aux(num_bits, &vals);
|
||||
}
|
||||
}
|
||||
}

179  bitpacker/src/blocked_bitpacker.rs  (new file)
@@ -0,0 +1,179 @@
|
||||
use super::bitpacker::BitPacker;
|
||||
use super::compute_num_bits;
|
||||
use crate::{minmax, BitUnpacker};
|
||||
|
||||
const BLOCK_SIZE: usize = 128;
|
||||
|
||||
/// `BlockedBitpacker` compresses data in blocks of
|
||||
/// 128 elements, while keeping an index on it
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BlockedBitpacker {
|
||||
// bitpacked blocks
|
||||
compressed_blocks: Vec<u8>,
|
||||
// uncompressed data, collected until BLOCK_SIZE
|
||||
buffer: Vec<u64>,
|
||||
offset_and_bits: Vec<BlockedBitpackerEntryMetaData>,
|
||||
}
|
||||
impl Default for BlockedBitpacker {
|
||||
fn default() -> Self {
|
||||
BlockedBitpacker::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// `BlockedBitpackerEntryMetaData` encodes the
|
||||
/// offset and bit_width into a u64 bit field
|
||||
///
|
||||
/// This saves some space, since 7 bytes are more
/// than enough, and it also keeps access fast
/// thanks to alignment.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
struct BlockedBitpackerEntryMetaData {
|
||||
encoded: u64,
|
||||
base_value: u64,
|
||||
}
|
||||
|
||||
impl BlockedBitpackerEntryMetaData {
|
||||
fn new(offset: u64, num_bits: u8, base_value: u64) -> Self {
|
||||
let encoded = offset | (num_bits as u64) << (64 - 8);
|
||||
Self {
|
||||
encoded,
|
||||
base_value,
|
||||
}
|
||||
}
|
||||
fn offset(&self) -> u64 {
|
||||
(self.encoded << 8) >> 8
|
||||
}
|
||||
fn num_bits(&self) -> u8 {
|
||||
(self.encoded >> 56) as u8
|
||||
}
|
||||
fn base_value(&self) -> u64 {
|
||||
self.base_value
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn metadata_test() {
|
||||
let meta = BlockedBitpackerEntryMetaData::new(50000, 6, 40000);
|
||||
assert_eq!(meta.offset(), 50000);
|
||||
assert_eq!(meta.num_bits(), 6);
|
||||
}
|
||||
|
||||
fn mem_usage<T>(items: &Vec<T>) -> usize {
|
||||
items.capacity() * std::mem::size_of::<T>()
|
||||
}
|
||||
|
||||
impl BlockedBitpacker {
|
||||
pub fn new() -> Self {
|
||||
let mut compressed_blocks = vec![];
|
||||
compressed_blocks.resize(8, 0);
|
||||
Self {
|
||||
compressed_blocks,
|
||||
buffer: vec![],
|
||||
offset_and_bits: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// The memory used (including children).
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
std::mem::size_of::<BlockedBitpacker>()
|
||||
+ self.compressed_blocks.capacity()
|
||||
+ mem_usage(&self.offset_and_bits)
|
||||
+ mem_usage(&self.buffer)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add(&mut self, val: u64) {
|
||||
self.buffer.push(val);
|
||||
if self.buffer.len() == BLOCK_SIZE {
|
||||
self.flush();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flush(&mut self) {
|
||||
if let Some((min_value, max_value)) = minmax(self.buffer.iter()) {
|
||||
let mut bit_packer = BitPacker::new();
|
||||
let num_bits_block = compute_num_bits(*max_value - min_value);
|
||||
// todo performance: the padding handling could be done better, e.g. use a slice and
|
||||
// return num_bytes written from bitpacker
|
||||
self.compressed_blocks
|
||||
.resize(self.compressed_blocks.len() - 8, 0); // remove padding for bitpacker
|
||||
let offset = self.compressed_blocks.len() as u64;
|
||||
// todo performance: for some bit_width we
|
||||
// can encode multiple vals into the
|
||||
// mini_buffer before checking to flush
|
||||
// (to be done in BitPacker)
|
||||
for val in self.buffer.iter() {
|
||||
bit_packer
|
||||
.write(
|
||||
*val - min_value,
|
||||
num_bits_block,
|
||||
&mut self.compressed_blocks,
|
||||
)
|
||||
.expect("cannot write bitpacking to output"); // write to in memory can't fail
|
||||
}
|
||||
bit_packer.flush(&mut self.compressed_blocks).unwrap();
|
||||
self.offset_and_bits
|
||||
.push(BlockedBitpackerEntryMetaData::new(
|
||||
offset,
|
||||
num_bits_block,
|
||||
*min_value,
|
||||
));
|
||||
|
||||
self.buffer.clear();
|
||||
self.compressed_blocks
|
||||
.resize(self.compressed_blocks.len() + 8, 0); // add padding for bitpacker
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub fn get(&self, idx: usize) -> u64 {
|
||||
let metadata_pos = idx / BLOCK_SIZE;
|
||||
let pos_in_block = idx % BLOCK_SIZE;
|
||||
if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
|
||||
let unpacked = BitUnpacker::new(metadata.num_bits()).get(
|
||||
pos_in_block as u32,
|
||||
&self.compressed_blocks[metadata.offset() as usize..],
|
||||
);
|
||||
unpacked + metadata.base_value()
|
||||
} else {
|
||||
self.buffer[pos_in_block]
|
||||
}
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> impl Iterator<Item = u64> + '_ {
|
||||
// todo performance: we could decompress a whole block and cache it instead
|
||||
let bitpacked_elems = self.offset_and_bits.len() * BLOCK_SIZE;
|
||||
let iter = (0..bitpacked_elems)
|
||||
.map(move |idx| self.get(idx))
|
||||
.chain(self.buffer.iter().cloned());
|
||||
iter
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
#[test]
|
||||
fn blocked_bitpacker_empty() {
|
||||
let blocked_bitpacker = BlockedBitpacker::new();
|
||||
assert_eq!(blocked_bitpacker.iter().collect::<Vec<u64>>(), vec![]);
|
||||
}
|
||||
#[test]
|
||||
fn blocked_bitpacker_one() {
|
||||
let mut blocked_bitpacker = BlockedBitpacker::new();
|
||||
blocked_bitpacker.add(50000);
|
||||
assert_eq!(blocked_bitpacker.get(0), 50000);
|
||||
assert_eq!(blocked_bitpacker.iter().collect::<Vec<u64>>(), vec![50000]);
|
||||
}
|
||||
#[test]
|
||||
fn blocked_bitpacker_test() {
|
||||
let mut blocked_bitpacker = BlockedBitpacker::new();
|
||||
for val in 0..21500 {
|
||||
blocked_bitpacker.add(val);
|
||||
}
|
||||
for val in 0..21500 {
|
||||
assert_eq!(blocked_bitpacker.get(val as usize), val);
|
||||
}
|
||||
assert_eq!(blocked_bitpacker.iter().count(), 21500);
|
||||
assert_eq!(blocked_bitpacker.iter().last().unwrap(), 21499);
|
||||
}
|
||||
}

142  bitpacker/src/lib.rs  (new file)
@@ -0,0 +1,142 @@
|
||||
mod bitpacker;
|
||||
mod blocked_bitpacker;
|
||||
|
||||
use std::cmp::Ordering;
|
||||
|
||||
pub use crate::bitpacker::{BitPacker, BitUnpacker};
|
||||
pub use crate::blocked_bitpacker::BlockedBitpacker;
|
||||
|
||||
/// Computes the number of bits that will be used for bitpacking.
|
||||
///
|
||||
/// In general the target is the minimum number of bits
|
||||
/// required to express the amplitude given in argument.
|
||||
///
|
||||
/// e.g. If the amplitude is 10, we can store all ints on simply 4bits.
|
||||
///
|
||||
/// The logic is slightly more convoluted here as for optimization
|
||||
/// reasons, we want to ensure that a value spans over at most 8 bytes
|
||||
/// of aligned bytes.
|
||||
///
|
||||
/// Spanning over 9 bytes is possible for instance, if we do
|
||||
/// bitpacking with an amplitude of 63 bits.
|
||||
/// In this case, the second int will start on bit
|
||||
/// 63 (which belongs to byte 7) and ends at byte 15;
|
||||
/// Hence 9 bytes (from byte 7 to byte 15 included).
|
||||
///
|
||||
/// To avoid this, we force the number of bits to 64bits
|
||||
/// when the result is greater than `64-8 = 56 bits`.
|
||||
///
|
||||
/// Note that this only affects rare use cases spanning over
|
||||
/// a very large range of values. Even in this case, it results
|
||||
/// in an extra cost of at most 12% compared to the optimal
|
||||
/// number of bits.
|
||||
pub fn compute_num_bits(n: u64) -> u8 {
|
||||
let amplitude = (64u32 - n.leading_zeros()) as u8;
|
||||
if amplitude <= 64 - 8 {
|
||||
amplitude
|
||||
} else {
|
||||
64
|
||||
}
|
||||
}
|
||||
|
||||
/// Computes the (min, max) of an iterator of `PartialOrd` values.
|
||||
///
|
||||
/// For values implementing `Ord` (in a way consistent to their `PartialOrd` impl),
|
||||
/// this function behaves as expected.
|
||||
///
|
||||
/// For values with partial ordering, the behavior is non-trivial and may
|
||||
/// depend on the order of the values.
|
||||
/// For floats however, it simply returns the same results as if NaN were
|
||||
/// skipped.
|
||||
pub fn minmax<I, T>(mut vals: I) -> Option<(T, T)>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Copy + PartialOrd,
|
||||
{
|
||||
let first_el = vals.find(|val| {
|
||||
// We use this to make sure we skip all NaN values when
|
||||
// working with a float type.
|
||||
val.partial_cmp(val) == Some(Ordering::Equal)
|
||||
})?;
|
||||
let mut min_so_far: T = first_el;
|
||||
let mut max_so_far: T = first_el;
|
||||
for val in vals {
|
||||
if val.partial_cmp(&min_so_far) == Some(Ordering::Less) {
|
||||
min_so_far = val;
|
||||
}
|
||||
if val.partial_cmp(&max_so_far) == Some(Ordering::Greater) {
|
||||
max_so_far = val;
|
||||
}
|
||||
}
|
||||
Some((min_so_far, max_so_far))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_compute_num_bits() {
|
||||
assert_eq!(compute_num_bits(1), 1u8);
|
||||
assert_eq!(compute_num_bits(0), 0u8);
|
||||
assert_eq!(compute_num_bits(2), 2u8);
|
||||
assert_eq!(compute_num_bits(3), 2u8);
|
||||
assert_eq!(compute_num_bits(4), 3u8);
|
||||
assert_eq!(compute_num_bits(255), 8u8);
|
||||
assert_eq!(compute_num_bits(256), 9u8);
|
||||
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_minmax_empty() {
|
||||
let vals: Vec<u32> = vec![];
|
||||
assert_eq!(minmax(vals.into_iter()), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_minmax_one() {
|
||||
assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_minmax_two() {
|
||||
assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
|
||||
assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_minmax_nan() {
|
||||
assert_eq!(
|
||||
minmax(vec![f64::NAN, 1f64, 2f64].into_iter()),
|
||||
Some((1f64, 2f64))
|
||||
);
|
||||
assert_eq!(
|
||||
minmax(vec![2f64, f64::NAN, 1f64].into_iter()),
|
||||
Some((1f64, 2f64))
|
||||
);
|
||||
assert_eq!(
|
||||
minmax(vec![2f64, 1f64, f64::NAN].into_iter()),
|
||||
Some((1f64, 2f64))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_minmax_inf() {
|
||||
assert_eq!(
|
||||
minmax(vec![f64::INFINITY, 1f64, 2f64].into_iter()),
|
||||
Some((1f64, f64::INFINITY))
|
||||
);
|
||||
assert_eq!(
|
||||
minmax(vec![-f64::INFINITY, 1f64, 2f64].into_iter()),
|
||||
Some((-f64::INFINITY, 2f64))
|
||||
);
|
||||
assert_eq!(
|
||||
minmax(vec![2f64, f64::INFINITY, 1f64].into_iter()),
|
||||
Some((1f64, f64::INFINITY))
|
||||
);
|
||||
assert_eq!(
|
||||
minmax(vec![2f64, 1f64, -f64::INFINITY].into_iter()),
|
||||
Some((-f64::INFINITY, 2f64))
|
||||
);
|
||||
}
|
||||
}
@@ -7,7 +7,7 @@ set -ex
main() {
if [ ! -z $CODECOV ]; then
echo "Codecov"
- cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
+ cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
else
echo "Build"
cross build --target $TARGET
@@ -15,7 +15,8 @@ main() {
return
fi
echo "Test"
- cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
+ cross test --target $TARGET --no-default-features --features mmap
+ cross test --target $TARGET --no-default-features --features mmap query-grammar
fi
for example in $(ls examples/*.rs)
do

28  columnar/Cargo.toml  (new file)
@@ -0,0 +1,28 @@
|
||||
[package]
|
||||
name = "tantivy-columnar"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
stacker = { path = "../stacker", package="tantivy-stacker"}
|
||||
serde_json = "1"
|
||||
thiserror = "1"
|
||||
fnv = "1"
|
||||
sstable = { path = "../sstable", package = "tantivy-sstable" }
|
||||
common = { path = "../common", package = "tantivy-common" }
|
||||
itertools = "0.10"
|
||||
log = "0.4"
|
||||
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
|
||||
prettytable-rs = {version="0.10.0", optional= true}
|
||||
rand = {version="0.8.3", optional= true}
|
||||
fastdivide = "0.4"
|
||||
measure_time = { version="0.8.2", optional=true}
|
||||
|
||||
[dev-dependencies]
|
||||
proptest = "1"
|
||||
more-asserts = "0.3.0"
|
||||
rand = "0.8.3"
|
||||
|
||||
[features]
|
||||
unstable = []

6  columnar/Makefile  (new file)
@@ -0,0 +1,6 @@
|
||||
test:
|
||||
echo "Run test only... No examples."
|
||||
cargo test --tests --lib
|
||||
|
||||
fmt:
|
||||
cargo +nightly fmt --all

109  columnar/README.md  (new file)
@@ -0,0 +1,109 @@
# Columnar format

This crate describes the columnar format used in tantivy.

## Goals

This format is special in the following ways.
- it needs to be compact
- accessing a specific column does not require loading the entire columnar file. It can be done in 2 to 3 random accesses.
- columns of several types can be associated with the same column name.
- it needs to support columns with different types `(str, u64, i64, f64)`
  and different cardinalities `(required, optional, multivalued)`.
- columns, once loaded, offer cheap random access.
- it is designed to allow range queries.

# Coercion rules

Users can create a columnar by inserting rows into a `ColumnarWriter`,
and serializing it into a `Write` object.
Nothing prevents a user from recording values of different types under the same `column_name`.

In that case, `tantivy-columnar`'s behavior is as follows:
- JSON values are grouped into 3 types (String, Number, bool).
  Values that correspond to different groups are mapped to different columns. For instance, String values are treated independently
  from Number or boolean values. `tantivy-columnar` will simply emit several columns associated with a given column_name.
- Only one column for a given JSON value type is emitted. If number values with different number types are recorded (e.g. u64, i64, f64),
  `tantivy-columnar` will pick the first type that can represent the set of appended values, with the following priority order (`i64`, `u64`, `f64`).
  `i64` is picked over `u64` as it is less likely to require a change of type. Most use cases strictly requiring `u64` hit that
  restriction on about 50% of the values (e.g. a 64-bit hash). On the other hand, a lot of use cases only rarely contain a negative value.
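
The sketch below illustrates that priority rule. It is only illustrative: `NumericalType` and `coerce_numerical_values` are hypothetical names used for this README, not items exposed by the crate.

```rust
/// Illustrative only: picks the column type for a set of recorded JSON
/// numbers, following the (i64, u64, f64) priority order described above.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum NumericalType {
    I64,
    U64,
    F64,
}

fn coerce_numerical_values(values: &[serde_json::Number]) -> NumericalType {
    if values.iter().all(|num| num.as_i64().is_some()) {
        // First choice: every value fits in an i64.
        NumericalType::I64
    } else if values.iter().all(|num| num.as_u64().is_some()) {
        // Second choice: every value fits in a u64.
        NumericalType::U64
    } else {
        // Fallback: f64 can represent (possibly with some loss) any JSON number.
        NumericalType::F64
    }
}
```

For instance, `[1, 2, -3]` coerces to `I64`, `[1, u64::MAX]` to `U64`, and `[1.5, 2]` to `F64`.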

# Columnar format

This columnar format may have more than one column (with different types) associated with the same `column_name` (see [Coercion rules](#coercion-rules) above).
The `(column_name, column_type)` couple however uniquely identifies a column.
That couple is serialized as a column `column_key`. The format of that key is:
`[column_name][ZERO_BYTE][column_type_header: u8]`
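
As a sketch, building such a key amounts to concatenating the name, the zero byte, and the one-byte type header. The helper below is hypothetical; the concrete `column_type_header` values are an implementation detail of the crate.

```rust
/// Hypothetical helper: builds `[column_name][ZERO_BYTE][column_type_header: u8]`.
fn build_column_key(column_name: &str, column_type_header: u8) -> Vec<u8> {
    // Column names may not contain the zero byte (see below).
    assert!(!column_name.as_bytes().contains(&0u8));
    let mut key = Vec::with_capacity(column_name.len() + 2);
    key.extend_from_slice(column_name.as_bytes());
    key.push(0u8); // ZERO_BYTE separator
    key.push(column_type_header);
    key
}
```

Listing every column recorded under a `column_name` then boils down to a prefix scan over `[column_name][ZERO_BYTE]`, as described below.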

```
COLUMNAR:=
    [COLUMNAR_DATA]
    [COLUMNAR_KEY_TO_DATA_INDEX]
    [COLUMNAR_FOOTER];


# Columns are sorted by their column key.
COLUMNAR_DATA:=
    [COLUMN_DATA]+;

COLUMNAR_FOOTER := [RANGE_SSTABLE_BYTES_LEN: 8 bytes little endian]

```

The columnar file starts with the actual column data, concatenated one after the other,
sorted by column key.

An sstable associates
`(column name, column_cardinality, column_type)` to a range of bytes.

Column names may not contain the zero byte `\0`.

Listing all columns associated with a `column_name` can therefore
be done by listing all keys prefixed by
`[column_name][ZERO_BYTE]`.

The associated range of bytes refers to the slice of `COLUMNAR_DATA` that holds the column.
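
As a minimal sketch, and assuming only the layout above (an 8-byte little-endian length stored in the footer), a reader could split a columnar file into its column data and its key-to-data sstable as follows. `split_columnar` is an illustrative helper, not the crate's API, and error handling is elided.

```rust
/// Illustrative only: splits a columnar file into
/// (column data, key-to-data sstable bytes) using the footer.
fn split_columnar(file: &[u8]) -> (&[u8], &[u8]) {
    // The footer is the last 8 bytes: the sstable length, little endian.
    let (body, footer) = file.split_at(file.len() - 8);
    let sstable_len = u64::from_le_bytes(footer.try_into().unwrap()) as usize;
    // The sstable sits right before the footer; the column data comes first.
    body.split_at(body.len() - sstable_len)
}
```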

This crate exposes a columnar format for tantivy.
This format is described in this README.


The crate introduces the following concepts.

`Columnar` is the equivalent of a dataframe.
It maps a `column_key` to a `Column`.

A `Column<T>` associates a `RowId` (u32) with any
number of values.

This is made possible by wrapping a `ColumnIndex` and a `ColumnValue` object.
The `ColumnValue<T>` represents a mapping that associates each `RowId` with
exactly one single value.

The `ColumnIndex` then maps each `RowId` to a set of `RowId`s in the
`ColumnValue`.

For optimization and compression purposes, the `ColumnIndex` has three
possible representations, one for each cardinality.

- Full

  All `RowId`s have exactly one value. The `ColumnIndex` is the trivial mapping.

- Optional

  All `RowId`s have at most one value. The `ColumnIndex` is the trivial mapping `ColumnRowId -> Option<ColumnValueRowId>`.

- Multivalued

  All `RowId`s can have any number of values.
  The column index maps each row to a range of value rows.
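
The following is a simplified stand-in for those three representations, showing only how each one resolves a row to its range of value rows. The real `ColumnIndex` and `OptionalIndex` types in the `column_index` module are richer; this is just a sketch.

```rust
use std::ops::Range;

type RowId = u32;

enum ColumnIndexSketch {
    /// Every row has exactly one value: the mapping is the identity.
    Full,
    /// At most one value per row: a sorted list of the rows that have one.
    Optional { non_null_rows: Vec<RowId> },
    /// Any number of values per row: one start offset per row, plus a final
    /// entry holding the total number of values.
    Multivalued { start_offsets: Vec<RowId> },
}

impl ColumnIndexSketch {
    /// Returns the range of value rows associated with `row_id`.
    fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
        match self {
            ColumnIndexSketch::Full => row_id..row_id + 1,
            ColumnIndexSketch::Optional { non_null_rows } => {
                match non_null_rows.binary_search(&row_id) {
                    Ok(rank) => rank as RowId..rank as RowId + 1,
                    Err(_) => 0..0,
                }
            }
            ColumnIndexSketch::Multivalued { start_offsets } => {
                start_offsets[row_id as usize]..start_offsets[row_id as usize + 1]
            }
        }
    }
}
```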

All these objects are implemented and unit tested independently
in their own modules:

- columnar
- column_index
- column_values
- column

129  columnar/benches/bench_u128.rs  (new file)
@@ -0,0 +1,129 @@
|
||||
#![feature(test)]
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::{random, Rng, SeedableRng};
|
||||
use tantivy_columnar::ColumnValues;
|
||||
use test::Bencher;
|
||||
extern crate test;
|
||||
|
||||
// TODO does this make sense for IPv6 ?
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn get_u128_column_random() -> Arc<dyn ColumnValues<u128>> {
|
||||
let permutation = generate_random();
|
||||
let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
get_u128_column_from_data(&permutation)
|
||||
}
|
||||
|
||||
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> {
|
||||
let mut out = vec![];
|
||||
tantivy_columnar::column_values::serialize_column_values_u128(
|
||||
&(|| data.iter().copied()),
|
||||
data.len() as u32,
|
||||
&mut out,
|
||||
)
|
||||
.unwrap();
|
||||
let out = OwnedBytes::new(out);
|
||||
tantivy_columnar::column_values::open_u128_mapped::<u128>(out).unwrap()
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
let mut data = vec![];
|
||||
for _ in 0..300_000 {
|
||||
let val = rng.gen_range(1..=100);
|
||||
data.push(val);
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U128 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}

213  columnar/benches/bench_u64.rs  (new file)
@@ -0,0 +1,213 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{
|
||||
serialize_and_load_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
|
||||
};
|
||||
use tantivy_columnar::*;
|
||||
use test::Bencher;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn ColumnValues<u64>> {
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup_bitpacked(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
let mut data = vec![];
|
||||
for _ in 0..300_000 {
|
||||
let val = rng.gen_range(1..=100);
|
||||
data.push(val);
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
|
||||
// U64 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
FIFTY_PERCENT_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U64 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
let column_ref = column.as_ref();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u32..n as u32 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..n {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}

45  columnar/src/TODO.md  (new file)
@@ -0,0 +1,45 @@
# zero to one
* merges with non-trivial mapping (deletes / sort)
* emission of the sort mapping.
* multivalued range queries restart from the beginning every time.
* revisit line codec
* removal of all rows of a column in the schema due to deletes
* plugging JSON
* replug examples


# Perf and Size
* re-add ZSTD compression for dictionaries
* no systematic monotonic mapping
* consider removing multilinear
* f32?
* ad-hoc solution for bool?
* add metrics helper for aggregates, e.g. sum(row_id)
* review inline absence/presence
* improve perf of select using PDEP
* compare with roaring bitmap / Elias-Fano etc.
* SIMD range? (see blog post)
* add alignment?
* consider another codec to bridge the gap between a few and 5k elements

# Cleanup and rationalization
* in benchmarks, unify percent vs ratio, f32 vs f64.
* investigate whether we should have better errors; io::Error is overused at the moment.
* rename rank/select in unit tests
* review the public API via cargo doc
* go through TODOs
* remove all doc_id occurrences -> row_id
* use the rank & select naming in the unit tests branch.
* multi-linear -> blockwise
* linear codec -> simply a multiplication for the index column
* rename columnar to something more explicit, like column_dictionary or columnar_table
* rename fastfield -> column
* document changes
* rationalize FastFieldValue, HasColumnType
* isolate u128_based and make naming uniform

# Other
* fix / enhance column-cli

# Santa claus
* autodetect datetime and ipaddr, plug customizable tokenizer.

102  columnar/src/column/dictionary_encoded.rs  (new file)
@@ -0,0 +1,102 @@
|
||||
use std::io;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
use sstable::{Dictionary, VoidSSTable};
|
||||
|
||||
use crate::column::Column;
|
||||
use crate::RowId;
|
||||
|
||||
/// Dictionary encoded column.
|
||||
///
|
||||
/// The column simply gives access to a regular u64 column in
/// which the values are term ordinals.
///
/// These ordinals are ids that uniquely identify the bytes stored in
/// the dictionary. These ordinals are small, and sorted in the same order
/// as the terms they refer to.
|
||||
#[derive(Clone)]
|
||||
pub struct BytesColumn {
|
||||
pub(crate) dictionary: Arc<Dictionary<VoidSSTable>>,
|
||||
pub(crate) term_ord_column: Column<u64>,
|
||||
}
|
||||
|
||||
impl BytesColumn {
|
||||
/// Fills the given `output` buffer with the term associated to the ordinal `ord`.
|
||||
///
|
||||
/// Returns `false` if the term does not exist (e.g. `term_ord` is greater than or equal to the
/// overall number of terms).
|
||||
pub fn ord_to_bytes(&self, ord: u64, output: &mut Vec<u8>) -> io::Result<bool> {
|
||||
self.dictionary.ord_to_term(ord, output)
|
||||
}
|
||||
|
||||
/// Returns the number of rows in the column.
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.term_ord_column.num_rows()
|
||||
}
|
||||
|
||||
pub fn term_ords(&self, row_id: RowId) -> impl Iterator<Item = u64> + '_ {
|
||||
self.term_ord_column.values(row_id)
|
||||
}
|
||||
|
||||
/// Returns the column of ordinals
|
||||
pub fn ords(&self) -> &Column<u64> {
|
||||
&self.term_ord_column
|
||||
}
|
||||
|
||||
pub fn num_terms(&self) -> usize {
|
||||
self.dictionary.num_terms()
|
||||
}
|
||||
|
||||
pub fn dictionary(&self) -> &Dictionary<VoidSSTable> {
|
||||
self.dictionary.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct StrColumn(BytesColumn);
|
||||
|
||||
impl From<BytesColumn> for StrColumn {
|
||||
fn from(bytes_col: BytesColumn) -> Self {
|
||||
StrColumn(bytes_col)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StrColumn> for BytesColumn {
|
||||
fn from(str_column: StrColumn) -> BytesColumn {
|
||||
str_column.0
|
||||
}
|
||||
}
|
||||
|
||||
impl StrColumn {
|
||||
pub fn dictionary(&self) -> &Dictionary<VoidSSTable> {
|
||||
self.0.dictionary.as_ref()
|
||||
}
|
||||
|
||||
/// Fills the buffer
|
||||
pub fn ord_to_str(&self, term_ord: u64, output: &mut String) -> io::Result<bool> {
|
||||
unsafe {
|
||||
let buf = output.as_mut_vec();
|
||||
if !self.0.dictionary.ord_to_term(term_ord, buf)? {
|
||||
return Ok(false);
|
||||
}
|
||||
// TODO consider remove checks if it hurts performance.
|
||||
if std::str::from_utf8(buf.as_slice()).is_err() {
|
||||
buf.clear();
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"Not valid utf-8",
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for StrColumn {
|
||||
type Target = BytesColumn;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}

129  columnar/src/column/mod.rs  (new file)
@@ -0,0 +1,129 @@
|
||||
mod dictionary_encoded;
|
||||
mod serialize;
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::io::Write;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::BinarySerializable;
|
||||
pub use dictionary_encoded::{BytesColumn, StrColumn};
|
||||
pub use serialize::{
|
||||
open_column_bytes, open_column_u128, open_column_u64, serialize_column_mappable_to_u128,
|
||||
serialize_column_mappable_to_u64,
|
||||
};
|
||||
|
||||
use crate::column_index::ColumnIndex;
|
||||
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
|
||||
use crate::column_values::{monotonic_map_column, ColumnValues};
|
||||
use crate::{Cardinality, MonotonicallyMappableToU64, RowId};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Column<T = u64> {
|
||||
pub idx: ColumnIndex,
|
||||
pub values: Arc<dyn ColumnValues<T>>,
|
||||
}
|
||||
|
||||
impl<T: MonotonicallyMappableToU64> Column<T> {
|
||||
pub fn to_u64_monotonic(self) -> Column<u64> {
|
||||
let values = Arc::new(monotonic_map_column(
|
||||
self.values,
|
||||
StrictlyMonotonicMappingToInternal::<T>::new(),
|
||||
));
|
||||
Column {
|
||||
idx: self.idx,
|
||||
values,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
self.idx.get_cardinality()
|
||||
}
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
match &self.idx {
|
||||
ColumnIndex::Full => self.values.num_vals() as u32,
|
||||
ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
|
||||
ColumnIndex::Multivalued(col_index) => {
|
||||
// The multivalued index contains all value start row_id,
|
||||
// and one extra value at the end with the overall number of rows.
|
||||
col_index.num_rows()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn min_value(&self) -> T {
|
||||
self.values.min_value()
|
||||
}
|
||||
|
||||
pub fn max_value(&self) -> T {
|
||||
self.values.max_value()
|
||||
}
|
||||
|
||||
pub fn first(&self, row_id: RowId) -> Option<T> {
|
||||
self.values(row_id).next()
|
||||
}
|
||||
|
||||
pub fn values(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
|
||||
self.value_row_ids(row_id)
|
||||
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
|
||||
}
|
||||
|
||||
pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
|
||||
Arc::new(FirstValueWithDefault {
|
||||
column: self,
|
||||
default_value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for Column<T> {
|
||||
type Target = ColumnIndex;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.idx
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for Cardinality {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.to_code().serialize(writer)
|
||||
}
|
||||
|
||||
fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
|
||||
let cardinality_code = u8::deserialize(reader)?;
|
||||
let cardinality = Cardinality::try_from_code(cardinality_code)?;
|
||||
Ok(cardinality)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO simplify or optimize
|
||||
struct FirstValueWithDefault<T: Copy> {
|
||||
column: Column<T>,
|
||||
default_value: T,
|
||||
}
|
||||
|
||||
impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
|
||||
for FirstValueWithDefault<T>
|
||||
{
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.column.first(idx).unwrap_or(self.default_value)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T {
|
||||
self.column.values.min_value()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T {
|
||||
self.column.values.max_value()
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
match &self.column.idx {
|
||||
ColumnIndex::Full => self.column.values.num_vals(),
|
||||
ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
|
||||
ColumnIndex::Multivalued(_) => todo!(),
|
||||
}
|
||||
}
|
||||
}

96  columnar/src/column/serialize.rs  (new file)
@@ -0,0 +1,96 @@
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use sstable::Dictionary;
|
||||
|
||||
use crate::column::{BytesColumn, Column};
|
||||
use crate::column_index::{serialize_column_index, SerializableColumnIndex};
|
||||
use crate::column_values::serialize::serialize_column_values_u128;
|
||||
use crate::column_values::u64_based::{serialize_u64_based_column_values, CodecType};
|
||||
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
||||
use crate::iterable::{map_iterable, Iterable};
|
||||
|
||||
pub fn serialize_column_mappable_to_u128<I, T: MonotonicallyMappableToU128>(
|
||||
column_index: SerializableColumnIndex<'_>,
|
||||
iterable: &dyn Fn() -> I,
|
||||
num_vals: u32,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
{
|
||||
let column_index_num_bytes = serialize_column_index(column_index, output)?;
|
||||
let u128_iterable = map_iterable(iterable, MonotonicallyMappableToU128::to_u128);
|
||||
serialize_column_values_u128(&u128_iterable, num_vals, output)?;
|
||||
output.write_all(&column_index_num_bytes.to_le_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64 + Debug, I>(
|
||||
column_index: SerializableColumnIndex<'_>,
|
||||
column_values: &dyn Fn() -> I,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()>
|
||||
where I: Iterator<Item=T> {
|
||||
let column_index_num_bytes = serialize_column_index(column_index, output)?;
|
||||
serialize_u64_based_column_values(
|
||||
column_values,
|
||||
&[CodecType::Bitpacked, CodecType::BlockwiseLinear],
|
||||
output,
|
||||
)?;
|
||||
output.write_all(&column_index_num_bytes.to_le_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::Result<Column<T>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
column_index_num_bytes_payload
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data)?;
|
||||
let column_values =
|
||||
crate::column_values::u64_based::load_u64_based_column_values(column_values_data)?;
|
||||
Ok(Column {
|
||||
idx: column_index,
|
||||
values: column_values,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
bytes: OwnedBytes,
|
||||
) -> io::Result<Column<T>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
column_index_num_bytes_payload
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data)?;
|
||||
let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
|
||||
Ok(Column {
|
||||
idx: column_index,
|
||||
values: column_values,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_bytes<T: From<BytesColumn>>(data: OwnedBytes) -> io::Result<T> {
|
||||
let (body, dictionary_len_bytes) = data.rsplit(4);
|
||||
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
|
||||
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
|
||||
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
|
||||
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
|
||||
let bytes_column = BytesColumn {
|
||||
dictionary,
|
||||
term_ord_column,
|
||||
};
|
||||
Ok(bytes_column.into())
|
||||
}

174  columnar/src/column_index/merge.rs  (new file)
@@ -0,0 +1,174 @@
|
||||
use std::iter;
|
||||
|
||||
use crate::column_index::{
|
||||
multivalued_index, serialize_column_index, SerializableColumnIndex, Set,
|
||||
};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, ColumnIndex, MergeRowOrder, RowId, StackMergeOrder};
|
||||
|
||||
fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
|
||||
columns
|
||||
.iter()
|
||||
.flatten()
|
||||
.map(ColumnIndex::get_cardinality)
|
||||
.max()
|
||||
.unwrap_or(Cardinality::Full)
|
||||
}
|
||||
|
||||
pub fn stack_column_index<'a>(
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
merge_row_order: &'a MergeRowOrder,
|
||||
) -> SerializableColumnIndex<'a> {
|
||||
let MergeRowOrder::Stack(stack_merge_order) = merge_row_order else {
|
||||
panic!("only supporting stacking at the moment.");
|
||||
};
|
||||
let cardinality = detect_cardinality(columns);
|
||||
match cardinality {
|
||||
Cardinality::Full => SerializableColumnIndex::Full,
|
||||
Cardinality::Optional => {
|
||||
let stacked_optional_index: StackedOptionalIndex<'a> = StackedOptionalIndex {
|
||||
columns,
|
||||
stack_merge_order,
|
||||
};
|
||||
SerializableColumnIndex::Optional {
|
||||
non_null_row_ids: Box::new(move || Box::new(stacked_optional_index.iter())),
|
||||
num_rows: stack_merge_order.num_rows(),
|
||||
}
|
||||
},
|
||||
Cardinality::Multivalued => {
|
||||
let stacked_multivalued_index = StackedMultivaluedIndex {
|
||||
columns,
|
||||
stack_merge_order,
|
||||
};
|
||||
SerializableColumnIndex::Multivalued(Box::new(move || stacked_multivalued_index.boxed_iter()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct StackedOptionalIndex<'a> {
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
impl<'a> StackedOptionalIndex<'a> {
|
||||
fn iter(&self) -> impl Iterator<Item=RowId> + 'a {
|
||||
Box::new(
|
||||
self.columns
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(columnar_id, column_index_opt)| {
|
||||
let columnar_row_range = self.stack_merge_order.columnar_range(columnar_id);
|
||||
let rows_it: Box<dyn Iterator<Item = RowId>> = match column_index_opt {
|
||||
Some(ColumnIndex::Full) => Box::new(columnar_row_range),
|
||||
Some(ColumnIndex::Optional(optional_index)) => Box::new(
|
||||
optional_index
|
||||
.iter_rows()
|
||||
.map(move |row_id: RowId| row_id + columnar_row_range.start),
|
||||
),
|
||||
Some(ColumnIndex::Multivalued(_)) => {
|
||||
panic!("No multivalued index is allowed when stacking column index");
|
||||
}
|
||||
None => Box::new(std::iter::empty()),
|
||||
};
|
||||
rows_it
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct StackedMultivaluedIndex<'a> {
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
fn convert_column_opt_to_multivalued_index<'a>(
|
||||
column_index_opt: Option<&'a ColumnIndex>,
|
||||
num_rows: RowId,
|
||||
) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
match column_index_opt {
|
||||
None => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
|
||||
Some(ColumnIndex::Full) => Box::new(0..num_rows + 1),
|
||||
Some(ColumnIndex::Optional(optional_index)) => {
|
||||
Box::new(
|
||||
(0..num_rows)
|
||||
// TODO optimize
|
||||
.map(|row_id| optional_index.rank(row_id))
|
||||
.chain(std::iter::once(optional_index.num_non_nulls())),
|
||||
)
|
||||
}
|
||||
Some(ColumnIndex::Multivalued(multivalued_index)) => {
|
||||
multivalued_index.start_index_column.iter()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> StackedMultivaluedIndex<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
let multivalued_indexes =
|
||||
self.columns
|
||||
.iter()
|
||||
.map(Option::as_ref)
|
||||
.enumerate()
|
||||
.map(|(columnar_id, column_opt)| {
|
||||
let num_rows =
|
||||
self.stack_merge_order.columnar_range(columnar_id).len() as RowId;
|
||||
convert_column_opt_to_multivalued_index(column_opt, num_rows)
|
||||
});
|
||||
stack_multivalued_indexes(multivalued_indexes)
|
||||
}
|
||||
}
|
||||
|
||||
// Refactor me
|
||||
fn stack_multivalued_indexes<'a>(
|
||||
mut multivalued_indexes: impl Iterator<Item = Box<dyn Iterator<Item = RowId> + 'a>> + 'a,
|
||||
) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
let mut offset = 0;
|
||||
let mut last_row_id = 0;
|
||||
let mut current_it = multivalued_indexes.next();
|
||||
Box::new(std::iter::from_fn(move || loop {
|
||||
let Some(multivalued_index) = current_it.as_mut() else {
|
||||
return None;
|
||||
};
|
||||
if let Some(row_id) = multivalued_index.next() {
|
||||
last_row_id = offset + row_id;
|
||||
return Some(last_row_id);
|
||||
}
|
||||
offset = last_row_id;
|
||||
loop {
|
||||
current_it = multivalued_indexes.next();
|
||||
if current_it.as_mut()?.next().is_some() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
fn stack_multivalued_index<'a>(
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
stack_merge_order: &StackMergeOrder,
|
||||
) -> Box<dyn Iterable<RowId> + 'a> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::RowId;
|
||||
|
||||
fn it<'a>(row_ids: &'a [RowId]) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
Box::new(row_ids.iter().copied())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stack() {
|
||||
let columns = [
|
||||
it(&[0u32, 0u32]),
|
||||
it(&[0u32, 1u32, 1u32, 4u32]),
|
||||
it(&[0u32, 3u32, 5u32]),
|
||||
it(&[0u32, 4u32]),
|
||||
]
|
||||
.into_iter();
|
||||
let start_offsets: Vec<RowId> = super::stack_multivalued_indexes(columns).collect();
|
||||
assert_eq!(start_offsets, &[0, 0, 1, 1, 4, 7, 9, 13]);
|
||||
}
|
||||
}

62  columnar/src/column_index/mod.rs  (new file)
@@ -0,0 +1,62 @@
|
||||
mod merge;
|
||||
mod multivalued_index;
|
||||
mod optional_index;
|
||||
mod serialize;
|
||||
|
||||
use std::ops::Range;
|
||||
|
||||
pub use merge::stack_column_index;
|
||||
pub use optional_index::{OptionalIndex, Set};
|
||||
pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};
|
||||
|
||||
use crate::column_index::multivalued_index::MultiValueIndex;
|
||||
use crate::{Cardinality, RowId};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum ColumnIndex {
|
||||
Full,
|
||||
Optional(OptionalIndex),
|
||||
/// In addition, at index num_rows, an extra value is added
/// containing the overall number of values.
|
||||
Multivalued(MultiValueIndex),
|
||||
}
|
||||
|
||||
impl ColumnIndex {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
ColumnIndex::Full => Cardinality::Full,
|
||||
ColumnIndex::Optional(_) => Cardinality::Optional,
|
||||
ColumnIndex::Multivalued(_) => Cardinality::Multivalued,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
|
||||
match self {
|
||||
ColumnIndex::Full => row_id..row_id + 1,
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
if let Some(val) = optional_index.rank_if_exists(row_id) {
|
||||
val..val + 1
|
||||
} else {
|
||||
0..0
|
||||
}
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => multivalued_index.range(row_id),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn select_batch_in_place(&self, rank_ids: &mut Vec<RowId>) {
|
||||
match self {
|
||||
ColumnIndex::Full => {
|
||||
// No need to do anything:
|
||||
// value_idx and row_idx are the same.
|
||||
}
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
optional_index.select_batch(&mut rank_ids[..]);
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
// TODO important: avoid passing 0u32 here and restarting the scan from the beginning every time.
|
||||
multivalued_index.select_batch_in_place(0u32, rank_ids)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
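// Worked example (illustrative values) of `value_row_ids` per cardinality:
// - Full: value_row_ids(5) == 5..6 (row 5 maps to exactly one value).
// - Optional with non-null rows {3, 7}: value_row_ids(7) == 1..2 and
//   value_row_ids(4) == 0..0 (row 4 has no value).
// - Multivalued with start offsets [0, 2, 2, 5]: value_row_ids(2) == 2..5
//   and value_row_ids(1) == 2..2 (row 1 has no values).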
|
||||
columnar/src/column_index/multivalued_index.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
|
||||
use crate::column_values::u64_based::CodecType;
|
||||
use crate::column_values::ColumnValues;
|
||||
use crate::RowId;
|
||||
|
||||
pub fn serialize_multivalued_index<'a>(
|
||||
multivalued_index: &'a dyn Fn() -> Box<dyn Iterator<Item=RowId> + 'a>,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()> {
|
||||
crate::column_values::u64_based::serialize_u64_based_column_values(
|
||||
multivalued_index,
|
||||
&[CodecType::Bitpacked, CodecType::Linear],
|
||||
output,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<MultiValueIndex> {
|
||||
let start_index_column: Arc<dyn ColumnValues<RowId>> =
|
||||
crate::column_values::u64_based::load_u64_based_column_values(bytes)?;
|
||||
Ok(MultiValueIndex { start_index_column })
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Index to resolve the value range for a given doc_id.
/// Starts at 0.
|
||||
pub struct MultiValueIndex {
|
||||
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
|
||||
}
|
||||
|
||||
impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
|
||||
fn from(start_index_column: Arc<dyn ColumnValues<RowId>>) -> Self {
|
||||
MultiValueIndex { start_index_column }
|
||||
}
|
||||
}
|
||||
|
||||
impl MultiValueIndex {
|
||||
/// Returns `[start, end)`, such that the values associated with
|
||||
/// the given document are `start..end`.
|
||||
#[inline]
|
||||
pub(crate) fn range(&self, row_id: RowId) -> Range<RowId> {
|
||||
let start = self.start_index_column.get_val(row_id);
|
||||
let end = self.start_index_column.get_val(row_id + 1);
|
||||
start..end
|
||||
}
|
||||
|
||||
/// Returns the number of documents in the index.
|
||||
#[inline]
|
||||
pub fn num_rows(&self) -> u32 {
|
||||
self.start_index_column.num_vals() - 1
|
||||
}
|
||||
|
||||
/// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
/// row_ids. Positions are converted in place to doc ids.
///
/// Since there is no index from value position to doc id, only from doc id to value position
/// range, we scan the index.
///
/// Correctness: positions need to be sorted, and the index needs to contain monotonically
/// increasing positions.
///
/// TODO: Instead of a linear scan we could use an exponential search followed by a binary
/// search to match a doc id to its value position.
|
||||
#[allow(clippy::bool_to_int_with_if)]
|
||||
pub(crate) fn select_batch_in_place(&self, row_start: RowId, ranks: &mut Vec<u32>) {
|
||||
if ranks.is_empty() {
|
||||
return;
|
||||
}
|
||||
let mut cur_doc = row_start;
|
||||
let mut last_doc = None;
|
||||
|
||||
assert!(self.start_index_column.get_val(row_start) as u32 <= ranks[0]);
|
||||
|
||||
let mut write_doc_pos = 0;
|
||||
for i in 0..ranks.len() {
|
||||
let pos = ranks[i];
|
||||
loop {
|
||||
let end = self.start_index_column.get_val(cur_doc + 1) as u32;
|
||||
if end > pos {
|
||||
ranks[write_doc_pos] = cur_doc;
|
||||
write_doc_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
|
||||
last_doc = Some(cur_doc);
|
||||
break;
|
||||
}
|
||||
cur_doc += 1;
|
||||
}
|
||||
}
|
||||
ranks.truncate(write_doc_pos);
|
||||
}
|
||||
}
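// Hedged usage sketch: with start offsets [0, 10, 12, 15, 22, 23] (the same values used by
// the test below), num_rows() == 5 and
//   range(0) == 0..10, range(1) == 10..12, range(4) == 22..23.
// `select_batch_in_place` goes the other way: value positions 10 and 11 both collapse back
// to row 1, which is why duplicate row ids are removed in place.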
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::MultiValueIndex;
|
||||
use crate::column_values::IterColumn;
|
||||
use crate::{ColumnValues, RowId};
|
||||
|
||||
fn index_to_pos_helper(
|
||||
index: &MultiValueIndex,
|
||||
doc_id_range: Range<u32>,
|
||||
positions: &[u32],
|
||||
) -> Vec<u32> {
|
||||
let mut positions = positions.to_vec();
|
||||
index.select_batch_in_place(doc_id_range.start, &mut positions);
|
||||
positions
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_positions_to_docid() {
|
||||
let offsets: Vec<RowId> = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
|
||||
let column: Arc<dyn ColumnValues<RowId>> = Arc::new(IterColumn::from(offsets.into_iter()));
|
||||
let index = MultiValueIndex::from(column);
|
||||
assert_eq!(index.num_rows(), 5);
|
||||
let positions = &[10u32, 11, 15, 20, 21, 22];
|
||||
assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);
|
||||
assert_eq!(index_to_pos_helper(&index, 1..5, positions), vec![1, 3, 4]);
|
||||
assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
|
||||
assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
|
||||
assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
|
||||
assert_eq!(index_to_pos_helper(&index, 2..5, &[12]), vec![2]);
|
||||
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
|
||||
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
|
||||
}
|
||||
}
|
||||
columnar/src/column_index/optional_index/mod.rs (new file, 506 lines)
@@ -0,0 +1,506 @@
|
||||
use std::io::{self, Write};
|
||||
use std::sync::Arc;
|
||||
|
||||
mod set;
|
||||
mod set_block;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes, VInt};
|
||||
pub use set::{SelectCursor, Set, SetCodec};
|
||||
use set_block::{
|
||||
DenseBlock, DenseBlockCodec, SparseBlock, SparseBlockCodec, DENSE_BLOCK_NUM_BYTES,
|
||||
};
|
||||
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{InvalidData, RowId};
|
||||
|
||||
/// The threshold for for number of elements after which we switch to dense block encoding.
|
||||
///
|
||||
/// We simply pick the value that minimize the size of the blocks.
|
||||
const DENSE_BLOCK_THRESHOLD: u32 =
|
||||
set_block::DENSE_BLOCK_NUM_BYTES / std::mem::size_of::<u16>() as u32; //< 5_120
|
||||
|
||||
const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;
|
||||
|
||||
const BLOCK_SIZE: RowId = 1 << 16;
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct BlockMeta {
|
||||
non_null_rows_before_block: u32,
|
||||
start_byte_offset: u32,
|
||||
block_variant: BlockVariant,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
enum BlockVariant {
|
||||
Dense,
|
||||
Sparse { num_vals: u16 },
|
||||
}
|
||||
|
||||
impl BlockVariant {
|
||||
pub fn empty() -> Self {
|
||||
Self::Sparse { num_vals: 0 }
|
||||
}
|
||||
pub fn num_bytes_in_block(&self) -> u32 {
|
||||
match *self {
|
||||
BlockVariant::Dense => set_block::DENSE_BLOCK_NUM_BYTES,
|
||||
BlockVariant::Sparse { num_vals } => num_vals as u32 * 2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This codec is inspired by roaring bitmaps.
|
||||
/// In the dense blocks, however, in order to accelerate `select`,
/// we interleave an offset over two bytes (more on this below).
|
||||
///
|
||||
/// The lower 16 bits of doc ids are stored as u16 while the upper 16 bits are given by the block
|
||||
/// id. Each block contains 1<<16 docids.
|
||||
///
|
||||
/// # Serialized Data Layout
|
||||
/// The data starts with the block data. Each block is either dense or sparse encoded, depending on
/// the number of values in the block. A block is sparse when it contains fewer than
/// DENSE_BLOCK_THRESHOLD (5_120) values.
|
||||
/// [Sparse data block | dense data block, .. #repeat*; Desc: Either a sparse or dense encoded
|
||||
/// block]
|
||||
/// ### Sparse block data
|
||||
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block]
|
||||
/// ### Dense block data
|
||||
/// [Dense codec for the whole block; Desc: Similar to a bitvec(0..ELEMENTS_PER_BLOCK) + Metadata
|
||||
/// for faster lookups. See dense.rs]
|
||||
///
|
||||
/// The data is followed by block metadata, to know which area of the raw block data belongs to
|
||||
/// which block. Only metadata for blocks with elements is recorded to
|
||||
/// keep the overhead low for scenarios with many very sparse columns. The block metadata consists
/// of the block index and the number of values in the block. Since we don't store empty blocks,
/// num_vals is stored decremented by 1, e.g. a stored 0 means 1 value.
|
||||
///
|
||||
/// The last u16 stores the number of metadata blocks.
|
||||
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block][(u16 LE, u16 LE), .. #repeat*;
|
||||
/// Desc: (Block Id u16, Num Elements u16)][u16 LE; Desc: num blocks with values u16]
|
||||
///
|
||||
/// # Opening
|
||||
/// When opening the data layout, the block metadata is expanded to a `Vec<BlockMeta>`, where the
/// index is the block index. For each block, the start byte offset and the number of non-null
/// rows before the block are computed.
|
||||
#[derive(Clone)]
|
||||
pub struct OptionalIndex {
|
||||
num_rows: RowId,
|
||||
num_non_null_rows: RowId,
|
||||
block_data: OwnedBytes,
|
||||
block_metas: Arc<[BlockMeta]>,
|
||||
}
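// Worked example of the serialized layout described above (illustrative values): for
// num_rows = 100_000 and non-null rows {1, 70_000}, serialization writes
//   VInt(100_000)                 // header written by serialize_optional_index
//   [0x01, 0x00]                  // block 0, sparse: in-block id 1
//   [0x70, 0x11]                  // block 1, sparse: 70_000 - 65_536 = 4_464
//   [0x00, 0x00, 0x00, 0x00]      // meta: (block_id = 0, num_vals - 1 = 0)
//   [0x01, 0x00, 0x00, 0x00]      // meta: (block_id = 1, num_vals - 1 = 0)
//   [0x02, 0x00]                  // number of non-empty blocks = 2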
|
||||
|
||||
impl OptionalIndex {
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.num_rows
|
||||
}
|
||||
|
||||
pub fn num_non_nulls(&self) -> RowId {
|
||||
self.num_non_null_rows
|
||||
}
|
||||
|
||||
pub fn iter_rows<'a>(&'a self) -> impl Iterator<Item = RowId> + 'a {
|
||||
// TODO optimize
|
||||
let mut select_batch = self.select_cursor();
|
||||
(0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
|
||||
}
|
||||
}
|
||||
|
||||
/// Splits a value address into its lower and upper 16 bits.
/// The lower 16 bits are the position within the block,
/// the upper 16 bits are the block index.
|
||||
#[derive(Copy, Debug, Clone)]
|
||||
struct RowAddr {
|
||||
block_id: u16,
|
||||
in_block_row_id: u16,
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn row_addr_from_row_id(row_id: RowId) -> RowAddr {
|
||||
RowAddr {
|
||||
block_id: (row_id / BLOCK_SIZE) as u16,
|
||||
in_block_row_id: (row_id % BLOCK_SIZE) as u16,
|
||||
}
|
||||
}
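// Illustrative: row id 70_000 splits into block_id = 1 and
// in_block_row_id = 4_464 (70_000 - 65_536).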
|
||||
|
||||
enum BlockSelectCursor<'a> {
|
||||
Dense(<DenseBlock<'a> as Set<u16>>::SelectCursor<'a>),
|
||||
Sparse(<SparseBlock<'a> as Set<u16>>::SelectCursor<'a>),
|
||||
}
|
||||
|
||||
impl<'a> BlockSelectCursor<'a> {
|
||||
fn select(&mut self, rank: u16) -> u16 {
|
||||
match self {
|
||||
BlockSelectCursor::Dense(dense_select_cursor) => dense_select_cursor.select(rank),
|
||||
BlockSelectCursor::Sparse(sparse_select_cursor) => sparse_select_cursor.select(rank),
|
||||
}
|
||||
}
|
||||
}
|
||||
pub struct OptionalIndexSelectCursor<'a> {
|
||||
current_block_cursor: BlockSelectCursor<'a>,
|
||||
current_block_id: u16,
|
||||
// The current block is guaranteed to contain ranks < end_rank.
|
||||
current_block_end_rank: RowId,
|
||||
optional_index: &'a OptionalIndex,
|
||||
block_doc_idx_start: RowId,
|
||||
num_null_rows_before_block: RowId,
|
||||
}
|
||||
|
||||
impl<'a> OptionalIndexSelectCursor<'a> {
|
||||
fn search_and_load_block(&mut self, rank: RowId) {
|
||||
if rank < self.current_block_end_rank {
|
||||
// we are already in the right block
|
||||
return;
|
||||
}
|
||||
self.current_block_id = self.optional_index.find_block(rank, self.current_block_id);
|
||||
self.current_block_end_rank = self
|
||||
.optional_index
|
||||
.block_metas
|
||||
.get(self.current_block_id as usize + 1)
|
||||
.map(|block_meta| block_meta.non_null_rows_before_block)
|
||||
.unwrap_or(u32::MAX);
|
||||
self.block_doc_idx_start = (self.current_block_id as u32) * ELEMENTS_PER_BLOCK;
|
||||
let block_meta = self.optional_index.block_metas[self.current_block_id as usize];
|
||||
self.num_null_rows_before_block = block_meta.non_null_rows_before_block;
|
||||
let block: Block<'_> = self.optional_index.block(block_meta);
|
||||
self.current_block_cursor = match block {
|
||||
Block::Dense(dense_block) => BlockSelectCursor::Dense(dense_block.select_cursor()),
|
||||
Block::Sparse(sparse_block) => BlockSelectCursor::Sparse(sparse_block.select_cursor()),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> SelectCursor<RowId> for OptionalIndexSelectCursor<'a> {
|
||||
fn select(&mut self, rank: RowId) -> RowId {
|
||||
self.search_and_load_block(rank);
|
||||
let index_in_block = (rank - self.num_null_rows_before_block) as u16;
|
||||
self.current_block_cursor.select(index_in_block) as RowId + self.block_doc_idx_start
|
||||
}
|
||||
}
|
||||
|
||||
impl Set<RowId> for OptionalIndex {
|
||||
type SelectCursor<'b> = OptionalIndexSelectCursor<'b> where Self: 'b;
|
||||
// Check if value at position is not null.
|
||||
#[inline]
|
||||
fn contains(&self, row_id: RowId) -> bool {
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(row_id);
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
match self.block(block_meta) {
|
||||
Block::Dense(dense_block) => dense_block.contains(in_block_row_id),
|
||||
Block::Sparse(sparse_block) => sparse_block.contains(in_block_row_id),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn rank(&self, row_id: RowId) -> RowId {
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(row_id);
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
let block = self.block(block_meta);
|
||||
let block_offset_row_id = match block {
|
||||
Block::Dense(dense_block) => dense_block.rank(in_block_row_id),
|
||||
Block::Sparse(sparse_block) => sparse_block.rank(in_block_row_id),
|
||||
} as u32;
|
||||
block_meta.non_null_rows_before_block + block_offset_row_id
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn rank_if_exists(&self, row_id: RowId) -> Option<RowId> {
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(row_id);
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
let block = self.block(block_meta);
|
||||
let block_offset_row_id = match block {
|
||||
Block::Dense(dense_block) => dense_block.rank_if_exists(in_block_row_id),
|
||||
Block::Sparse(sparse_block) => sparse_block.rank_if_exists(in_block_row_id),
|
||||
}? as u32;
|
||||
Some(block_meta.non_null_rows_before_block + block_offset_row_id)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn select(&self, rank: RowId) -> RowId {
|
||||
let block_pos = self.find_block(rank, 0);
|
||||
let block_doc_idx_start = (block_pos as u32) * ELEMENTS_PER_BLOCK;
|
||||
let block_meta = self.block_metas[block_pos as usize];
|
||||
let block: Block<'_> = self.block(block_meta);
|
||||
let index_in_block = (rank - block_meta.non_null_rows_before_block) as u16;
|
||||
let in_block_rank = match block {
|
||||
Block::Dense(dense_block) => dense_block.select(index_in_block),
|
||||
Block::Sparse(sparse_block) => sparse_block.select(index_in_block),
|
||||
};
|
||||
block_doc_idx_start + in_block_rank as u32
|
||||
}
|
||||
|
||||
fn select_cursor<'b>(&'b self) -> OptionalIndexSelectCursor<'b> {
|
||||
OptionalIndexSelectCursor {
|
||||
current_block_cursor: BlockSelectCursor::Sparse(
|
||||
SparseBlockCodec::open(b"").select_cursor(),
|
||||
),
|
||||
current_block_id: 0u16,
|
||||
current_block_end_rank: 0u32, //< this is sufficient to force the first load
|
||||
optional_index: self,
|
||||
block_doc_idx_start: 0u32,
|
||||
num_null_rows_before_block: 0u32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl OptionalIndex {
|
||||
pub fn select_batch(&self, ranks: &mut [RowId]) {
|
||||
let mut select_cursor = self.select_cursor();
|
||||
for rank in ranks.iter_mut() {
|
||||
*rank = select_cursor.select(*rank);
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn block<'a>(&'a self, block_meta: BlockMeta) -> Block<'a> {
|
||||
let BlockMeta {
|
||||
start_byte_offset,
|
||||
block_variant,
|
||||
..
|
||||
} = block_meta;
|
||||
let start_byte_offset = start_byte_offset as usize;
|
||||
let bytes = self.block_data.as_slice();
|
||||
match block_variant {
|
||||
BlockVariant::Dense => Block::Dense(DenseBlockCodec::open(
|
||||
&bytes[start_byte_offset..start_byte_offset + DENSE_BLOCK_NUM_BYTES as usize],
|
||||
)),
|
||||
BlockVariant::Sparse { num_vals } => {
|
||||
let end_byte_offset = start_byte_offset + num_vals as usize * 2;
|
||||
let sparse_bytes = &bytes[start_byte_offset..end_byte_offset];
|
||||
Block::Sparse(SparseBlockCodec::open(sparse_bytes))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn find_block(&self, dense_idx: u32, start_block_pos: u16) -> u16 {
|
||||
for block_pos in start_block_pos..self.block_metas.len() as u16 {
|
||||
let offset = self.block_metas[block_pos as usize].non_null_rows_before_block;
|
||||
if offset > dense_idx {
|
||||
return block_pos - 1u16;
|
||||
}
|
||||
}
|
||||
self.block_metas.len() as u16 - 1u16
|
||||
}
|
||||
|
||||
// TODO Add a good API for the codec_idx to original_idx translation.
|
||||
// The Iterator API is probably a bad idea.
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
enum Block<'a> {
|
||||
Dense(DenseBlock<'a>),
|
||||
Sparse(SparseBlock<'a>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
enum OptionalIndexCodec {
|
||||
Dense = 0,
|
||||
Sparse = 1,
|
||||
}
|
||||
|
||||
impl OptionalIndexCodec {
|
||||
fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
fn try_from_code(code: u8) -> Result<Self, InvalidData> {
|
||||
match code {
|
||||
0 => Ok(Self::Dense),
|
||||
1 => Ok(Self::Sparse),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for OptionalIndexCodec {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_all(&[self.to_code()])
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let optional_codec_code = u8::deserialize(reader)?;
|
||||
let optional_codec = Self::try_from_code(optional_codec_code)?;
|
||||
Ok(optional_codec)
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_optional_index_block(block_els: &[u16], out: &mut impl io::Write) -> io::Result<()> {
|
||||
let is_sparse = is_sparse(block_els.len() as u32);
|
||||
if is_sparse {
|
||||
SparseBlockCodec::serialize(block_els.iter().copied(), out)?;
|
||||
} else {
|
||||
DenseBlockCodec::serialize(block_els.iter().copied(), out)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn serialize_optional_index<'a, W: io::Write>(
|
||||
non_null_rows: &dyn Fn() -> Box<dyn Iterator<Item=RowId> + 'a>,
|
||||
num_rows: RowId,
|
||||
output: &mut W,
|
||||
) -> io::Result<()> {
|
||||
VInt(num_rows as u64).serialize(output)?;
|
||||
|
||||
let mut rows_it = non_null_rows();
|
||||
let mut block_metadata: Vec<SerializedBlockMeta> = Vec::new();
|
||||
let mut current_block = Vec::new();
|
||||
|
||||
// This if-statement for the first element ensures that
|
||||
// `block_metadata` is not empty in the loop below.
|
||||
let Some(idx) = rows_it.next() else {
|
||||
output.write_all(&0u16.to_le_bytes())?;
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let row_addr = row_addr_from_row_id(idx);
|
||||
|
||||
let mut current_block_id = row_addr.block_id;
|
||||
current_block.push(row_addr.in_block_row_id);
|
||||
|
||||
for idx in rows_it {
|
||||
let value_addr = row_addr_from_row_id(idx);
|
||||
if current_block_id != value_addr.block_id {
|
||||
serialize_optional_index_block(¤t_block[..], output)?;
|
||||
block_metadata.push(SerializedBlockMeta {
|
||||
block_id: current_block_id,
|
||||
num_non_null_rows: current_block.len() as u32,
|
||||
});
|
||||
current_block.clear();
|
||||
current_block_id = value_addr.block_id;
|
||||
}
|
||||
current_block.push(value_addr.in_block_row_id);
|
||||
}
|
||||
|
||||
// handle last block
|
||||
serialize_optional_index_block(¤t_block[..], output)?;
|
||||
|
||||
block_metadata.push(SerializedBlockMeta {
|
||||
block_id: current_block_id,
|
||||
num_non_null_rows: current_block.len() as u32,
|
||||
});
|
||||
|
||||
for block in &block_metadata {
|
||||
output.write_all(&block.to_bytes())?;
|
||||
}
|
||||
|
||||
output.write_all((block_metadata.len() as u16).to_le_bytes().as_ref())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
const SERIALIZED_BLOCK_META_NUM_BYTES: usize = 4;
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
struct SerializedBlockMeta {
|
||||
block_id: u16,
|
||||
num_non_null_rows: u32, //< takes values in 1..=65_536 (u16::MAX + 1)
|
||||
}
|
||||
|
||||
// TODO unit tests
|
||||
impl SerializedBlockMeta {
|
||||
#[inline]
|
||||
fn from_bytes(bytes: [u8; SERIALIZED_BLOCK_META_NUM_BYTES]) -> SerializedBlockMeta {
|
||||
let block_id = u16::from_le_bytes(bytes[0..2].try_into().unwrap());
|
||||
let num_non_null_rows: u32 =
|
||||
u16::from_le_bytes(bytes[2..4].try_into().unwrap()) as u32 + 1u32;
|
||||
SerializedBlockMeta {
|
||||
block_id,
|
||||
num_non_null_rows,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn to_bytes(&self) -> [u8; SERIALIZED_BLOCK_META_NUM_BYTES] {
|
||||
assert!(self.num_non_null_rows > 0);
|
||||
let mut bytes = [0u8; SERIALIZED_BLOCK_META_NUM_BYTES];
|
||||
bytes[0..2].copy_from_slice(&self.block_id.to_le_bytes());
|
||||
// We don't store empty blocks, therefore we can subtract 1.
|
||||
// This way we can still use a u16 even when the number of elements is 1 << 16 (u16::MAX + 1).
|
||||
bytes[2..4].copy_from_slice(&((self.num_non_null_rows - 1u32) as u16).to_le_bytes());
|
||||
bytes
|
||||
}
|
||||
}
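// Illustrative: a completely full block (65_536 non-null rows) is stored as
// (block_id, 65_535u16) and decoded back as 65_535 + 1. Empty blocks are never
// written, so the stored u16 never has to represent 0 values.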
|
||||
|
||||
#[inline]
|
||||
fn is_sparse(num_rows_in_block: u32) -> bool {
|
||||
num_rows_in_block < DENSE_BLOCK_THRESHOLD as u32
|
||||
}
|
||||
|
||||
fn deserialize_optional_index_block_metadatas(
|
||||
data: &[u8],
|
||||
num_rows: u32,
|
||||
) -> (Box<[BlockMeta]>, u32) {
|
||||
let num_blocks = data.len() / SERIALIZED_BLOCK_META_NUM_BYTES;
|
||||
let mut block_metas = Vec::with_capacity(num_blocks as usize + 1);
|
||||
let mut start_byte_offset = 0;
|
||||
let mut non_null_rows_before_block = 0;
|
||||
for block_meta_bytes in data.chunks_exact(SERIALIZED_BLOCK_META_NUM_BYTES) {
|
||||
let block_meta_bytes: [u8; SERIALIZED_BLOCK_META_NUM_BYTES] =
|
||||
block_meta_bytes.try_into().unwrap();
|
||||
let SerializedBlockMeta {
|
||||
block_id,
|
||||
num_non_null_rows,
|
||||
} = SerializedBlockMeta::from_bytes(block_meta_bytes);
|
||||
block_metas.resize(
|
||||
block_id as usize,
|
||||
BlockMeta {
|
||||
non_null_rows_before_block,
|
||||
start_byte_offset,
|
||||
block_variant: BlockVariant::empty(),
|
||||
},
|
||||
);
|
||||
let block_variant = if is_sparse(num_non_null_rows) {
|
||||
BlockVariant::Sparse {
|
||||
num_vals: num_non_null_rows as u16,
|
||||
}
|
||||
} else {
|
||||
BlockVariant::Dense
|
||||
};
|
||||
block_metas.push(BlockMeta {
|
||||
non_null_rows_before_block,
|
||||
start_byte_offset,
|
||||
block_variant,
|
||||
});
|
||||
start_byte_offset += block_variant.num_bytes_in_block();
|
||||
non_null_rows_before_block += num_non_null_rows as u32;
|
||||
}
|
||||
block_metas.resize(
|
||||
((num_rows + BLOCK_SIZE - 1) / BLOCK_SIZE) as usize,
|
||||
BlockMeta {
|
||||
non_null_rows_before_block,
|
||||
start_byte_offset,
|
||||
block_variant: BlockVariant::empty(),
|
||||
},
|
||||
);
|
||||
(block_metas.into_boxed_slice(), non_null_rows_before_block)
|
||||
}
|
||||
|
||||
pub fn open_optional_index(bytes: OwnedBytes) -> io::Result<OptionalIndex> {
|
||||
let (mut bytes, num_non_empty_blocks_bytes) = bytes.rsplit(2);
|
||||
let num_non_empty_block_bytes =
|
||||
u16::from_le_bytes(num_non_empty_blocks_bytes.as_slice().try_into().unwrap());
|
||||
let num_rows = VInt::deserialize_u64(&mut bytes)? as u32;
|
||||
let block_metas_num_bytes =
|
||||
num_non_empty_block_bytes as usize * SERIALIZED_BLOCK_META_NUM_BYTES;
|
||||
let (block_data, block_metas) = bytes.rsplit(block_metas_num_bytes);
|
||||
let (block_metas, num_non_null_rows) =
|
||||
deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_rows);
|
||||
let optional_index = OptionalIndex {
|
||||
num_rows,
|
||||
num_non_null_rows,
|
||||
block_data,
|
||||
block_metas: block_metas.into(),
|
||||
};
|
||||
Ok(optional_index)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
columnar/src/column_index/optional_index/set.rs (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
use std::io;
|
||||
|
||||
/// A codec makes it possible to serialize a set of
|
||||
/// elements, and open the resulting Set representation.
|
||||
pub trait SetCodec {
|
||||
type Item: Copy + TryFrom<usize> + Eq + std::hash::Hash + std::fmt::Debug;
|
||||
type Reader<'a>: Set<Self::Item>;
|
||||
|
||||
/// Serializes a set of unique sorted u16 elements.
|
||||
///
|
||||
/// May panic if the elements are not sorted.
|
||||
fn serialize(els: impl Iterator<Item = Self::Item>, wrt: impl io::Write) -> io::Result<()>;
|
||||
fn open<'a>(data: &'a [u8]) -> Self::Reader<'a>;
|
||||
}
|
||||
|
||||
/// Stateful object that makes it possible to compute several selects in a row,
/// provided the ranks passed as arguments are increasing.
pub trait SelectCursor<T> {
// May panic if rank is greater than the number of elements in the Set,
// or if rank is lower than the value provided in the previous call.
|
||||
fn select(&mut self, rank: T) -> T;
|
||||
}
|
||||
|
||||
pub trait Set<T> {
|
||||
type SelectCursor<'b>: SelectCursor<T>
|
||||
where Self: 'b;
|
||||
|
||||
/// Returns true if the element is contained in the Set.
|
||||
fn contains(&self, el: T) -> bool;
|
||||
|
||||
/// Returns the number of rows in the set that are < `el`
|
||||
fn rank(&self, el: T) -> T;
|
||||
|
||||
/// If the set contains `el` returns the element rank.
|
||||
/// If the set does not contain the element, it returns `None`.
|
||||
fn rank_if_exists(&self, el: T) -> Option<T>;
|
||||
|
||||
/// Returns the rank-th value stored in this set.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if rank is greater than the number of elements in the Set.
|
||||
fn select(&self, rank: T) -> T;
|
||||
|
||||
/// Creates a brand new select cursor.
|
||||
fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b>;
|
||||
}
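// Hedged usage sketch of the Set contract (element values are illustrative, and the snippet
// assumes the sparse block codec defined later in this diff is in scope):
//
// let mut buf = Vec::new();
// SparseBlockCodec::serialize([2u16, 5, 9].iter().copied(), &mut buf).unwrap();
// let set = SparseBlockCodec::open(buf.as_slice());
// assert!(set.contains(5));
// assert_eq!(set.rank(5), 1);                 // elements < 5: {2}
// assert_eq!(set.rank(6), 2);                 // elements < 6: {2, 5}
// assert_eq!(set.rank_if_exists(9), Some(2));
// assert_eq!(set.select(0), 2);
// assert_eq!(set.select(2), 9);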
|
||||
columnar/src/column_index/optional_index/set_block/dense.rs (new file, 278 lines)
@@ -0,0 +1,278 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::BinarySerializable;
|
||||
|
||||
use crate::column_index::optional_index::{SelectCursor, Set, SetCodec, ELEMENTS_PER_BLOCK};
|
||||
|
||||
#[inline(always)]
|
||||
fn get_bit_at(input: u64, n: u16) -> bool {
|
||||
input & (1 << n) != 0
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn set_bit_at(input: &mut u64, n: u16) {
|
||||
*input |= 1 << n;
|
||||
}
|
||||
|
||||
/// For the `DenseCodec`, `data` contains the encoded mini blocks.
/// Each mini block consists of [u8; 10]. The first 8 bytes are a bitvec for 64 elements.
/// The last 2 bytes are the offset, i.e. the number of set bits so far.
|
||||
///
|
||||
/// When translating the original index to a dense index, the correct block can be computed
|
||||
/// directly `orig_idx/64`. Inside the block the position is `orig_idx%64`.
|
||||
///
|
||||
/// When translating a dense index to the original index, we can use the offset to find the correct
|
||||
/// block. Direct computation is not possible, but we can employ a linear or binary search.
|
||||
|
||||
const ELEMENTS_PER_MINI_BLOCK: u16 = 64;
|
||||
const MINI_BLOCK_BITVEC_NUM_BYTES: usize = 8;
|
||||
const MINI_BLOCK_OFFSET_NUM_BYTES: usize = 2;
|
||||
pub const MINI_BLOCK_NUM_BYTES: usize = MINI_BLOCK_BITVEC_NUM_BYTES + MINI_BLOCK_OFFSET_NUM_BYTES;
|
||||
|
||||
/// Number of bytes in a dense block.
|
||||
pub const DENSE_BLOCK_NUM_BYTES: u32 =
|
||||
(ELEMENTS_PER_BLOCK as u32 / ELEMENTS_PER_MINI_BLOCK as u32) * MINI_BLOCK_NUM_BYTES as u32;
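// Worked example (illustrative): a dense block holds 65_536 / 64 = 1_024 mini blocks of
// 10 bytes each (8-byte bitvec + 2-byte running rank), hence DENSE_BLOCK_NUM_BYTES == 10_240.
// For the elements {0, 3, 64}:
//   mini block 0: bits 0 and 3 set, rank = 0
//   mini block 1: bit 0 set,        rank = 2 (two elements in earlier mini blocks)
// select(2) finds the last mini block whose rank <= 2 (mini block 1), then selects
// rank 2 - 2 = 0 inside its bitvec: 1 * 64 + 0 = 64.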
|
||||
|
||||
pub struct DenseBlockCodec;
|
||||
|
||||
impl SetCodec for DenseBlockCodec {
|
||||
type Item = u16;
|
||||
type Reader<'a> = DenseBlock<'a>;
|
||||
|
||||
fn serialize(els: impl Iterator<Item = u16>, wrt: impl io::Write) -> io::Result<()> {
|
||||
serialize_dense_codec(els, wrt)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn open<'a>(data: &'a [u8]) -> Self::Reader<'a> {
|
||||
assert_eq!(data.len(), DENSE_BLOCK_NUM_BYTES as usize);
|
||||
DenseBlock(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Interpreting the bitvec as a set of integers within 0..=63,
/// and given an element, returns the number of elements in the
/// set less than the element.
///
/// # Panics
///
/// May panic or return a wrong result if el >= 64.
|
||||
#[inline(always)]
|
||||
fn rank_u64(bitvec: u64, el: u16) -> u16 {
|
||||
debug_assert!(el < 64);
|
||||
let mask = (1u64 << el) - 1;
|
||||
let masked_bitvec = bitvec & mask;
|
||||
masked_bitvec.count_ones() as u16
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn select_u64(mut bitvec: u64, rank: u16) -> u16 {
|
||||
for _ in 0..rank {
|
||||
bitvec &= bitvec - 1;
|
||||
}
|
||||
bitvec.trailing_zeros() as u16
|
||||
}
|
||||
|
||||
// TODO test the following solution on Intel... on Ryzen Zen <3 it is a catastrophe.
|
||||
// #[target_feature(enable = "bmi2")]
|
||||
// unsafe fn select_bitvec_unsafe(bitvec: u64, rank: u16) -> u16 {
|
||||
// let pdep = _pdep_u64(1u64 << rank, bitvec);
|
||||
// pdep.trailing_zeros() as u16
|
||||
// }
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
struct DenseMiniBlock {
|
||||
bitvec: u64,
|
||||
rank: u16,
|
||||
}
|
||||
|
||||
impl DenseMiniBlock {
|
||||
fn from_bytes(data: [u8; MINI_BLOCK_NUM_BYTES]) -> Self {
|
||||
let bitvec = u64::from_le_bytes(data[..MINI_BLOCK_BITVEC_NUM_BYTES].try_into().unwrap());
|
||||
let rank = u16::from_le_bytes(data[MINI_BLOCK_BITVEC_NUM_BYTES..].try_into().unwrap());
|
||||
Self { bitvec, rank }
|
||||
}
|
||||
|
||||
fn to_bytes(&self) -> [u8; MINI_BLOCK_NUM_BYTES] {
|
||||
let mut bytes = [0u8; MINI_BLOCK_NUM_BYTES];
|
||||
bytes[..MINI_BLOCK_BITVEC_NUM_BYTES].copy_from_slice(&self.bitvec.to_le_bytes());
|
||||
bytes[MINI_BLOCK_BITVEC_NUM_BYTES..].copy_from_slice(&self.rank.to_le_bytes());
|
||||
bytes
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct DenseBlock<'a>(&'a [u8]);
|
||||
|
||||
pub struct DenseBlockSelectCursor<'a> {
|
||||
block_id: u16,
|
||||
dense_block: DenseBlock<'a>,
|
||||
}
|
||||
|
||||
impl<'a> SelectCursor<u16> for DenseBlockSelectCursor<'a> {
|
||||
#[inline]
|
||||
fn select(&mut self, rank: u16) -> u16 {
|
||||
self.block_id = self
|
||||
.dense_block
|
||||
.find_miniblock_containing_rank(rank, self.block_id)
|
||||
.unwrap();
|
||||
let index_block = self.dense_block.mini_block(self.block_id);
|
||||
let in_block_rank = rank - index_block.rank;
|
||||
self.block_id * ELEMENTS_PER_MINI_BLOCK + select_u64(index_block.bitvec, in_block_rank)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Set<u16> for DenseBlock<'a> {
|
||||
type SelectCursor<'b> = DenseBlockSelectCursor<'a> where Self: 'b;
|
||||
|
||||
#[inline(always)]
|
||||
fn contains(&self, el: u16) -> bool {
|
||||
let mini_block_id = el / ELEMENTS_PER_MINI_BLOCK;
|
||||
let bitvec = self.mini_block(mini_block_id).bitvec;
|
||||
let pos_in_bitvec = el % ELEMENTS_PER_MINI_BLOCK;
|
||||
get_bit_at(bitvec, pos_in_bitvec)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn rank_if_exists(&self, el: u16) -> Option<u16> {
|
||||
let block_pos = el / ELEMENTS_PER_MINI_BLOCK;
|
||||
let index_block = self.mini_block(block_pos);
|
||||
let pos_in_block_bit_vec = el % ELEMENTS_PER_MINI_BLOCK;
|
||||
let ones_in_block = rank_u64(index_block.bitvec, pos_in_block_bit_vec);
|
||||
let rank = index_block.rank + ones_in_block;
|
||||
if get_bit_at(index_block.bitvec, pos_in_block_bit_vec) {
|
||||
Some(rank)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn rank(&self, el: u16) -> u16 {
|
||||
let block_pos = el / ELEMENTS_PER_MINI_BLOCK;
|
||||
let index_block = self.mini_block(block_pos);
|
||||
let pos_in_block_bit_vec = el % ELEMENTS_PER_MINI_BLOCK;
|
||||
let ones_in_block = rank_u64(index_block.bitvec, pos_in_block_bit_vec);
|
||||
index_block.rank + ones_in_block
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn select(&self, rank: u16) -> u16 {
|
||||
let block_id = self.find_miniblock_containing_rank(rank, 0).unwrap();
|
||||
let index_block = self.mini_block(block_id);
|
||||
let in_block_rank = rank - index_block.rank;
|
||||
block_id * ELEMENTS_PER_MINI_BLOCK + select_u64(index_block.bitvec, in_block_rank)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b> {
|
||||
DenseBlockSelectCursor {
|
||||
block_id: 0,
|
||||
dense_block: *self,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> DenseBlock<'a> {
|
||||
#[inline]
|
||||
fn mini_block(&self, mini_block_id: u16) -> DenseMiniBlock {
|
||||
let data_start_pos = mini_block_id as usize * MINI_BLOCK_NUM_BYTES;
|
||||
DenseMiniBlock::from_bytes(
|
||||
self.0[data_start_pos..data_start_pos + MINI_BLOCK_NUM_BYTES]
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn iter_miniblocks(
|
||||
&self,
|
||||
from_block_id: u16,
|
||||
) -> impl Iterator<Item = (u16, DenseMiniBlock)> + '_ {
|
||||
self.0
|
||||
.chunks_exact(MINI_BLOCK_NUM_BYTES)
|
||||
.enumerate()
|
||||
.skip(from_block_id as usize)
|
||||
.map(|(block_id, bytes)| {
|
||||
let mini_block = DenseMiniBlock::from_bytes(bytes.try_into().unwrap());
|
||||
(block_id as u16, mini_block)
|
||||
})
|
||||
}
|
||||
|
||||
/// Finds the mini block position containing the given rank.
///
/// # Correctness
/// rank needs to be smaller than the number of values in the index.
|
||||
///
|
||||
/// The last offset number is equal to the number of values in the index.
|
||||
#[inline]
|
||||
fn find_miniblock_containing_rank(&self, rank: u16, from_block_id: u16) -> Option<u16> {
|
||||
self.iter_miniblocks(from_block_id)
|
||||
.take_while(|(_, block)| block.rank <= rank)
|
||||
.map(|(block_id, _)| block_id)
|
||||
.last()
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializes a dense block from an iterator over the sorted element values.
|
||||
pub fn serialize_dense_codec(
|
||||
els: impl Iterator<Item = u16>,
|
||||
mut output: impl Write,
|
||||
) -> io::Result<()> {
|
||||
let mut non_null_rows_before: u16 = 0u16;
|
||||
let mut block = 0u64;
|
||||
let mut current_block_id = 0u16;
|
||||
for el in els {
|
||||
let block_id = el / ELEMENTS_PER_MINI_BLOCK;
|
||||
let in_offset = el % ELEMENTS_PER_MINI_BLOCK;
|
||||
while block_id > current_block_id {
|
||||
let dense_mini_block = DenseMiniBlock {
|
||||
bitvec: block,
|
||||
rank: non_null_rows_before as u16,
|
||||
};
|
||||
output.write_all(&dense_mini_block.to_bytes())?;
|
||||
non_null_rows_before += block.count_ones() as u16;
|
||||
block = 0u64;
|
||||
current_block_id += 1u16;
|
||||
}
|
||||
set_bit_at(&mut block, in_offset);
|
||||
}
|
||||
while current_block_id <= u16::MAX / ELEMENTS_PER_MINI_BLOCK {
|
||||
block.serialize(&mut output)?;
|
||||
non_null_rows_before.serialize(&mut output)?;
|
||||
// This will overflow to 0 exactly if all bits are set.
|
||||
// This is however not a problem as we won't use this last value.
|
||||
non_null_rows_before = non_null_rows_before.wrapping_add(block.count_ones() as u16);
|
||||
block = 0u64;
|
||||
current_block_id += 1u16;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_select_bitvec() {
|
||||
assert_eq!(select_u64(1u64, 0), 0);
|
||||
assert_eq!(select_u64(2u64, 0), 1);
|
||||
assert_eq!(select_u64(4u64, 0), 2);
|
||||
assert_eq!(select_u64(8u64, 0), 3);
|
||||
assert_eq!(select_u64(1 | 8u64, 0), 0);
|
||||
assert_eq!(select_u64(1 | 8u64, 1), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_ones() {
|
||||
for i in 0..=63 {
|
||||
assert_eq!(rank_u64(u64::MAX, i), i);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dense() {
|
||||
assert_eq!(DENSE_BLOCK_NUM_BYTES, 10_240);
|
||||
}
|
||||
}
|
||||
columnar/src/column_index/optional_index/set_block/mod.rs (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
mod dense;
|
||||
mod sparse;
|
||||
|
||||
pub use dense::{DenseBlock, DenseBlockCodec, DENSE_BLOCK_NUM_BYTES};
|
||||
pub use sparse::{SparseBlock, SparseBlockCodec};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
columnar/src/column_index/optional_index/set_block/sparse.rs (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
use crate::column_index::optional_index::{SelectCursor, Set, SetCodec};
|
||||
|
||||
pub struct SparseBlockCodec;
|
||||
|
||||
impl SetCodec for SparseBlockCodec {
|
||||
type Item = u16;
|
||||
type Reader<'a> = SparseBlock<'a>;
|
||||
|
||||
fn serialize(
|
||||
els: impl Iterator<Item = u16>,
|
||||
mut wrt: impl std::io::Write,
|
||||
) -> std::io::Result<()> {
|
||||
for el in els {
|
||||
wrt.write_all(&el.to_le_bytes())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn open<'a>(data: &'a [u8]) -> Self::Reader<'a> {
|
||||
SparseBlock(data)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct SparseBlock<'a>(&'a [u8]);
|
||||
|
||||
impl<'a> SelectCursor<u16> for SparseBlock<'a> {
|
||||
#[inline]
|
||||
fn select(&mut self, rank: u16) -> u16 {
|
||||
<SparseBlock<'a> as Set<u16>>::select(self, rank)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Set<u16> for SparseBlock<'a> {
|
||||
type SelectCursor<'b> = Self where Self: 'b;
|
||||
|
||||
#[inline(always)]
|
||||
fn contains(&self, el: u16) -> bool {
|
||||
self.binary_search(el).is_ok()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn rank_if_exists(&self, el: u16) -> Option<u16> {
|
||||
self.binary_search(el).ok()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn rank(&self, el: u16) -> u16 {
|
||||
self.binary_search(el).unwrap_or_else(|el| el)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn select(&self, rank: u16) -> u16 {
|
||||
let offset = rank as usize * 2;
|
||||
u16::from_le_bytes(self.0[offset..offset + 2].try_into().unwrap())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b> {
|
||||
*self
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_u16(data: &[u8], byte_position: usize) -> u16 {
|
||||
let bytes: [u8; 2] = data[byte_position..byte_position + 2].try_into().unwrap();
|
||||
u16::from_le_bytes(bytes)
|
||||
}
|
||||
|
||||
impl<'a> SparseBlock<'a> {
|
||||
#[inline(always)]
|
||||
fn value_at_idx(&self, data: &[u8], idx: u16) -> u16 {
|
||||
let start_offset: usize = idx as usize * 2;
|
||||
get_u16(data, start_offset)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u16 {
|
||||
(self.0.len() / 2) as u16
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(clippy::comparison_chain)]
|
||||
// Looks for the element in the block. Returns its position if found.
|
||||
fn binary_search(&self, target: u16) -> Result<u16, u16> {
|
||||
let data = &self.0;
|
||||
let mut size = self.num_vals();
|
||||
let mut left = 0;
|
||||
let mut right = size;
|
||||
// TODO try different implementations,
// e.g. exponential search followed by binary search.
|
||||
while left < right {
|
||||
let mid = left + size / 2;
|
||||
|
||||
// TODO do boundary check only once, and then use an
|
||||
// unsafe `value_at_idx`
|
||||
let mid_val = self.value_at_idx(data, mid);
|
||||
|
||||
if target > mid_val {
|
||||
left = mid + 1;
|
||||
} else if target < mid_val {
|
||||
right = mid;
|
||||
} else {
|
||||
return Ok(mid);
|
||||
}
|
||||
|
||||
size = right - left;
|
||||
}
|
||||
Err(left)
|
||||
}
|
||||
}
|
||||
columnar/src/column_index/optional_index/set_block/tests.rs (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::column_index::optional_index::set_block::dense::DENSE_BLOCK_NUM_BYTES;
|
||||
use crate::column_index::optional_index::set_block::{DenseBlockCodec, SparseBlockCodec};
|
||||
use crate::column_index::optional_index::{SelectCursor, Set, SetCodec};
|
||||
|
||||
fn test_set_helper<C: SetCodec<Item = u16>>(vals: &[u16]) -> usize {
|
||||
let mut buffer = Vec::new();
|
||||
C::serialize(vals.iter().copied(), &mut buffer).unwrap();
|
||||
let tested_set = C::open(buffer.as_slice());
|
||||
let hash_set: HashMap<C::Item, C::Item> = vals
|
||||
.iter()
|
||||
.copied()
|
||||
.enumerate()
|
||||
.map(|(ord, val)| (val, C::Item::try_from(ord).ok().unwrap()))
|
||||
.collect();
|
||||
for val in 0u16..=u16::MAX {
|
||||
assert_eq!(tested_set.contains(val), hash_set.contains_key(&val));
|
||||
assert_eq!(tested_set.rank_if_exists(val), hash_set.get(&val).copied());
|
||||
assert_eq!(
|
||||
tested_set.rank(val),
|
||||
vals.iter().cloned().take_while(|v| *v < val).count() as u16
|
||||
);
|
||||
}
|
||||
for rank in 0..vals.len() {
|
||||
assert_eq!(tested_set.select(rank as u16), vals[rank]);
|
||||
}
|
||||
buffer.len()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dense_block_set_u16_empty() {
|
||||
let buffer_len = test_set_helper::<DenseBlockCodec>(&[]);
|
||||
assert_eq!(buffer_len, DENSE_BLOCK_NUM_BYTES as usize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dense_block_set_u16_max() {
|
||||
let buffer_len = test_set_helper::<DenseBlockCodec>(&[u16::MAX]);
|
||||
assert_eq!(buffer_len, DENSE_BLOCK_NUM_BYTES as usize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sparse_block_set_u16_empty() {
|
||||
let buffer_len = test_set_helper::<SparseBlockCodec>(&[]);
|
||||
assert_eq!(buffer_len, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sparse_block_set_u16_max() {
|
||||
let buffer_len = test_set_helper::<SparseBlockCodec>(&[u16::MAX]);
|
||||
assert_eq!(buffer_len, 2);
|
||||
}
|
||||
|
||||
use proptest::prelude::*;
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(1))]
|
||||
#[test]
|
||||
fn test_prop_test_dense(els in proptest::collection::btree_set(0..=u16::MAX, 0..=u16::MAX as usize)) {
|
||||
let vals: Vec<u16> = els.into_iter().collect();
|
||||
let buffer_len = test_set_helper::<DenseBlockCodec>(&vals);
|
||||
assert_eq!(buffer_len, DENSE_BLOCK_NUM_BYTES as usize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prop_test_sparse(els in proptest::collection::btree_set(0..=u16::MAX, 0..=u16::MAX as usize)) {
|
||||
let vals: Vec<u16> = els.into_iter().collect();
|
||||
let buffer_len = test_set_helper::<SparseBlockCodec>(&vals);
|
||||
assert_eq!(buffer_len, vals.len() * 2);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_translate_codec_codec_idx_to_original_idx_dense() {
|
||||
let mut buffer = Vec::new();
|
||||
DenseBlockCodec::serialize([1, 3, 17, 32, 30_000, 30_001].iter().copied(), &mut buffer)
|
||||
.unwrap();
|
||||
let tested_set = DenseBlockCodec::open(buffer.as_slice());
|
||||
assert!(tested_set.contains(1));
|
||||
let mut select_cursor = tested_set.select_cursor();
|
||||
assert_eq!(select_cursor.select(0), 1);
|
||||
assert_eq!(select_cursor.select(1), 3);
|
||||
assert_eq!(select_cursor.select(2), 17);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_translate_codec_idx_to_original_idx_sparse() {
|
||||
let mut buffer = Vec::new();
|
||||
SparseBlockCodec::serialize([1, 3, 17].iter().copied(), &mut buffer).unwrap();
|
||||
let tested_set = SparseBlockCodec::open(buffer.as_slice());
|
||||
assert!(tested_set.contains(1));
|
||||
let mut select_cursor = tested_set.select_cursor();
|
||||
assert_eq!(SelectCursor::select(&mut select_cursor, 0), 1);
|
||||
assert_eq!(SelectCursor::select(&mut select_cursor, 1), 3);
|
||||
assert_eq!(SelectCursor::select(&mut select_cursor, 2), 17);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_translate_codec_idx_to_original_idx_dense() {
|
||||
let mut buffer = Vec::new();
|
||||
DenseBlockCodec::serialize(0u16..150u16, &mut buffer).unwrap();
|
||||
let tested_set = DenseBlockCodec::open(buffer.as_slice());
|
||||
assert!(tested_set.contains(1));
|
||||
let mut select_cursor = tested_set.select_cursor();
|
||||
for i in 0..150 {
|
||||
assert_eq!(i, select_cursor.select(i));
|
||||
}
|
||||
}
|
||||
columnar/src/column_index/optional_index/tests.rs (new file, 376 lines)
@@ -0,0 +1,376 @@
|
||||
use proptest::prelude::{any, prop, *};
|
||||
use proptest::strategy::Strategy;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_dense_block_threshold() {
|
||||
assert_eq!(super::DENSE_BLOCK_THRESHOLD, 5_120);
|
||||
}
|
||||
|
||||
fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
|
||||
prop_oneof![
|
||||
1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
|
||||
1 => prop::collection::vec(proptest::bool::weighted(0.00), 0..(ELEMENTS_PER_BLOCK as usize * 3)), // empty blocks
|
||||
1 => prop::collection::vec(proptest::bool::weighted(1.00), 0..(ELEMENTS_PER_BLOCK as usize + 10)), // full block
|
||||
1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..100),
|
||||
1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..u16::MAX as usize),
|
||||
8 => vec![any::<bool>()],
|
||||
]
|
||||
.boxed()
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(50))]
|
||||
#[test]
|
||||
fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
|
||||
let mut bitvec = Vec::new();
|
||||
bitvec.extend_from_slice(&bitvec1);
|
||||
bitvec.extend_from_slice(&bitvec2);
|
||||
bitvec.extend_from_slice(&bitvec3);
|
||||
test_null_index(&bitvec[..]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_random_sets_simple() {
|
||||
let vals = 10..BLOCK_SIZE * 2;
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
serialize_optional_index(&vals.clone(), 100, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
let ranks: Vec<u32> = (65_472u32..65_473u32).collect();
|
||||
let els: Vec<u32> = ranks.iter().copied().map(|rank| rank + 10).collect();
|
||||
let mut select_cursor = null_index.select_cursor();
|
||||
for (rank, el) in ranks.iter().copied().zip(els.iter().copied()) {
|
||||
assert_eq!(select_cursor.select(rank), el);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_trailing_empty_blocks() {
|
||||
test_null_index(&[false]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_one_block_false() {
|
||||
let mut iter = vec![false; ELEMENTS_PER_BLOCK as usize];
|
||||
iter.push(true);
|
||||
test_null_index(&iter[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_one_block_true() {
|
||||
let mut iter = vec![true; ELEMENTS_PER_BLOCK as usize];
|
||||
iter.push(true);
|
||||
test_null_index(&iter[..]);
|
||||
}
|
||||
|
||||
impl<'a> Iterable<RowId> for &'a [bool] {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
Box::new(
|
||||
self.iter()
|
||||
.cloned()
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _val)| pos as u32),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn test_null_index(data: &[bool]) {
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
serialize_optional_index(&data, data.len() as RowId, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
let orig_idx_with_value: Vec<u32> = data
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| **val)
|
||||
.map(|(pos, _val)| pos as u32)
|
||||
.collect();
|
||||
let mut select_iter = null_index.select_cursor();
|
||||
for i in 0..orig_idx_with_value.len() {
|
||||
assert_eq!(select_iter.select(i as u32), orig_idx_with_value[i]);
|
||||
}
|
||||
|
||||
let step_size = (orig_idx_with_value.len() / 100).max(1);
|
||||
for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate().step_by(step_size) {
|
||||
assert_eq!(null_index.rank_if_exists(*orig_idx), Some(dense_idx as u32));
|
||||
}
|
||||
|
||||
// 100 samples
|
||||
let step_size = (data.len() / 100).max(1);
|
||||
for (pos, value) in data.iter().enumerate().step_by(step_size) {
|
||||
assert_eq!(null_index.contains(pos as u32), *value);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_test_translation() {
|
||||
let mut out = vec![];
|
||||
let iter = &[true, false, true, false];
|
||||
serialize_optional_index(&&iter[..], iter.len() as u32, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
let mut select_cursor = null_index.select_cursor();
|
||||
assert_eq!(select_cursor.select(0), 0);
|
||||
assert_eq!(select_cursor.select(1), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_translate() {
|
||||
let mut out = vec![];
|
||||
let iter = &[true, false, true, false];
|
||||
serialize_optional_index(&&iter[..], iter.len() as RowId, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
assert_eq!(null_index.rank_if_exists(0), Some(0));
|
||||
assert_eq!(null_index.rank_if_exists(2), Some(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_small() {
|
||||
let mut out = vec![];
|
||||
let iter = &[true, false, true, false];
|
||||
serialize_optional_index(&&iter[..], iter.len() as RowId, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
assert!(null_index.contains(0));
|
||||
assert!(!null_index.contains(1));
|
||||
assert!(null_index.contains(2));
|
||||
assert!(!null_index.contains(3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_large() {
|
||||
let mut docs = vec![];
|
||||
docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
|
||||
docs.extend((0..=1).map(|_idx| true));
|
||||
|
||||
let mut out = vec![];
|
||||
serialize_optional_index(&&docs[..], docs.len() as RowId, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
assert!(!null_index.contains(0));
|
||||
assert!(!null_index.contains(100));
|
||||
assert!(!null_index.contains(ELEMENTS_PER_BLOCK - 1));
|
||||
assert!(null_index.contains(ELEMENTS_PER_BLOCK));
|
||||
assert!(null_index.contains(ELEMENTS_PER_BLOCK + 1));
|
||||
}
|
||||
|
||||
fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(buffer)).unwrap();
|
||||
assert_eq!(null_index.num_rows(), num_rows);
|
||||
assert!(null_index.iter_rows().eq(row_ids.iter().copied()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_iter_empty() {
|
||||
test_optional_index_iter_aux(&[], 0u32);
|
||||
}
|
||||
|
||||
fn test_optional_index_rank_aux(row_ids: &[RowId]) {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
let num_rows = row_ids.last().copied().unwrap_or(0u32) + 1;
|
||||
serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(buffer)).unwrap();
|
||||
assert_eq!(null_index.num_rows(), num_rows);
|
||||
for (row_id, row_val) in row_ids.iter().copied().enumerate() {
|
||||
assert_eq!(null_index.rank(row_val), row_id as u32);
|
||||
assert_eq!(null_index.rank_if_exists(row_val), Some(row_id as u32));
|
||||
if row_val > 0 && !null_index.contains(row_val - 1) {
|
||||
assert_eq!(null_index.rank(row_val - 1), row_id as u32);
|
||||
}
|
||||
assert_eq!(null_index.rank(row_val + 1), row_id as u32 + 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_rank() {
|
||||
test_optional_index_rank_aux(&[1u32]);
|
||||
test_optional_index_rank_aux(&[0u32, 1u32]);
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
test_optional_index_rank_aux(&block);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_iter_empty_one() {
|
||||
test_optional_index_iter_aux(&[1], 2u32);
|
||||
test_optional_index_iter_aux(&[100_000], 200_000u32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_iter_dense_block() {
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::Bencher;
|
||||
|
||||
use super::*;
|
||||
|
||||
const TOTAL_NUM_VALUES: u32 = 1_000_000;
|
||||
fn gen_bools(fill_ratio: f64) -> OptionalIndex {
|
||||
let mut out = Vec::new();
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let vals: Vec<bool> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.collect();
|
||||
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
|
||||
let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
codec
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
start: u32,
|
||||
end: u32,
|
||||
avg_step_size: u32,
|
||||
avg_deviation: u32,
|
||||
) -> impl Iterator<Item = u32> {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let mut current = start;
|
||||
std::iter::from_fn(move || {
|
||||
current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
|
||||
if current >= end {
|
||||
None
|
||||
} else {
|
||||
Some(current)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent as f32 / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
}
|
||||
|
||||
fn walk_over_data(codec: &OptionalIndex, avg_step_size: u32) -> Option<u32> {
|
||||
walk_over_data_from_positions(
|
||||
codec,
|
||||
random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
|
||||
)
|
||||
}
|
||||
|
||||
fn walk_over_data_from_positions(
|
||||
codec: &OptionalIndex,
|
||||
positions: impl Iterator<Item = u32>,
|
||||
) -> Option<u32> {
|
||||
let mut dense_idx: Option<u32> = None;
|
||||
for idx in positions {
|
||||
dense_idx = dense_idx.or(codec.rank_if_exists(idx));
|
||||
}
|
||||
dense_idx
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_1percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.01f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_5percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.05f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_5percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.05f64);
|
||||
bench.iter(|| walk_over_data(&codec, 1000));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_1percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.01f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_10percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.1f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_90percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.9f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_10percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.1f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_50percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.5f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_90percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.9f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 0.005f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_10percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.1f64, 0.005f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 10f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 100f32, bench);
|
||||
}
|
||||
|
||||
fn bench_translate_codec_to_orig_util(
|
||||
percent_filled: f64,
|
||||
percent_hit: f32,
|
||||
bench: &mut Bencher,
|
||||
) {
|
||||
let codec = gen_bools(percent_filled);
|
||||
let num_non_nulls = codec.num_non_nulls();
|
||||
let idxs: Vec<u32> = if percent_hit == 100.0f32 {
|
||||
(0..num_non_nulls).collect()
|
||||
} else {
|
||||
n_percent_step_iterator(percent_hit, num_non_nulls).collect()
|
||||
};
|
||||
let mut output = vec![0u32; idxs.len()];
|
||||
bench.iter(|| {
|
||||
output.copy_from_slice(&idxs[..]);
|
||||
codec.select_batch(&mut output);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_90percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.9f64, 0.005, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.9f64, 100.0f32, bench);
|
||||
}
|
||||
}
|
||||
78
columnar/src/column_index/serialize.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use common::{CountingWriter, OwnedBytes};
|
||||
|
||||
use crate::column_index::multivalued_index::{self, serialize_multivalued_index};
|
||||
use crate::column_index::optional_index::serialize_optional_index;
|
||||
use crate::column_index::ColumnIndex;
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, RowId};
|
||||
|
||||
pub enum SerializableColumnIndex<'a> {
|
||||
Full,
|
||||
Optional {
|
||||
non_null_row_ids: Box<dyn Fn() -> Box<dyn Iterator<Item=RowId> + 'a> + 'a>,
|
||||
num_rows: RowId,
|
||||
},
|
||||
// TODO: remove the `Arc<dyn>`; apart from serialization, this is not
// dynamic at all.
|
||||
Multivalued(&'a dyn Fn() -> Box<dyn Iterator<Item=RowId> + 'a>),
|
||||
}
|
||||
|
||||
impl<'a> SerializableColumnIndex<'a> {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
SerializableColumnIndex::Full => Cardinality::Full,
|
||||
SerializableColumnIndex::Optional { .. } => Cardinality::Optional,
|
||||
SerializableColumnIndex::Multivalued(_) => Cardinality::Multivalued,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn serialize_column_index<'a>(
|
||||
column_index: SerializableColumnIndex<'a>,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<u32> {
|
||||
let mut output = CountingWriter::wrap(output);
|
||||
let cardinality = column_index.get_cardinality().to_code();
|
||||
output.write_all(&[cardinality])?;
|
||||
match column_index {
|
||||
SerializableColumnIndex::Full => {}
|
||||
SerializableColumnIndex::Optional {
|
||||
non_null_row_ids,
|
||||
num_rows,
|
||||
} => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
|
||||
SerializableColumnIndex::Multivalued(multivalued_index) => {
|
||||
serialize_multivalued_index(multivalued_index, &mut output)?
|
||||
}
|
||||
}
|
||||
let column_index_num_bytes = output.written_bytes() as u32;
|
||||
Ok(column_index_num_bytes)
|
||||
}
|
||||
|
||||
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
|
||||
if bytes.is_empty() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"Failed to deserialize column index. Empty buffer.",
|
||||
));
|
||||
}
|
||||
let cardinality_code = bytes[0];
|
||||
let cardinality = Cardinality::try_from_code(cardinality_code)?;
|
||||
bytes.advance(1);
|
||||
match cardinality {
|
||||
Cardinality::Full => Ok(ColumnIndex::Full),
|
||||
Cardinality::Optional => {
|
||||
let optional_index = super::optional_index::open_optional_index(bytes)?;
|
||||
Ok(ColumnIndex::Optional(optional_index))
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalue_index = super::multivalued_index::open_multivalued_index(bytes)?;
|
||||
Ok(ColumnIndex::Multivalued(multivalue_index))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO unit tests
|
||||
384
columnar/src/column_values/column.rs
Normal file
@@ -0,0 +1,384 @@
|
||||
use std::fmt::Debug;
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;
|
||||
use crate::iterable::Iterable;
|
||||
|
||||
/// `ColumnValues` provides access to a dense field column.
|
||||
///
|
||||
/// A `Column` is just a wrapper over a `ColumnValues` and a `ColumnIndex`.
|
||||
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
/// Return the value associated with the given idx.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `idx` is greater than or equal to the column length.
|
||||
fn get_val(&self, idx: u32) -> T;
|
||||
|
||||
/// Fills an output buffer with the fast field values
|
||||
/// associated with the `DocId` going from
|
||||
/// `start` to `start + output.len()`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Must panic if `start + output.len()` is greater than
|
||||
/// the segment's `maxdoc`.
|
||||
#[inline(always)]
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
for (out, idx) in output.iter_mut().zip(start..) {
|
||||
*out = self.get_val(idx as u32);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the positions of values which are in the provided value range.
|
||||
///
|
||||
/// Note that position == docid for single value fast fields
|
||||
#[inline(always)]
|
||||
fn get_docids_for_value_range(
|
||||
&self,
|
||||
value_range: RangeInclusive<T>,
|
||||
doc_id_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
|
||||
for idx in doc_id_range.start..doc_id_range.end {
|
||||
let val = self.get_val(idx);
|
||||
if value_range.contains(&val) {
|
||||
positions.push(idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the minimum value for this fast field.
|
||||
///
|
||||
/// This min_value may not be exact.
/// For instance, the min value does not take deleted documents into account.
/// All values are however guaranteed to be greater than or equal to
/// `.min_value()`.
|
||||
fn min_value(&self) -> T;
|
||||
|
||||
/// Returns the maximum value for this fast field.
|
||||
///
|
||||
/// This max_value may not be exact.
/// For instance, the max value does not take deleted documents into account.
/// All values are however guaranteed to be lower than or equal to
/// `.max_value()`.
|
||||
fn max_value(&self) -> T;
|
||||
|
||||
/// The number of values in the column.
|
||||
fn num_vals(&self) -> u32;
|
||||
|
||||
/// Returns an iterator over the data.
|
||||
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
|
||||
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.as_ref().get_val(idx)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> T {
|
||||
self.as_ref().min_value()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> T {
|
||||
self.as_ref().max_value()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.as_ref().num_vals()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
|
||||
self.as_ref().iter()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
self.as_ref().get_range(start, output)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, C: ColumnValues<T> + ?Sized, T: Copy + PartialOrd + Debug> ColumnValues<T> for &'a C {
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
(*self).get_val(idx)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T {
|
||||
(*self).min_value()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T {
|
||||
(*self).max_value()
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
(*self).num_vals()
|
||||
}
|
||||
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
|
||||
(*self).iter()
|
||||
}
|
||||
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
(*self).get_range(start, output)
|
||||
}
|
||||
}
|
||||
|
||||
/// `VecColumn` provides a `Column` over a slice.
|
||||
pub struct VecColumn<'a, T = u64> {
|
||||
pub(crate) values: &'a [T],
|
||||
pub(crate) min_value: T,
|
||||
pub(crate) max_value: T,
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColumn<'a, T> {
|
||||
fn get_val(&self, position: u32) -> T {
|
||||
self.values[position as usize]
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
Box::new(self.values.iter().copied())
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T {
|
||||
self.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.values.len() as u32
|
||||
}
|
||||
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
output.copy_from_slice(&self.values[start as usize..][..output.len()])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
|
||||
where V: AsRef<[T]> + ?Sized
|
||||
{
|
||||
fn from(values: &'a V) -> Self {
|
||||
let values = values.as_ref();
|
||||
let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
|
||||
Self {
|
||||
values,
|
||||
min_value,
|
||||
max_value,
|
||||
}
|
||||
}
|
||||
}
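// --- Illustrative sketch (not part of the original file) ---
// How the `From<&V>` impl above is typically used: wrap a slice-like value,
// and min/max are computed once up front via `minmax`, so the accessors stay O(1).
// The function name `vec_column_demo` is made up for illustration.
#[allow(dead_code)]
fn vec_column_demo() {
let vals = vec![7u64, 3, 9, 5];
let col = VecColumn::from(&vals);
assert_eq!(col.min_value(), 3);
assert_eq!(col.max_value(), 9);
assert_eq!(col.num_vals(), 4);
assert_eq!(col.get_val(2), 9);
}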
|
||||
|
||||
struct MonotonicMappingColumn<C, T, Input> {
|
||||
from_column: C,
|
||||
monotonic_mapping: T,
|
||||
_phantom: PhantomData<Input>,
|
||||
}
|
||||
|
||||
/// Creates a view of a column transformed by a strictly monotonic mapping. See
|
||||
/// [`StrictlyMonotonicFn`].
|
||||
///
|
||||
/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
|
||||
/// monotonic_mapping.mapping() is expected to be injective, and we should always have
|
||||
/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
|
||||
///
|
||||
/// The inverse of the mapping is required for:
|
||||
/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
|
||||
/// The user provides the original value range and we need to monotonic map them in the same way the
|
||||
/// serialization does before calling the underlying column.
|
||||
///
|
||||
/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
|
||||
/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
|
||||
/// monotonic_mapping during serialization.
|
||||
pub fn monotonic_map_column<C, T, Input, Output>(
|
||||
from_column: C,
|
||||
monotonic_mapping: T,
|
||||
) -> impl ColumnValues<Output>
|
||||
where
|
||||
C: ColumnValues<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Debug + Send + Sync + Clone,
|
||||
Output: PartialOrd + Debug + Send + Sync + Clone,
|
||||
{
|
||||
MonotonicMappingColumn {
|
||||
from_column,
|
||||
monotonic_mapping,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
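// --- Illustrative sketch (not part of the original file) ---
// Shows the role of the mapping/inverse pair described above, using a toy
// `base = 100, gcd = 10` style mapping (both closures are made up for
// illustration): range queries are translated into the internal space with the
// inverse before the underlying column is scanned, and the round trip is the
// identity on stored values.
#[allow(dead_code)]
fn monotonic_mapping_sketch() {
let internal_to_external = |val: u64| val * 10 + 100; // "mapping"
let external_to_internal = |val: u64| (val - 100) / 10; // "inverse"
// Translate the user-provided value range 120..=150 into internal space.
let internal_range = external_to_internal(120)..=external_to_internal(150);
assert_eq!(internal_range, 2..=5);
// Round trip on a stored value.
assert_eq!(internal_to_external(external_to_internal(130)), 130);
}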
|
||||
|
||||
impl<C, T, Input, Output> ColumnValues<Output> for MonotonicMappingColumn<C, T, Input>
|
||||
where
|
||||
C: ColumnValues<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Send + Debug + Sync + Clone,
|
||||
Output: PartialOrd + Send + Debug + Sync + Clone,
|
||||
{
|
||||
#[inline]
|
||||
fn get_val(&self, idx: u32) -> Output {
|
||||
let from_val = self.from_column.get_val(idx);
|
||||
self.monotonic_mapping.mapping(from_val)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> Output {
|
||||
let from_min_value = self.from_column.min_value();
|
||||
self.monotonic_mapping.mapping(from_min_value)
|
||||
}
|
||||
|
||||
fn max_value(&self) -> Output {
|
||||
let from_max_value = self.from_column.max_value();
|
||||
self.monotonic_mapping.mapping(from_max_value)
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.from_column.num_vals()
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
|
||||
Box::new(
|
||||
self.from_column
|
||||
.iter()
|
||||
.map(|el| self.monotonic_mapping.mapping(el)),
|
||||
)
|
||||
}
|
||||
|
||||
fn get_docids_for_value_range(
|
||||
&self,
|
||||
range: RangeInclusive<Output>,
|
||||
doc_id_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
self.from_column.get_docids_for_value_range(
|
||||
self.monotonic_mapping.inverse(range.start().clone())
|
||||
..=self.monotonic_mapping.inverse(range.end().clone()),
|
||||
doc_id_range,
|
||||
positions,
|
||||
)
|
||||
}
|
||||
|
||||
// We voluntarily do not implement get_range as it yields a regression,
|
||||
// and we do not have any specialized implementation anyway.
|
||||
}
|
||||
|
||||
/// Wraps an iterator into a `Column`.
|
||||
pub struct IterColumn<T>(T);
|
||||
|
||||
impl<T> From<T> for IterColumn<T>
|
||||
where T: Iterator + Clone + ExactSizeIterator
|
||||
{
|
||||
fn from(iter: T) -> Self {
|
||||
IterColumn(iter)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ColumnValues<T::Item> for IterColumn<T>
|
||||
where
|
||||
T: Iterator + Clone + ExactSizeIterator + Send + Sync,
|
||||
T::Item: PartialOrd + Debug,
|
||||
{
|
||||
fn get_val(&self, idx: u32) -> T::Item {
|
||||
self.0.clone().nth(idx as usize).unwrap()
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T::Item {
|
||||
self.0.clone().next().unwrap()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T::Item {
|
||||
self.0.clone().last().unwrap()
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.0.len() as u32
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
|
||||
Box::new(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::monotonic_mapping::{
|
||||
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternalBaseval,
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_monotonic_mapping() {
|
||||
let vals = &[3u64, 5u64][..];
|
||||
let col = VecColumn::from(vals);
|
||||
let mapped = monotonic_map_column(col, StrictlyMonotonicMappingToInternalBaseval::new(2));
|
||||
assert_eq!(mapped.min_value(), 1u64);
|
||||
assert_eq!(mapped.max_value(), 3u64);
|
||||
assert_eq!(mapped.num_vals(), 2);
|
||||
assert_eq!(mapped.get_val(0), 1);
|
||||
assert_eq!(mapped.get_val(1), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_as_col() {
|
||||
let col = IterColumn::from(10..100);
|
||||
assert_eq!(col.num_vals(), 90);
|
||||
assert_eq!(col.max_value(), 99);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_monotonic_mapping_iter() {
|
||||
let vals: Vec<u64> = (10..110u64).map(|el| el * 10).collect();
|
||||
let col = VecColumn::from(&vals);
|
||||
let mapped = monotonic_map_column(
|
||||
col,
|
||||
StrictlyMonotonicMappingInverter::from(
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100),
|
||||
),
|
||||
);
|
||||
let val_i64s: Vec<u64> = mapped.iter().collect();
|
||||
for i in 0..100 {
|
||||
assert_eq!(val_i64s[i as usize], mapped.get_val(i));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_monotonic_mapping_get_range() {
|
||||
let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
|
||||
let col = VecColumn::from(&vals);
|
||||
let mapped = monotonic_map_column(
|
||||
col,
|
||||
StrictlyMonotonicMappingInverter::from(
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 0),
|
||||
),
|
||||
);
|
||||
|
||||
assert_eq!(mapped.min_value(), 0u64);
|
||||
assert_eq!(mapped.max_value(), 9900u64);
|
||||
assert_eq!(mapped.num_vals(), 100);
|
||||
let val_u64s: Vec<u64> = mapped.iter().collect();
|
||||
assert_eq!(val_u64s.len(), 100);
|
||||
for i in 0..100 {
|
||||
assert_eq!(val_u64s[i as usize], mapped.get_val(i));
|
||||
assert_eq!(val_u64s[i as usize], vals[i as usize] * 10);
|
||||
}
|
||||
let mut buf = [0u64; 20];
|
||||
mapped.get_range(7, &mut buf[..]);
|
||||
assert_eq!(&val_u64s[7..][..20], &buf);
|
||||
}
|
||||
}
|
||||
43
columnar/src/column_values/compact_space/blank_range.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
/// The range of a blank in value space.
|
||||
///
|
||||
/// A blank is an unoccupied space in the data.
|
||||
/// Use try_into() to construct.
|
||||
/// A range must have a length of at least 3; invalid ranges are rejected.
|
||||
///
|
||||
/// Ordered by range length.
|
||||
#[derive(Debug, Eq, PartialEq, Clone)]
|
||||
pub(crate) struct BlankRange {
|
||||
blank_range: RangeInclusive<u128>,
|
||||
}
|
||||
impl TryFrom<RangeInclusive<u128>> for BlankRange {
|
||||
type Error = &'static str;
|
||||
fn try_from(range: RangeInclusive<u128>) -> Result<Self, Self::Error> {
|
||||
let blank_size = range.end().saturating_sub(*range.start());
|
||||
if blank_size < 2 {
|
||||
Err("invalid range")
|
||||
} else {
|
||||
Ok(BlankRange { blank_range: range })
|
||||
}
|
||||
}
|
||||
}
|
||||
impl BlankRange {
|
||||
pub(crate) fn blank_size(&self) -> u128 {
|
||||
self.blank_range.end() - self.blank_range.start() + 1
|
||||
}
|
||||
pub(crate) fn blank_range(&self) -> RangeInclusive<u128> {
|
||||
self.blank_range.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for BlankRange {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
self.blank_size().cmp(&other.blank_size())
|
||||
}
|
||||
}
|
||||
impl PartialOrd for BlankRange {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.blank_size().cmp(&other.blank_size()))
|
||||
}
|
||||
}
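// --- Illustrative sketch (not part of the original file) ---
// The "length of at least 3" rule above with plain numbers: `try_from` rejects
// a gap when `end.saturating_sub(start) < 2`, so a two-value gap is not worth
// tracking while a three-value gap is a candidate blank.
#[allow(dead_code)]
fn blank_range_rule_sketch() {
let is_accepted = |range: std::ops::RangeInclusive<u128>| range.end().saturating_sub(*range.start()) >= 2;
assert!(!is_accepted(5..=6)); // only two unoccupied values
assert!(is_accepted(5..=7)); // three unoccupied values: large enough
}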
|
||||
231
columnar/src/column_values/compact_space/build_compact_space.rs
Normal file
@@ -0,0 +1,231 @@
|
||||
use std::collections::{BTreeSet, BinaryHeap};
|
||||
use std::iter;
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
use super::blank_range::BlankRange;
|
||||
use super::{CompactSpace, RangeMapping};
|
||||
|
||||
/// Put the blanks for the sorted values into a binary heap
|
||||
fn get_blanks(values_sorted: &BTreeSet<u128>) -> BinaryHeap<BlankRange> {
|
||||
let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
|
||||
for (first, second) in values_sorted.iter().tuple_windows() {
|
||||
// Overflow correctness: the values are deduped and sorted (BTreeSet property), so
// `second > first` and neither `first + 1` nor `second - 1` can overflow.
|
||||
let blank_range = first + 1..=second - 1;
|
||||
let blank_range: Result<BlankRange, _> = blank_range.try_into();
|
||||
if let Ok(blank_range) = blank_range {
|
||||
blanks.push(blank_range);
|
||||
}
|
||||
}
|
||||
|
||||
blanks
|
||||
}
|
||||
|
||||
struct BlankCollector {
|
||||
blanks: Vec<BlankRange>,
|
||||
staged_blanks_sum: u128,
|
||||
}
|
||||
impl BlankCollector {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
blanks: vec![],
|
||||
staged_blanks_sum: 0,
|
||||
}
|
||||
}
|
||||
fn stage_blank(&mut self, blank: BlankRange) {
|
||||
self.staged_blanks_sum += blank.blank_size();
|
||||
self.blanks.push(blank);
|
||||
}
|
||||
fn drain(&mut self) -> impl Iterator<Item = BlankRange> + '_ {
|
||||
self.staged_blanks_sum = 0;
|
||||
self.blanks.drain(..)
|
||||
}
|
||||
fn staged_blanks_sum(&self) -> u128 {
|
||||
self.staged_blanks_sum
|
||||
}
|
||||
fn num_staged_blanks(&self) -> usize {
|
||||
self.blanks.len()
|
||||
}
|
||||
}
|
||||
fn num_bits(val: u128) -> u8 {
|
||||
(128u32 - val.leading_zeros()) as u8
|
||||
}
|
||||
|
||||
/// Collects blanks and adds them to the compact space if more bits are saved than the
/// metadata costs.
|
||||
pub fn get_compact_space(
|
||||
values_deduped_sorted: &BTreeSet<u128>,
|
||||
total_num_values: u32,
|
||||
cost_per_blank: usize,
|
||||
) -> CompactSpace {
|
||||
let mut compact_space_builder = CompactSpaceBuilder::new();
|
||||
if values_deduped_sorted.is_empty() {
|
||||
return compact_space_builder.finish();
|
||||
}
|
||||
|
||||
let mut blanks: BinaryHeap<BlankRange> = get_blanks(values_deduped_sorted);
|
||||
// Replace after stabilization of https://github.com/rust-lang/rust/issues/62924
|
||||
|
||||
// We start by space that's limited to min_value..=max_value
|
||||
let min_value = *values_deduped_sorted.iter().next().unwrap_or(&0);
|
||||
let max_value = *values_deduped_sorted.iter().last().unwrap_or(&0);
|
||||
|
||||
// +1 for null; if min and max cover the whole space, we would otherwise be off by one.
|
||||
let mut amplitude_compact_space = (max_value - min_value).saturating_add(1);
|
||||
if min_value != 0 {
|
||||
compact_space_builder.add_blanks(iter::once(0..=min_value - 1));
|
||||
}
|
||||
if max_value != u128::MAX {
|
||||
compact_space_builder.add_blanks(iter::once(max_value + 1..=u128::MAX));
|
||||
}
|
||||
|
||||
let mut amplitude_bits: u8 = num_bits(amplitude_compact_space);
|
||||
|
||||
let mut blank_collector = BlankCollector::new();
|
||||
// We will stage blanks until they reduce the compact space by at least 1 bit and then flush
|
||||
// them if the metadata cost is lower than the total number of saved bits.
|
||||
// Binary heap to process the gaps by their size
|
||||
while let Some(blank_range) = blanks.pop() {
|
||||
blank_collector.stage_blank(blank_range);
|
||||
|
||||
let staged_spaces_sum: u128 = blank_collector.staged_blanks_sum();
|
||||
let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum;
|
||||
let amplitude_new_bits = num_bits(amplitude_new_compact_space);
|
||||
if amplitude_bits == amplitude_new_bits {
|
||||
continue;
|
||||
}
|
||||
let saved_bits = (amplitude_bits - amplitude_new_bits) as usize * total_num_values as usize;
|
||||
// TODO: Maybe calculate exact cost of blanks and run this more expensive computation only,
|
||||
// when amplitude_new_bits changes
|
||||
let cost = blank_collector.num_staged_blanks() * cost_per_blank;
|
||||
if cost >= saved_bits {
|
||||
// Continue here: although we walk over the blanks by decreasing size,
// later, smaller blanks can still save a lot of bits.
//
// E.g. if the first range reduces the compact space by 1000 from 2000 to 1000, which
// saves 11-10=1 bit, and the next range reduces the compact space by 950 to
// 50, which saves 10-6=4 bits.
|
||||
continue;
|
||||
}
|
||||
|
||||
amplitude_compact_space = amplitude_new_compact_space;
|
||||
amplitude_bits = amplitude_new_bits;
|
||||
compact_space_builder.add_blanks(blank_collector.drain().map(|blank| blank.blank_range()));
|
||||
}
|
||||
|
||||
// Special case: we didn't collect any blanks because
// * the data is empty (early exit)
// * the algorithm decided it's not worth the cost, which can be the case for single values
//
// We then add one collected blank unconditionally, so an empty compact space is reserved
// for empty data: an empty compact_space means the data is empty, no data is covered
// (as opposed to all data), and we can assign null to it.
|
||||
if compact_space_builder.is_empty() {
|
||||
compact_space_builder.add_blanks(
|
||||
blank_collector
|
||||
.drain()
|
||||
.map(|blank| blank.blank_range())
|
||||
.take(1),
|
||||
);
|
||||
}
|
||||
|
||||
let compact_space = compact_space_builder.finish();
|
||||
if max_value - min_value != u128::MAX {
|
||||
debug_assert_eq!(
|
||||
compact_space.amplitude_compact_space(),
|
||||
amplitude_compact_space
|
||||
);
|
||||
}
|
||||
compact_space
|
||||
}
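// --- Illustrative sketch (not part of the original file) ---
// The cost/benefit rule used above, in rough numbers: staged blanks are flushed
// once the bits saved across all values exceed the metadata cost of storing the
// blanks. All concrete numbers below are made up for illustration.
#[allow(dead_code)]
fn blank_cost_benefit_sketch() {
let total_num_values: usize = 1_000_000;
let cost_per_blank: usize = 36; // same order as COST_PER_BLANK_IN_BITS
let amplitude_bits_before: usize = 40;
let amplitude_bits_after: usize = 32;
let num_staged_blanks: usize = 1_000;
let saved_bits = (amplitude_bits_before - amplitude_bits_after) * total_num_values;
let cost = num_staged_blanks * cost_per_blank;
// 8_000_000 bits saved vs. 36_000 bits of metadata: these blanks are accepted.
assert!(saved_bits > cost);
}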
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
struct CompactSpaceBuilder {
|
||||
blanks: Vec<RangeInclusive<u128>>,
|
||||
}
|
||||
|
||||
impl CompactSpaceBuilder {
|
||||
/// Creates a new compact space builder which will initially cover the whole space.
|
||||
fn new() -> Self {
|
||||
Self { blanks: Vec::new() }
|
||||
}
|
||||
|
||||
/// Assumes that repeated `add_blanks` calls don't overlap and are not adjacent,
/// e.g. [3..=5, 5..=10] is not allowed.
|
||||
///
|
||||
/// Both of those assumptions are true when blanks are produced from sorted values.
|
||||
fn add_blanks(&mut self, blank: impl Iterator<Item = RangeInclusive<u128>>) {
|
||||
self.blanks.extend(blank);
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.blanks.is_empty()
|
||||
}
|
||||
|
||||
/// Convert blanks to covered space and assign null value
|
||||
fn finish(mut self) -> CompactSpace {
|
||||
// sort by start. ranges are not allowed to overlap
|
||||
self.blanks.sort_unstable_by_key(|blank| *blank.start());
|
||||
|
||||
let mut covered_space = Vec::with_capacity(self.blanks.len());
|
||||
|
||||
// Beginning of the blanks
|
||||
if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start) {
|
||||
if *first_blank_start != 0 {
|
||||
covered_space.push(0..=first_blank_start - 1);
|
||||
}
|
||||
}
|
||||
|
||||
// Between the blanks
|
||||
let between_blanks = self.blanks.iter().tuple_windows().map(|(left, right)| {
|
||||
assert!(
|
||||
left.end() < right.start(),
|
||||
"overlapping or adjacent ranges detected"
|
||||
);
|
||||
*left.end() + 1..=*right.start() - 1
|
||||
});
|
||||
covered_space.extend(between_blanks);
|
||||
|
||||
// end of the blanks
|
||||
if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end) {
|
||||
if *last_blank_end != u128::MAX {
|
||||
covered_space.push(last_blank_end + 1..=u128::MAX);
|
||||
}
|
||||
}
|
||||
|
||||
if covered_space.is_empty() {
|
||||
covered_space.push(0..=0); // empty data case
|
||||
};
|
||||
|
||||
let mut compact_start: u64 = 1; // 0 is reserved for `null`
|
||||
let mut ranges_mapping: Vec<RangeMapping> = Vec::with_capacity(covered_space.len());
|
||||
for cov in covered_space {
|
||||
let range_mapping = super::RangeMapping {
|
||||
value_range: cov,
|
||||
compact_start,
|
||||
};
|
||||
let covered_range_len = range_mapping.range_length();
|
||||
ranges_mapping.push(range_mapping);
|
||||
compact_start += covered_range_len;
|
||||
}
|
||||
// println!("num ranges {}", ranges_mapping.len());
|
||||
CompactSpace { ranges_mapping }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_binary_heap_pop_order() {
|
||||
let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
|
||||
blanks.push((0..=10).try_into().unwrap());
|
||||
blanks.push((100..=200).try_into().unwrap());
|
||||
blanks.push((100..=110).try_into().unwrap());
|
||||
assert_eq!(blanks.pop().unwrap().blank_size(), 101);
|
||||
assert_eq!(blanks.pop().unwrap().blank_size(), 11);
|
||||
}
|
||||
}
|
||||
813
columnar/src/column_values/compact_space/mod.rs
Normal file
@@ -0,0 +1,813 @@
|
||||
/// This codec takes a large number space (u128) and reduces it to a compact number space.
|
||||
///
|
||||
/// It will find spaces in the number range. For example:
|
||||
///
|
||||
/// 100, 101, 102, 103, 104, 50000, 50001
|
||||
/// could be mapped to
|
||||
/// 100..104 -> 0..4
|
||||
/// 50000..50001 -> 5..6
|
||||
///
|
||||
/// Compact space 0..=6 requires far fewer bits than 100..=50001.
///
/// The codec was created to compress IP addresses, but may be employed in other use cases.
|
||||
use std::{
|
||||
cmp::Ordering,
|
||||
collections::BTreeSet,
|
||||
io::{self, Write},
|
||||
ops::{Range, RangeInclusive},
|
||||
};
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
|
||||
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::compact_space::build_compact_space::get_compact_space;
|
||||
use crate::column_values::ColumnValues;
|
||||
|
||||
mod blank_range;
|
||||
mod build_compact_space;
|
||||
|
||||
/// The exact cost per blank is hard to pin down: since blanks are delta encoded, the actual
/// cost of a blank depends on the total number of blanks.
///
/// The number below was chosen by looking at a real dataset. It is optimized for larger datasets.
|
||||
const COST_PER_BLANK_IN_BITS: usize = 36;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct CompactSpace {
|
||||
ranges_mapping: Vec<RangeMapping>,
|
||||
}
|
||||
|
||||
/// Maps the range from the original space to compact_start + range.len()
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
struct RangeMapping {
|
||||
value_range: RangeInclusive<u128>,
|
||||
compact_start: u64,
|
||||
}
|
||||
impl RangeMapping {
|
||||
fn range_length(&self) -> u64 {
|
||||
(self.value_range.end() - self.value_range.start()) as u64 + 1
|
||||
}
|
||||
|
||||
// The last value of the compact space in this range
|
||||
fn compact_end(&self) -> u64 {
|
||||
self.compact_start + self.range_length() - 1
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for CompactSpace {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.ranges_mapping.len() as u64).serialize(writer)?;
|
||||
|
||||
let mut prev_value = 0;
|
||||
for value_range in self
|
||||
.ranges_mapping
|
||||
.iter()
|
||||
.map(|range_mapping| &range_mapping.value_range)
|
||||
{
|
||||
let blank_delta_start = value_range.start() - prev_value;
|
||||
VIntU128(blank_delta_start).serialize(writer)?;
|
||||
prev_value = *value_range.start();
|
||||
|
||||
let blank_delta_end = value_range.end() - prev_value;
|
||||
VIntU128(blank_delta_end).serialize(writer)?;
|
||||
prev_value = *value_range.end();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let num_ranges = VInt::deserialize(reader)?.0;
|
||||
let mut ranges_mapping: Vec<RangeMapping> = vec![];
|
||||
let mut value = 0u128;
|
||||
let mut compact_start = 1u64; // 0 is reserved for `null`
|
||||
for _ in 0..num_ranges {
|
||||
let blank_delta_start = VIntU128::deserialize(reader)?.0;
|
||||
value += blank_delta_start;
|
||||
let blank_start = value;
|
||||
|
||||
let blank_delta_end = VIntU128::deserialize(reader)?.0;
|
||||
value += blank_delta_end;
|
||||
let blank_end = value;
|
||||
|
||||
let range_mapping = RangeMapping {
|
||||
value_range: blank_start..=blank_end,
|
||||
compact_start,
|
||||
};
|
||||
let range_length = range_mapping.range_length();
|
||||
ranges_mapping.push(range_mapping);
|
||||
compact_start += range_length;
|
||||
}
|
||||
|
||||
Ok(Self { ranges_mapping })
|
||||
}
|
||||
}
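// --- Illustrative sketch (not part of the original file) ---
// The delta encoding performed by the (de)serialization above, with plain
// u128s instead of VInts: each range stores its start as a delta from the
// previous range's end, and its end as a delta from its own start, which keeps
// the encoded integers small. The concrete ranges are made up for illustration.
#[allow(dead_code)]
fn range_delta_encoding_sketch() {
let ranges: Vec<(u128, u128)> = vec![(100, 104), (50_000, 50_001)];
// Encode.
let mut deltas = Vec::new();
let mut prev = 0u128;
for (start, end) in &ranges {
deltas.push(start - prev);
prev = *start;
deltas.push(end - prev);
prev = *end;
}
assert_eq!(deltas, vec![100, 4, 49_896, 1]);
// Decode.
let mut decoded = Vec::new();
let mut value = 0u128;
for pair in deltas.chunks(2) {
value += pair[0];
let start = value;
value += pair[1];
decoded.push((start, value));
}
assert_eq!(decoded, ranges);
}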
|
||||
|
||||
impl CompactSpace {
|
||||
/// Amplitude is the value range of the compact space including the sentinel value used to
|
||||
/// identify null values. The compact space is 0..=amplitude.
|
||||
///
|
||||
/// It's only used to verify we don't exceed u64 number space, which would indicate a bug.
|
||||
fn amplitude_compact_space(&self) -> u128 {
|
||||
self.ranges_mapping
|
||||
.last()
|
||||
.map(|last_range| last_range.compact_end() as u128)
|
||||
.unwrap_or(1) // compact space starts at 1, 0 == null
|
||||
}
|
||||
|
||||
fn get_range_mapping(&self, pos: usize) -> &RangeMapping {
|
||||
&self.ranges_mapping[pos]
|
||||
}
|
||||
|
||||
/// Returns `Ok(value in the compact space)`, or, if the value lies outside the compact space,
/// `Err(position where it would be inserted)`.
|
||||
fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
|
||||
self.ranges_mapping
|
||||
.binary_search_by(|probe| {
|
||||
let value_range = &probe.value_range;
|
||||
if value < *value_range.start() {
|
||||
Ordering::Greater
|
||||
} else if value > *value_range.end() {
|
||||
Ordering::Less
|
||||
} else {
|
||||
Ordering::Equal
|
||||
}
|
||||
})
|
||||
.map(|pos| {
|
||||
let range_mapping = &self.ranges_mapping[pos];
|
||||
let pos_in_range = (value - range_mapping.value_range.start()) as u64;
|
||||
range_mapping.compact_start + pos_in_range
|
||||
})
|
||||
}
|
||||
|
||||
/// Unpacks a value from compact space u64 to u128 space
|
||||
fn compact_to_u128(&self, compact: u64) -> u128 {
|
||||
let pos = self
|
||||
.ranges_mapping
|
||||
.binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
|
||||
// Correctness: Overflow. The first range starts at compact space 0, the error from
|
||||
// binary search can never be 0
|
||||
.map_or_else(|e| e - 1, |v| v);
|
||||
|
||||
let range_mapping = &self.ranges_mapping[pos];
|
||||
let diff = compact - range_mapping.compact_start;
|
||||
range_mapping.value_range.start() + diff as u128
|
||||
}
|
||||
}
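// --- Illustrative sketch (not part of the original file) ---
// A self-contained miniature of the mapping implemented above: covered ranges
// are laid out contiguously in a compact space starting at 1 (0 is reserved
// for null), and lookups walk the ranges much like `u128_to_compact` /
// `compact_to_u128` do (the real code uses a binary search). The ranges and
// function name below are made up for illustration.
#[allow(dead_code)]
fn compact_space_mapping_sketch() {
// (value_range_start, value_range_end, compact_start)
let ranges: Vec<(u128, u128, u64)> = vec![(100, 104, 1), (50_000, 50_001, 6)];
let to_compact = |value: u128| -> Option<u64> {
ranges
.iter()
.find(|(start, end, _)| (*start..=*end).contains(&value))
.map(|(start, _, compact_start)| compact_start + (value - start) as u64)
};
let to_u128 = |compact: u64| -> u128 {
// Last range whose compact_start is <= compact.
let (start, _, compact_start) = ranges
.iter()
.rev()
.find(|(_, _, compact_start)| *compact_start <= compact)
.unwrap();
start + (compact - compact_start) as u128
};
assert_eq!(to_compact(102), Some(3));
assert_eq!(to_compact(50_001), Some(7));
assert_eq!(to_compact(1_000), None); // falls into a blank
assert_eq!(to_u128(7), 50_001);
}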
|
||||
|
||||
pub struct CompactSpaceCompressor {
|
||||
params: IPCodecParams,
|
||||
}
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IPCodecParams {
|
||||
compact_space: CompactSpace,
|
||||
bit_unpacker: BitUnpacker,
|
||||
min_value: u128,
|
||||
max_value: u128,
|
||||
num_vals: u32,
|
||||
num_bits: u8,
|
||||
}
|
||||
|
||||
impl CompactSpaceCompressor {
|
||||
/// Collecting the values into a `BTreeSet` may cost a lot of memory; it is used to sort and dedup the values.
|
||||
pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
|
||||
let mut values_sorted = BTreeSet::new();
|
||||
values_sorted.extend(iter);
|
||||
let total_num_values = num_vals;
|
||||
|
||||
let compact_space =
|
||||
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
|
||||
let amplitude_compact_space = compact_space.amplitude_compact_space();
|
||||
|
||||
assert!(
|
||||
amplitude_compact_space <= u64::MAX as u128,
|
||||
"case unsupported."
|
||||
);
|
||||
|
||||
let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);
|
||||
let min_value = *values_sorted.iter().next().unwrap_or(&0);
|
||||
let max_value = *values_sorted.iter().last().unwrap_or(&0);
|
||||
assert_eq!(
|
||||
compact_space
|
||||
.u128_to_compact(max_value)
|
||||
.expect("could not convert max value to compact space"),
|
||||
amplitude_compact_space as u64
|
||||
);
|
||||
CompactSpaceCompressor {
|
||||
params: IPCodecParams {
|
||||
compact_space,
|
||||
bit_unpacker: BitUnpacker::new(num_bits),
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals: total_num_values,
|
||||
num_bits,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn write_footer(self, writer: &mut impl Write) -> io::Result<()> {
|
||||
let writer = &mut CountingWriter::wrap(writer);
|
||||
self.params.serialize(writer)?;
|
||||
|
||||
let footer_len = writer.written_bytes() as u32;
|
||||
footer_len.serialize(writer)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn compress_into(
|
||||
self,
|
||||
vals: impl Iterator<Item = u128>,
|
||||
write: &mut impl Write,
|
||||
) -> io::Result<()> {
|
||||
let mut bitpacker = BitPacker::default();
|
||||
for val in vals {
|
||||
let compact = self
|
||||
.params
|
||||
.compact_space
|
||||
.u128_to_compact(val)
|
||||
.map_err(|_| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"Could not convert value to compact_space. This is a bug.",
|
||||
)
|
||||
})?;
|
||||
bitpacker.write(compact, self.params.num_bits, write)?;
|
||||
}
|
||||
bitpacker.close(write)?;
|
||||
self.write_footer(write)?;
|
||||
Ok(())
|
||||
}
|
||||
}
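// --- Illustrative sketch (not part of the original file) ---
// The footer layout written by `write_footer` and read back by
// `CompactSpaceDecompressor::open`: bit-packed values, then the serialized
// params, then 4 bytes holding the params length, so a reader can locate the
// footer from the end of the data. The byte contents and the use of
// little-endian here are stand-ins for illustration, not the exact
// `BinarySerializable` encoding.
#[allow(dead_code)]
fn footer_layout_sketch() {
let payload = vec![0u8; 10]; // stand-in for the bit-packed values
let params = vec![1u8, 2, 3, 4, 5]; // stand-in for serialized IPCodecParams
let mut data = Vec::new();
data.extend_from_slice(&payload);
data.extend_from_slice(&params);
data.extend_from_slice(&(params.len() as u32).to_le_bytes());
// Reading it back, mirroring `open`.
let (rest, len_bytes) = data.split_at(data.len() - 4);
let footer_len = u32::from_le_bytes(len_bytes.try_into().unwrap()) as usize;
let footer = &rest[rest.len() - footer_len..];
assert_eq!(footer, &params[..]);
}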
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CompactSpaceDecompressor {
|
||||
data: OwnedBytes,
|
||||
params: IPCodecParams,
|
||||
}
|
||||
|
||||
impl BinarySerializable for IPCodecParams {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
// header flags for future optional dictionary encoding
|
||||
let footer_flags = 0u64;
|
||||
footer_flags.serialize(writer)?;
|
||||
|
||||
VIntU128(self.min_value).serialize(writer)?;
|
||||
VIntU128(self.max_value).serialize(writer)?;
|
||||
VIntU128(self.num_vals as u128).serialize(writer)?;
|
||||
self.num_bits.serialize(writer)?;
|
||||
|
||||
self.compact_space.serialize(writer)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let _header_flags = u64::deserialize(reader)?;
|
||||
let min_value = VIntU128::deserialize(reader)?.0;
|
||||
let max_value = VIntU128::deserialize(reader)?.0;
|
||||
let num_vals = VIntU128::deserialize(reader)?.0 as u32;
|
||||
let num_bits = u8::deserialize(reader)?;
|
||||
let compact_space = CompactSpace::deserialize(reader)?;
|
||||
|
||||
Ok(Self {
|
||||
compact_space,
|
||||
bit_unpacker: BitUnpacker::new(num_bits),
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals,
|
||||
num_bits,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnValues<u128> for CompactSpaceDecompressor {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u128 {
|
||||
self.get(doc)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u128 {
|
||||
self.min_value()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u128 {
|
||||
self.max_value()
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.params.num_vals
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u128> + '_> {
|
||||
Box::new(self.iter())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_docids_for_value_range(
|
||||
&self,
|
||||
value_range: RangeInclusive<u128>,
|
||||
positions_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
self.get_positions_for_value_range(value_range, positions_range, positions)
|
||||
}
|
||||
}
|
||||
|
||||
impl CompactSpaceDecompressor {
|
||||
pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
|
||||
let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
|
||||
let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;
|
||||
|
||||
let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
|
||||
let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
|
||||
let decompressor = CompactSpaceDecompressor { data, params };
|
||||
|
||||
Ok(decompressor)
|
||||
}
|
||||
|
||||
/// Converting to compact space for the decompressor is more complex, since we may get values
|
||||
/// which are outside the compact space. e.g. if we map
|
||||
/// 1000 => 5
|
||||
/// 2000 => 6
|
||||
///
|
||||
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
|
||||
/// error with the index of the next range.
|
||||
fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
|
||||
self.params.compact_space.u128_to_compact(value)
|
||||
}
|
||||
|
||||
fn compact_to_u128(&self, compact: u64) -> u128 {
|
||||
self.params.compact_space.compact_to_u128(compact)
|
||||
}
|
||||
|
||||
/// Comparing on compact space: Random dataset 0,24 (50% random hit) - 1.05 GElements/s
|
||||
/// Comparing on compact space: Real dataset 1.08 GElements/s
|
||||
///
|
||||
/// Comparing on original space: Real dataset .06 GElements/s (not completely optimized)
|
||||
#[inline]
|
||||
pub fn get_positions_for_value_range(
|
||||
&self,
|
||||
value_range: RangeInclusive<u128>,
|
||||
position_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
if value_range.start() > value_range.end() {
|
||||
return;
|
||||
}
|
||||
let position_range = position_range.start..position_range.end.min(self.num_vals());
|
||||
let from_value = *value_range.start();
|
||||
let to_value = *value_range.end();
|
||||
assert!(to_value >= from_value);
|
||||
let compact_from = self.u128_to_compact(from_value);
|
||||
let compact_to = self.u128_to_compact(to_value);
|
||||
|
||||
// Quick return: if both bounds fall into the same non-mapped space, the range can't cover
// any values, so we can exit early.
|
||||
match (compact_to, compact_from) {
|
||||
(Err(pos1), Err(pos2)) if pos1 == pos2 => return,
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let compact_from = compact_from.unwrap_or_else(|pos| {
|
||||
// Correctness (out of bounds): if this value were Err(last_index + 1), we would have exited
// early above, since the to_value also maps into the same non-mapped space.
|
||||
let range_mapping = self.params.compact_space.get_range_mapping(pos);
|
||||
range_mapping.compact_start
|
||||
});
|
||||
// If there is no compact space, we go to the closest upperbound compact space
|
||||
let compact_to = compact_to.unwrap_or_else(|pos| {
|
||||
// Correctness (overflow): if this value were Err(0), we would have exited early above,
// since the from_value also maps into the same non-mapped space.
|
||||
|
||||
// Get end of previous range
|
||||
let pos = pos - 1;
|
||||
let range_mapping = self.params.compact_space.get_range_mapping(pos);
|
||||
range_mapping.compact_end()
|
||||
});
|
||||
|
||||
let range = compact_from..=compact_to;
|
||||
|
||||
let scan_num_docs = position_range.end - position_range.start;
|
||||
|
||||
let step_size = 4;
|
||||
let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size;
|
||||
|
||||
let mut push_if_in_range = |idx, val| {
|
||||
if range.contains(&val) {
|
||||
positions.push(idx);
|
||||
}
|
||||
};
|
||||
let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
|
||||
// unrolled loop
|
||||
for idx in (position_range.start..cutoff).step_by(step_size as usize) {
|
||||
let idx1 = idx;
|
||||
let idx2 = idx + 1;
|
||||
let idx3 = idx + 2;
|
||||
let idx4 = idx + 3;
|
||||
let val1 = get_val(idx1);
|
||||
let val2 = get_val(idx2);
|
||||
let val3 = get_val(idx3);
|
||||
let val4 = get_val(idx4);
|
||||
push_if_in_range(idx1, val1);
|
||||
push_if_in_range(idx2, val2);
|
||||
push_if_in_range(idx3, val3);
|
||||
push_if_in_range(idx4, val4);
|
||||
}
|
||||
|
||||
// handle rest
|
||||
for idx in cutoff..position_range.end {
|
||||
push_if_in_range(idx, get_val(idx));
|
||||
}
|
||||
}
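// --- Illustrative sketch (not part of the original file) ---
// The cutoff computation used by the manually unrolled scan above: handle the
// largest prefix whose length is a multiple of the step size four documents at
// a time, then finish the remainder one by one. The concrete bounds are made
// up for illustration.
#[allow(dead_code)]
fn unrolled_scan_cutoff_sketch() {
let start = 3u32;
let end = 14u32;
let step_size = 4u32;
let scan_len = end - start;
let cutoff = start + scan_len - scan_len % step_size;
assert_eq!(cutoff, 11); // 3..11 is processed 4 at a time, 11..14 one by one
let mut visited = Vec::new();
for idx in (start..cutoff).step_by(step_size as usize) {
visited.extend([idx, idx + 1, idx + 2, idx + 3]);
}
for idx in cutoff..end {
visited.push(idx);
}
assert_eq!(visited, (start..end).collect::<Vec<_>>());
}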
|
||||
|
||||
#[inline]
|
||||
fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
|
||||
(0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn iter(&self) -> impl Iterator<Item = u128> + '_ {
|
||||
// TODO: Performance. It would be better to iterate on the ranges and check existence via
|
||||
// the bit_unpacker.
|
||||
self.iter_compact()
|
||||
.map(|compact| self.compact_to_u128(compact))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get(&self, idx: u32) -> u128 {
|
||||
let compact = self.params.bit_unpacker.get(idx, &self.data);
|
||||
self.compact_to_u128(compact)
|
||||
}
|
||||
|
||||
pub fn min_value(&self) -> u128 {
|
||||
self.params.min_value
|
||||
}
|
||||
|
||||
pub fn max_value(&self) -> u128 {
|
||||
self.params.max_value
|
||||
}
|
||||
}
|
||||
|
||||
// TODO reenable what can be reenabled.
|
||||
// #[cfg(test)]
|
||||
// mod tests {
|
||||
//
|
||||
// use super::*;
|
||||
// use crate::column::format_version::read_format_version;
|
||||
// use crate::column::column_footer::read_null_index_footer;
|
||||
// use crate::column::serialize::U128Header;
|
||||
// use crate::column::{open_u128, serialize_u128};
|
||||
//
|
||||
// #[test]
|
||||
// fn compact_space_test() {
|
||||
// let ips = &[
|
||||
// 2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
|
||||
// ]
|
||||
// .into_iter()
|
||||
// .collect();
|
||||
// let compact_space = get_compact_space(ips, ips.len() as u32, 11);
|
||||
// let amplitude = compact_space.amplitude_compact_space();
|
||||
// assert_eq!(amplitude, 17);
|
||||
// assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
|
||||
// assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
|
||||
// assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);
|
||||
//
|
||||
// for (num1, num2) in (0..3).tuple_windows() {
|
||||
// assert_eq!(
|
||||
// compact_space.get_range_mapping(num1).compact_end() + 1,
|
||||
// compact_space.get_range_mapping(num2).compact_start
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// let mut output: Vec<u8> = Vec::new();
|
||||
// compact_space.serialize(&mut output).unwrap();
|
||||
//
|
||||
// assert_eq!(
|
||||
// compact_space,
|
||||
// CompactSpace::deserialize(&mut &output[..]).unwrap()
|
||||
// );
|
||||
//
|
||||
// for ip in ips {
|
||||
// let compact = compact_space.u128_to_compact(*ip).unwrap();
|
||||
// assert_eq!(compact_space.compact_to_u128(compact), *ip);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn compact_space_amplitude_test() {
|
||||
// let ips = &[100000u128, 1000000].into_iter().collect();
|
||||
// let compact_space = get_compact_space(ips, ips.len() as u32, 1);
|
||||
// let amplitude = compact_space.amplitude_compact_space();
|
||||
// assert_eq!(amplitude, 2);
|
||||
// }
|
||||
//
|
||||
// fn test_all(mut data: OwnedBytes, expected: &[u128]) {
|
||||
// let _header = U128Header::deserialize(&mut data);
|
||||
// let decompressor = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// for (idx, expected_val) in expected.iter().cloned().enumerate() {
|
||||
// let val = decompressor.get(idx as u32);
|
||||
// assert_eq!(val, expected_val);
|
||||
//
|
||||
// let test_range = |range: RangeInclusive<u128>| {
|
||||
// let expected_positions = expected
|
||||
// .iter()
|
||||
// .positions(|val| range.contains(val))
|
||||
// .map(|pos| pos as u32)
|
||||
// .collect::<Vec<_>>();
|
||||
// let mut positions = Vec::new();
|
||||
// decompressor.get_positions_for_value_range(
|
||||
// range,
|
||||
// 0..decompressor.num_vals(),
|
||||
// &mut positions,
|
||||
// );
|
||||
// assert_eq!(positions, expected_positions);
|
||||
// };
|
||||
//
|
||||
// test_range(expected_val.saturating_sub(1)..=expected_val);
|
||||
// test_range(expected_val..=expected_val);
|
||||
// test_range(expected_val..=expected_val.saturating_add(1));
|
||||
// test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
|
||||
// let mut out = Vec::new();
|
||||
// serialize_u128(
|
||||
// || u128_vals.iter().cloned(),
|
||||
// u128_vals.len() as u32,
|
||||
// &mut out,
|
||||
// )
|
||||
// .unwrap();
|
||||
//
|
||||
// let data = OwnedBytes::new(out);
|
||||
// let (data, _format_version) = read_format_version(data).unwrap();
|
||||
// let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
|
||||
// test_all(data.clone(), u128_vals);
|
||||
//
|
||||
// data
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_range_1() {
|
||||
// let vals = &[
|
||||
// 1u128,
|
||||
// 100u128,
|
||||
// 3u128,
|
||||
// 99999u128,
|
||||
// 100000u128,
|
||||
// 100001u128,
|
||||
// 4_000_211_221u128,
|
||||
// 4_000_211_222u128,
|
||||
// 333u128,
|
||||
// ];
|
||||
// let mut data = test_aux_vals(vals);
|
||||
//
|
||||
// let _header = U128Header::deserialize(&mut data);
|
||||
// let decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// let complete_range = 0..vals.len() as u32;
|
||||
// for (pos, val) in vals.iter().enumerate() {
|
||||
// let val = *val;
|
||||
// let pos = pos as u32;
|
||||
// let mut positions = Vec::new();
|
||||
// decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
|
||||
// assert_eq!(positions, vec![pos]);
|
||||
// }
|
||||
//
|
||||
// handle docid range out of bounds
|
||||
// let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
|
||||
// assert!(positions.is_empty());
|
||||
//
|
||||
// let positions =
|
||||
// get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
|
||||
// assert_eq!(positions, vec![0]);
|
||||
// let positions =
|
||||
// get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
|
||||
// assert_eq!(positions, vec![0]);
|
||||
// let positions =
|
||||
// get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
|
||||
// assert_eq!(positions, vec![0, 2]);
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99999u128..=99999u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// vec![3]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99999u128..=100000u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// vec![3, 4]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99998u128..=100000u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// vec![3, 4]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99998u128..=99999u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[3]
|
||||
// );
|
||||
// assert!(get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99998u128..=99998u128,
|
||||
// complete_range.clone()
|
||||
// )
|
||||
// .is_empty());
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 333u128..=333u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 332u128..=333u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 332u128..=334u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 333u128..=334u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 4_000_211_221u128..=5_000_000_000u128,
|
||||
// complete_range
|
||||
// ),
|
||||
// &[6, 7]
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_empty() {
|
||||
// let vals = &[];
|
||||
// let data = test_aux_vals(vals);
|
||||
// let _decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_range_2() {
|
||||
// let vals = &[
|
||||
// 100u128,
|
||||
// 99999u128,
|
||||
// 100000u128,
|
||||
// 100001u128,
|
||||
// 4_000_211_221u128,
|
||||
// 4_000_211_222u128,
|
||||
// 333u128,
|
||||
// ];
|
||||
// let mut data = test_aux_vals(vals);
|
||||
// let _header = U128Header::deserialize(&mut data);
|
||||
// let decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// let complete_range = 0..vals.len() as u32;
|
||||
// assert!(
|
||||
// &get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone())
|
||||
// .is_empty(),
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
|
||||
// &[0]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
|
||||
// &[0]
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
|
||||
// column: &C,
|
||||
// value_range: RangeInclusive<T>,
|
||||
// doc_id_range: Range<u32>,
|
||||
// ) -> Vec<u32> {
|
||||
// let mut positions = Vec::new();
|
||||
// column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
|
||||
// positions
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_range_3() {
|
||||
// let vals = &[
|
||||
// 200u128,
|
||||
// 201,
|
||||
// 202,
|
||||
// 203,
|
||||
// 204,
|
||||
// 204,
|
||||
// 206,
|
||||
// 207,
|
||||
// 208,
|
||||
// 209,
|
||||
// 210,
|
||||
// 1_000_000,
|
||||
// 5_000_000_000,
|
||||
// ];
|
||||
// let mut out = Vec::new();
|
||||
// serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
|
||||
// let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
|
||||
// let complete_range = 0..vals.len() as u32;
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
|
||||
// vec![0]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
|
||||
// vec![0, 1]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
|
||||
// vec![0]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
|
||||
// vec![11]
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug1() {
|
||||
// let vals = &[9223372036854775806];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug2() {
|
||||
// let vals = &[340282366920938463463374607431768211455u128];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug3() {
|
||||
// let vals = &[340282366920938463463374607431768211454];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug4() {
|
||||
// let vals = &[340282366920938463463374607431768211455, 0];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_first_large_gaps() {
|
||||
// let vals = &[1_000_000_000u128; 100];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
// use itertools::Itertools;
|
||||
// use proptest::prelude::*;
|
||||
//
|
||||
// fn num_strategy() -> impl Strategy<Value = u128> {
|
||||
// prop_oneof![
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
|
||||
// 20 => prop::num::u128::ANY,
|
||||
// ]
|
||||
// }
|
||||
//
|
||||
// proptest! {
|
||||
// #![proptest_config(ProptestConfig::with_cases(10))]
|
||||
//
|
||||
// #[test]
|
||||
// fn compress_decompress_random(vals in proptest::collection::vec(num_strategy()
|
||||
// , 1..1000)) {
|
||||
// let _data = test_aux_vals(&vals);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
222
columnar/src/column_values/main.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
#[macro_use]
|
||||
extern crate prettytable;
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::io::BufRead;
|
||||
use std::net::{IpAddr, Ipv6Addr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
|
||||
use itertools::Itertools;
|
||||
use measure_time::print_time;
|
||||
use prettytable::{Cell, Row, Table};
|
||||
|
||||
fn print_set_stats(ip_addrs: &[u128]) {
|
||||
println!("NumIps\t{}", ip_addrs.len());
|
||||
let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
|
||||
println!("NumUniqueIps\t{}", ip_addr_set.len());
|
||||
let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
|
||||
println!("RatioUniqueOverTotal\t{ratio_unique:.4}");
|
||||
|
||||
// histogram
|
||||
let mut ip_addrs = ip_addrs.to_vec();
|
||||
ip_addrs.sort();
|
||||
let mut cnts: Vec<usize> = ip_addrs
|
||||
.into_iter()
|
||||
.dedup_with_count()
|
||||
.map(|(cnt, _)| cnt)
|
||||
.collect();
|
||||
cnts.sort();
|
||||
|
||||
let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
|
||||
let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
|
||||
let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
|
||||
let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
|
||||
let total: usize = cnts.iter().sum();
|
||||
|
||||
println!("{}", total);
|
||||
println!("{}", top_256_cnt);
|
||||
println!("{}", top_128_cnt);
|
||||
println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
|
||||
println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
|
||||
println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
|
||||
println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);
|
||||
|
||||
let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
|
||||
cnts.sort_by(|a, b| {
|
||||
if a.1 == b.1 {
|
||||
a.0.cmp(&b.0)
|
||||
} else {
|
||||
b.1.cmp(&a.1)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn ip_dataset() -> Vec<u128> {
|
||||
let mut ip_addr_v4 = 0;
|
||||
|
||||
let stdin = std::io::stdin();
|
||||
let ip_addrs: Vec<u128> = stdin
|
||||
.lock()
|
||||
.lines()
|
||||
.flat_map(|line| {
|
||||
let line = line.unwrap();
|
||||
let line = line.trim();
|
||||
let ip_addr = IpAddr::from_str(line.trim()).ok()?;
|
||||
if ip_addr.is_ipv4() {
|
||||
ip_addr_v4 += 1;
|
||||
}
|
||||
let ip_addr_v6: Ipv6Addr = match ip_addr {
|
||||
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
|
||||
IpAddr::V6(v6) => v6,
|
||||
};
|
||||
Some(ip_addr_v6)
|
||||
})
|
||||
.map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
|
||||
.collect();
|
||||
|
||||
println!("IpAddrsAny\t{}", ip_addrs.len());
|
||||
println!("IpAddrsV4\t{}", ip_addr_v4);
|
||||
|
||||
ip_addrs
|
||||
}
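
// A minimal sketch (hypothetical helper, std only) of the normalization used in
// `ip_dataset` above: IPv4 addresses are mapped into the IPv6 space and the
// 16 address bytes are then read as a big-endian u128, so numeric order follows
// address byte order.
#[allow(dead_code)]
fn ip_to_u128_example() {
    use std::net::{IpAddr, Ipv6Addr};
    use std::str::FromStr;

    let ip_addr = IpAddr::from_str("127.0.0.1").unwrap();
    let ip_v6: Ipv6Addr = match ip_addr {
        IpAddr::V4(v4) => v4.to_ipv6_mapped(), // ::ffff:127.0.0.1
        IpAddr::V6(v6) => v6,
    };
    let as_u128 = u128::from_be_bytes(ip_v6.octets());
    assert_eq!(as_u128, (0xffffu128 << 32) | 0x7f00_0001);
}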
|
||||
|
||||
fn bench_ip() {
|
||||
let dataset = ip_dataset();
|
||||
print_set_stats(&dataset);
|
||||
|
||||
// Chunks
|
||||
{
|
||||
let mut data = vec![];
|
||||
for dataset in dataset.chunks(500_000) {
|
||||
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
|
||||
}
|
||||
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
|
||||
println!("Compression 50_000 chunks {:.4}", compression);
|
||||
println!(
|
||||
"Num Bits per elem {:.2}",
|
||||
(data.len() * 8) as f32 / dataset.len() as f32
|
||||
);
|
||||
}
|
||||
|
||||
let mut data = vec![];
|
||||
{
|
||||
print_time!("creation");
|
||||
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
|
||||
}
|
||||
|
||||
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
|
||||
println!("Compression {:.2}", compression);
|
||||
println!(
|
||||
"Num Bits per elem {:.2}",
|
||||
(data.len() * 8) as f32 / dataset.len() as f32
|
||||
);
|
||||
|
||||
let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
|
||||
// Sample some ranges
|
||||
let mut doc_values = Vec::new();
|
||||
for value in dataset.iter().take(1110).skip(1100).cloned() {
|
||||
doc_values.clear();
|
||||
print_time!("get range");
|
||||
decompressor.get_docids_for_value_range(
|
||||
value..=value,
|
||||
0..decompressor.num_vals(),
|
||||
&mut doc_values,
|
||||
);
|
||||
println!("{:?}", doc_values.len());
|
||||
}
|
||||
}
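
// Worked example for the figures printed above (illustrative numbers, not
// measurements): with 1_000_000 addresses the raw size is 1_000_000 * 16 bytes.
// If the serialized column takes 2_000_000 bytes, the compression ratio is
// 2_000_000 / 16_000_000 = 0.125 and the cost per element is
// (2_000_000 * 8) / 1_000_000 = 16 bits.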
|
||||
|
||||
fn main() {
|
||||
if env::args().nth(1).unwrap() == "bench_ip" {
|
||||
bench_ip();
|
||||
return;
|
||||
}
|
||||
|
||||
let mut table = Table::new();
|
||||
|
||||
// Add a row per time
|
||||
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
|
||||
|
||||
for (data, data_set_name) in get_codec_test_data_sets() {
|
||||
let results: Vec<(f32, f32, FastFieldCodecType)> = [
|
||||
serialize_with_codec(&data, FastFieldCodecType::Bitpacked),
|
||||
serialize_with_codec(&data, FastFieldCodecType::Linear),
|
||||
serialize_with_codec(&data, FastFieldCodecType::BlockwiseLinear),
|
||||
]
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect();
|
||||
let best_compression_ratio_codec = results
|
||||
.iter()
|
||||
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
|
||||
.cloned()
|
||||
.unwrap();
|
||||
|
||||
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
|
||||
for (est, comp, codec_type) in results {
|
||||
let est_cell = est.to_string();
|
||||
let ratio_cell = comp.to_string();
|
||||
let style = if comp == best_compression_ratio_codec.1 {
|
||||
"Fb"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
table.add_row(Row::new(vec![
|
||||
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
|
||||
Cell::new(&ratio_cell).style_spec(style),
|
||||
Cell::new(&est_cell).style_spec(""),
|
||||
]));
|
||||
}
|
||||
}
|
||||
|
||||
table.printstd();
|
||||
}
|
||||
|
||||
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
let mut data_and_names = vec![];
|
||||
|
||||
let data = (1000..=200_000_u64).collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Autoincrement"));
|
||||
|
||||
let mut current_cumulative = 0;
|
||||
let data = (1..=200_000_u64)
|
||||
.map(|num| {
|
||||
let num = (num as f32 + num as f32).log10() as u64;
|
||||
current_cumulative += num;
|
||||
current_cumulative
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Monotonically increasing concave"));
|
||||
|
||||
let mut current_cumulative = 0;
|
||||
let data = (1..=200_000_u64)
|
||||
.map(|num| {
|
||||
let num = (200_000.0 - num as f32).log10() as u64;
|
||||
current_cumulative += num;
|
||||
current_cumulative
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Monotonically increasing convex"));
|
||||
|
||||
let data = (1000..=200_000_u64)
|
||||
.map(|num| num + rand::random::<u8>() as u64)
|
||||
.collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Almost monotonically increasing"));
|
||||
|
||||
data_and_names
|
||||
}
|
||||
|
||||
pub fn serialize_with_codec(
|
||||
data: &[u64],
|
||||
codec_type: FastFieldCodecType,
|
||||
) -> Option<(f32, f32, FastFieldCodecType)> {
|
||||
let col = VecColumn::from(data);
|
||||
let estimation = fastfield_codecs::estimate(&col, codec_type)?;
|
||||
let mut out = Vec::new();
|
||||
fastfield_codecs::serialize(&col, &mut out, &[codec_type]).ok()?;
|
||||
let actual_compression = out.len() as f32 / (col.num_vals() * 8) as f32;
|
||||
Some((estimation, actual_compression, codec_type))
|
||||
}
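
// Minimal usage sketch for `serialize_with_codec` (hypothetical data; the printed
// numbers depend on the codec implementations). The estimate is computed before
// serializing, the actual ratio is measured on the produced bytes.
#[allow(dead_code)]
fn serialize_with_codec_example() {
    let data: Vec<u64> = (0..10_000u64).map(|i| i * 7).collect();
    if let Some((estimate, actual, codec_type)) =
        serialize_with_codec(&data, FastFieldCodecType::Bitpacked)
    {
        println!("{codec_type:?}: estimated ratio {estimate:.3}, actual ratio {actual:.3}");
    }
}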
|
||||
columnar/src/column_values/mod.rs (new file, 213 lines)
@@ -0,0 +1,213 @@
#![warn(missing_docs)]
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
|
||||
//! # `fastfield_codecs`
|
||||
//!
|
||||
//! - Columnar storage of data for tantivy [`Column`].
|
||||
//! - Encode data in different codecs.
|
||||
//! - Monotonically map values to u64/u128
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use compact_space::CompactSpaceDecompressor;
|
||||
pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
|
||||
use monotonic_mapping::{StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal};
|
||||
pub use monotonic_mapping_u128::MonotonicallyMappableToU128;
|
||||
use serialize::U128Header;
|
||||
|
||||
mod compact_space;
|
||||
pub(crate) mod monotonic_mapping;
|
||||
pub(crate) mod monotonic_mapping_u128;
|
||||
mod stats;
|
||||
pub(crate) mod u64_based;
|
||||
|
||||
mod column;
|
||||
pub mod serialize;
|
||||
|
||||
pub use serialize::serialize_column_values_u128;
|
||||
pub use stats::Stats;
|
||||
pub use u64_based::{
|
||||
load_u64_based_column_values, serialize_and_load_u64_based_column_values,
|
||||
serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
|
||||
};
|
||||
|
||||
pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
|
||||
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
|
||||
#[repr(u8)]
|
||||
/// Available codecs for encoding data converted to u128 (via [`MonotonicallyMappableToU128`]).
|
||||
pub enum U128FastFieldCodecType {
|
||||
/// This codec takes a large number space (u128) and reduces it to a compact number space, by
|
||||
/// removing the holes.
|
||||
CompactSpace = 1,
|
||||
}
|
||||
|
||||
impl BinarySerializable for U128FastFieldCodecType {
|
||||
fn serialize<W: Write + ?Sized>(&self, wrt: &mut W) -> io::Result<()> {
|
||||
self.to_code().serialize(wrt)
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let code = u8::deserialize(reader)?;
|
||||
let codec_type: Self = Self::from_code(code)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`.")))?;
|
||||
Ok(codec_type)
|
||||
}
|
||||
}
|
||||
|
||||
impl U128FastFieldCodecType {
|
||||
pub(crate) fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub(crate) fn from_code(code: u8) -> Option<Self> {
|
||||
match code {
|
||||
1 => Some(Self::CompactSpace),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the correct codec reader wrapped in the `Arc` for the data.
|
||||
pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
|
||||
mut bytes: OwnedBytes,
|
||||
) -> io::Result<Arc<dyn ColumnValues<T>>> {
|
||||
let header = U128Header::deserialize(&mut bytes)?;
|
||||
assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
|
||||
let reader = CompactSpaceDecompressor::open(bytes)?;
|
||||
|
||||
let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<T>> =
|
||||
StrictlyMonotonicMappingToInternal::<T>::new().into();
|
||||
Ok(Arc::new(monotonic_map_column(reader, inverted)))
|
||||
}
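
// End-to-end sketch of the u128 path exposed by this module, combining
// `serialize_column_values_u128` and `open_u128_mapped` (comments only; it assumes
// the `ColumnValues::get_val` accessor used elsewhere in this crate):
//
// let vals: Vec<u128> = vec![1, 5, 1_000_000];
// let mut buffer: Vec<u8> = Vec::new();
// serialize_column_values_u128(&|| vals.iter().copied(), vals.len() as u32, &mut buffer)?;
// let column = open_u128_mapped::<u128>(OwnedBytes::new(buffer))?;
// assert_eq!(column.get_val(2), 1_000_000u128);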
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::{self, Bencher};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55000_u64)
|
||||
.map(|num| num + rng.gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
fn get_reader_for_bench<Codec: FastFieldCodec>(data: &[u64]) -> Codec::Reader {
|
||||
let mut bytes = Vec::new();
|
||||
let min_value = *data.iter().min().unwrap();
|
||||
let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
|
||||
let col = VecColumn::from(&data);
|
||||
let normalized_header = NormalizedHeader {
|
||||
num_vals: col.num_vals(),
|
||||
max_value: col.max_value(),
|
||||
};
|
||||
Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
|
||||
Codec::open_from_bytes(OwnedBytes::new(bytes), normalized_header).unwrap()
|
||||
}
|
||||
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = get_reader_for_bench::<Codec>(data);
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u32);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn ColumnValues>) {
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u32);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_get_dynamic<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = Arc::new(get_reader_for_bench::<Codec>(data));
|
||||
bench_get_dynamic_helper(b, col);
|
||||
}
|
||||
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let min_value = *data.iter().min().unwrap();
|
||||
let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
|
||||
|
||||
let mut bytes = Vec::new();
|
||||
b.iter(|| {
|
||||
bytes.clear();
|
||||
Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/monotonic_mapping.rs (new file, 279 lines)
@@ -0,0 +1,279 @@
use std::fmt::Debug;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use fastdivide::DividerU64;
|
||||
|
||||
use super::MonotonicallyMappableToU128;
|
||||
use crate::RowId;
|
||||
|
||||
/// Monotonically maps a value to the u64 value space.
|
||||
/// Monotonic mapping enables `PartialOrd` on u64 space without conversion to original space.
|
||||
pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Debug + Copy + Send + Sync {
|
||||
/// Converts a value to u64.
|
||||
///
|
||||
/// Internally all fast field values are encoded as u64.
|
||||
fn to_u64(self) -> u64;
|
||||
|
||||
/// Converts a value from u64
|
||||
///
|
||||
/// Internally all fast field values are encoded as u64.
|
||||
/// **Note: To be used for converting encoded Term, Posting values.**
|
||||
fn from_u64(val: u64) -> Self;
|
||||
}
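
// Small illustration of the contract (sketch in comments): the mapping must be
// order preserving, so comparisons can be done directly in u64 space.
//
// assert_eq!(i64::from_u64((-5i64).to_u64()), -5i64);  // round trip
// assert!((-5i64).to_u64() < 3i64.to_u64());           // order preserved after mapping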
|
||||
|
||||
/// Values need to be strictly monotonic mapped to a `Internal` value (u64 or u128) that can be
|
||||
/// used in fast field codecs.
|
||||
///
|
||||
/// The monotonic mapping is required so that `PartialOrd` can be used on `Internal` without
|
||||
/// converting to `External`.
|
||||
///
|
||||
/// All strictly monotonic functions are invertible because they are guaranteed to have a one-to-one
|
||||
/// mapping from their range to their domain. The `inverse` method is required when opening a codec,
|
||||
/// so a value can be converted back to its original domain (e.g. ip address or f64) from its
|
||||
/// internal representation.
|
||||
pub trait StrictlyMonotonicFn<External, Internal> {
|
||||
/// Strictly monotonically maps the value from External to Internal.
|
||||
fn mapping(&self, inp: External) -> Internal;
|
||||
/// Inverse of `mapping`. Maps the value from Internal to External.
|
||||
fn inverse(&self, out: Internal) -> External;
|
||||
}
|
||||
|
||||
/// Inverts a strictly monotonic mapping from `StrictlyMonotonicFn<A, B>` to
|
||||
/// `StrictlyMonotonicFn<B, A>`.
|
||||
///
|
||||
/// # Warning
|
||||
///
|
||||
/// This type comes with a footgun. A type being strictly monotonic does not impose that the inverse
|
||||
/// mapping is strictly monotonic over the entire space External. e.g. a -> a * 2. Use at your own
|
||||
/// risk.
|
||||
pub(crate) struct StrictlyMonotonicMappingInverter<T> {
|
||||
orig_mapping: T,
|
||||
}
|
||||
impl<T> From<T> for StrictlyMonotonicMappingInverter<T> {
|
||||
fn from(orig_mapping: T) -> Self {
|
||||
Self { orig_mapping }
|
||||
}
|
||||
}
|
||||
|
||||
impl<From, To, T> StrictlyMonotonicFn<To, From> for StrictlyMonotonicMappingInverter<T>
|
||||
where T: StrictlyMonotonicFn<From, To>
|
||||
{
|
||||
#[inline(always)]
|
||||
fn mapping(&self, val: To) -> From {
|
||||
self.orig_mapping.inverse(val)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn inverse(&self, val: From) -> To {
|
||||
self.orig_mapping.mapping(val)
|
||||
}
|
||||
}
|
||||
|
||||
/// Applies the strictly monotonic mapping from `T` without any additional changes.
|
||||
pub(crate) struct StrictlyMonotonicMappingToInternal<T> {
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> StrictlyMonotonicMappingToInternal<T> {
|
||||
pub(crate) fn new() -> StrictlyMonotonicMappingToInternal<T> {
|
||||
Self {
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
|
||||
StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
|
||||
where T: MonotonicallyMappableToU128
|
||||
{
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: External) -> u128 {
|
||||
External::to_u128(inp)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: u128) -> External {
|
||||
External::from_u128(out)
|
||||
}
|
||||
}
|
||||
|
||||
impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
|
||||
StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
|
||||
where T: MonotonicallyMappableToU64
|
||||
{
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: External) -> u64 {
|
||||
External::to_u64(inp)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: u64) -> External {
|
||||
External::from_u64(out)
|
||||
}
|
||||
}
|
||||
|
||||
/// Mapping dividing by gcd and a base value.
|
||||
///
|
||||
/// The function is assumed to be called only on values that are multiples of the
/// passed gcd value (offset by `min_value`); this is necessary for the mapping to be monotonic.
|
||||
pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
|
||||
gcd_divider: DividerU64,
|
||||
gcd: u64,
|
||||
min_value: u64,
|
||||
}
|
||||
impl StrictlyMonotonicMappingToInternalGCDBaseval {
|
||||
pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
|
||||
let gcd_divider = DividerU64::divide_by(gcd);
|
||||
Self {
|
||||
gcd_divider,
|
||||
gcd,
|
||||
min_value,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
|
||||
for StrictlyMonotonicMappingToInternalGCDBaseval
|
||||
{
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: External) -> u64 {
|
||||
self.gcd_divider
|
||||
.divide(External::to_u64(inp) - self.min_value)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: u64) -> External {
|
||||
External::from_u64(self.min_value + out * self.gcd)
|
||||
}
|
||||
}
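
// Worked example for the GCD mapping above: with `gcd = 10` and `min_value = 100`,
// the external values 100, 110 and 130 map to 0, 1 and 3, and `inverse` multiplies
// back: inverse(3) = 100 + 3 * 10 = 130. The mapping is strictly monotonic only
// because the inputs are guaranteed to be `min_value` plus a multiple of the gcd.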
|
||||
|
||||
/// Strictly monotonic mapping with a base value.
|
||||
pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
|
||||
min_value: u64,
|
||||
}
|
||||
impl StrictlyMonotonicMappingToInternalBaseval {
|
||||
#[inline(always)]
|
||||
pub(crate) fn new(min_value: u64) -> Self {
|
||||
Self { min_value }
|
||||
}
|
||||
}
|
||||
|
||||
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
|
||||
for StrictlyMonotonicMappingToInternalBaseval
|
||||
{
|
||||
#[inline(always)]
|
||||
fn mapping(&self, val: External) -> u64 {
|
||||
External::to_u64(val) - self.min_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn inverse(&self, val: u64) -> External {
|
||||
External::from_u64(self.min_value + val)
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for u64 {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
self
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for i64 {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
common::i64_to_u64(self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_i64(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for crate::DateTime {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
common::i64_to_u64(self.timestamp_micros)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
crate::DateTime {
|
||||
timestamp_micros: common::u64_to_i64(val),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for bool {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
u64::from(self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val > 0
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for RowId {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
u64::from(self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> RowId {
|
||||
val as RowId
|
||||
}
|
||||
}
|
||||
|
||||
// TODO remove me.
|
||||
// Tantivy should refuse NaN values and work with NotNaN internally.
|
||||
impl MonotonicallyMappableToU64 for f64 {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
common::f64_to_u64(self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_f64(val)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn strictly_monotonic_test() {
|
||||
// identity mapping
|
||||
test_round_trip(&StrictlyMonotonicMappingToInternal::<u64>::new(), 100u64);
|
||||
// round trip to i64
|
||||
test_round_trip(&StrictlyMonotonicMappingToInternal::<i64>::new(), 100u64);
|
||||
// TODO
|
||||
// identity mapping
|
||||
// test_round_trip(&StrictlyMonotonicMappingToInternal::<u128>::new(), 100u128);
|
||||
|
||||
// base value to i64 round trip
|
||||
let mapping = StrictlyMonotonicMappingToInternalBaseval::new(100);
|
||||
test_round_trip::<_, _, u64>(&mapping, 100i64);
|
||||
// base value and gcd to u64 round trip
|
||||
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
|
||||
test_round_trip::<_, _, u64>(&mapping, 100u64);
|
||||
}
|
||||
|
||||
fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
|
||||
mapping: &T,
|
||||
test_val: K,
|
||||
) {
|
||||
assert_eq!(mapping.inverse(mapping.mapping(test_val)), test_val);
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/monotonic_mapping_u128.rs (new file, 41 lines)
@@ -0,0 +1,41 @@
use std::fmt::Debug;
use std::net::Ipv6Addr;

/// Monotonically maps a value to the u128 value space.
/// Monotonic mapping enables `PartialOrd` on u128 space without conversion to original space.
pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Debug + Send + Sync {
    /// Converts a value to u128.
    ///
    /// Internally all fast field values are encoded as u64.
    fn to_u128(self) -> u128;

    /// Converts a value from u128.
    ///
    /// Internally all fast field values are encoded as u64.
    /// **Note: To be used for converting encoded Term, Posting values.**
    fn from_u128(val: u128) -> Self;
}

impl MonotonicallyMappableToU128 for u128 {
    fn to_u128(self) -> u128 {
        self
    }

    fn from_u128(val: u128) -> Self {
        val
    }
}

impl MonotonicallyMappableToU128 for Ipv6Addr {
    fn to_u128(self) -> u128 {
        ip_to_u128(self)
    }

    fn from_u128(val: u128) -> Self {
        Ipv6Addr::from(val.to_be_bytes())
    }
}

fn ip_to_u128(ip_addr: Ipv6Addr) -> u128 {
    u128::from_be_bytes(ip_addr.octets())
}
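
// Round-trip sketch for the `Ipv6Addr` implementation above (comments only, std types):
//
// use std::net::Ipv6Addr;
// let ip = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x0001);
// let as_u128 = ip.to_u128();
// assert_eq!(Ipv6Addr::from_u128(as_u128), ip);
//
// Using big-endian bytes keeps numeric comparisons on u128 consistent with the
// natural ordering of addresses, which is what range queries rely on.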
columnar/src/column_values/serialize.rs (new file, 153 lines)
@@ -0,0 +1,153 @@
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::{BinarySerializable, VInt};
|
||||
use log::warn;
|
||||
|
||||
use super::monotonic_mapping::{
|
||||
StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval,
|
||||
};
|
||||
use super::{
|
||||
monotonic_map_column, u64_based, ColumnValues, MonotonicallyMappableToU64,
|
||||
U128FastFieldCodecType,
|
||||
};
|
||||
use crate::column_values::compact_space::CompactSpaceCompressor;
|
||||
use crate::column_values::u64_based::CodecType;
|
||||
use crate::iterable::Iterable;
|
||||
|
||||
/// The normalized header gives some parameters after applying the following
|
||||
/// normalization of the vector:
|
||||
/// `val -> (val - min_value) / gcd`
|
||||
///
|
||||
/// By design, after normalization, `min_value = 0` and `gcd = 1`.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct NormalizedHeader {
|
||||
/// The number of values in the underlying column.
|
||||
pub num_vals: u32,
|
||||
/// The max value of the underlying column.
|
||||
pub max_value: u64,
|
||||
}
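
// Worked example of the normalization described above: for the column
// [1000, 2000, 3000], `min_value = 1000` and `gcd = 1000`, so the normalized
// values are (val - 1000) / 1000 = [0, 1, 2] and the normalized header reports
// `num_vals = 3`, `max_value = 2`.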
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub(crate) struct U128Header {
|
||||
pub num_vals: u32,
|
||||
pub codec_type: U128FastFieldCodecType,
|
||||
}
|
||||
|
||||
impl BinarySerializable for U128Header {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.num_vals as u64).serialize(writer)?;
|
||||
self.codec_type.serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let num_vals = VInt::deserialize(reader)?.0 as u32;
|
||||
let codec_type = U128FastFieldCodecType::deserialize(reader)?;
|
||||
Ok(U128Header {
|
||||
num_vals,
|
||||
codec_type,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn normalize_column<C: ColumnValues>(
|
||||
from_column: C,
|
||||
min_value: u64,
|
||||
gcd: Option<NonZeroU64>,
|
||||
) -> impl ColumnValues {
|
||||
let gcd = gcd.map(|gcd| gcd.get()).unwrap_or(1);
|
||||
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, min_value);
|
||||
monotonic_map_column(from_column, mapping)
|
||||
}
|
||||
|
||||
/// Serializes u128 values with the compact space codec.
|
||||
pub fn serialize_column_values_u128<I: Iterator<Item = u128>>(
|
||||
iterable: &dyn Fn() -> I,
|
||||
num_vals: u32,
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let header = U128Header {
|
||||
num_vals,
|
||||
codec_type: U128FastFieldCodecType::CompactSpace,
|
||||
};
|
||||
header.serialize(output)?;
|
||||
let compressor = CompactSpaceCompressor::train_from(iterable(), num_vals);
|
||||
compressor.compress_into(iterable(), output)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::{
|
||||
self, serialize_and_load_u64_based_column_values, serialize_u64_based_column_values,
|
||||
ALL_U64_CODEC_TYPES,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_serialize_deserialize_u128_header() {
|
||||
let original = U128Header {
|
||||
num_vals: 11,
|
||||
codec_type: U128FastFieldCodecType::CompactSpace,
|
||||
};
|
||||
let mut out = Vec::new();
|
||||
original.serialize(&mut out).unwrap();
|
||||
let restored = U128Header::deserialize(&mut &out[..]).unwrap();
|
||||
assert_eq!(restored, original);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_deserialize() {
|
||||
let original = [1u64, 5u64, 10u64];
|
||||
let restored: Vec<u64> =
|
||||
serialize_and_load_u64_based_column_values(&&original[..], &ALL_U64_CODEC_TYPES)
|
||||
.iter()
|
||||
.collect();
|
||||
assert_eq!(&restored, &original[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_bool_size_bitwidth_1() {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_u64_based_column_values(
|
||||
|| [false, true].into_iter(),
|
||||
&ALL_U64_CODEC_TYPES,
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
// TODO put the header as a footer so that it serves as a padding.
|
||||
// 5 bytes of header, 1 byte of values.
|
||||
assert_eq!(buffer.len(), 5 + 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_bool_bit_size_bitwidth_0() {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_u64_based_column_values(
|
||||
|| [false, true].into_iter(),
|
||||
&ALL_U64_CODEC_TYPES,
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
// 6 bytes of header, 0 bytes of value, 7 bytes of padding.
|
||||
assert_eq!(buffer.len(), 6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_gcd() {
|
||||
let mut buffer = Vec::new();
|
||||
let vals: Vec<u64> = (0..80).map(|val| (val % 7) * 1_000u64).collect();
|
||||
serialize_u64_based_column_values(
|
||||
|| vals.iter().cloned(),
|
||||
&[CodecType::Bitpacked],
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
// Values are stored over 3 bits.
|
||||
assert_eq!(buffer.len(), 6 + (3 * 80 / 8));
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/stats.rs (new file, 96 lines)
@@ -0,0 +1,96 @@
use std::io;
|
||||
use std::io::Write;
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::{BinarySerializable, VInt};
|
||||
|
||||
use crate::RowId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Stats {
|
||||
pub gcd: NonZeroU64,
|
||||
pub min_value: u64,
|
||||
pub max_value: u64,
|
||||
pub num_rows: RowId,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
pub fn amplitude(&self) -> u64 {
|
||||
self.max_value - self.min_value
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for Stats {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.min_value).serialize(writer)?;
|
||||
VInt(self.gcd.get()).serialize(writer)?;
|
||||
VInt(self.amplitude() / self.gcd).serialize(writer)?;
|
||||
VInt(self.num_rows as u64).serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let min_value = VInt::deserialize(reader)?.0;
|
||||
let gcd = VInt::deserialize(reader)?.0;
|
||||
let gcd = NonZeroU64::new(gcd)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "GCD of 0 is forbidden"))?;
|
||||
let amplitude = VInt::deserialize(reader)?.0 * gcd.get();
|
||||
let max_value = min_value + amplitude;
|
||||
let num_rows = VInt::deserialize(reader)?.0 as RowId;
|
||||
Ok(Stats {
|
||||
min_value,
|
||||
max_value,
|
||||
num_rows,
|
||||
gcd,
|
||||
})
|
||||
}
|
||||
}
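
// Worked example of the layout above (VInt stores 7 value bits per byte): for
// `min_value = 1`, `gcd = 3`, `max_value = 3001`, `num_rows = 10`, the writer emits
// VInt(1) + VInt(3) + VInt((3001 - 1) / 3 = 1000) + VInt(10), i.e. 1 + 1 + 2 + 1
// = 5 bytes, which is the size asserted by `test_stats_serialization` below.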
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::BinarySerializable;
|
||||
|
||||
use crate::column_values::Stats;
|
||||
|
||||
#[track_caller]
|
||||
fn test_stats_ser_deser_aux(stats: &Stats, num_bytes: usize) {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
stats.serialize(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), num_bytes);
|
||||
let deser_stats = Stats::deserialize(&mut &buffer[..]).unwrap();
|
||||
assert_eq!(stats, &deser_stats);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stats_serialization() {
|
||||
test_stats_ser_deser_aux(
|
||||
&(Stats {
|
||||
gcd: NonZeroU64::new(3).unwrap(),
|
||||
min_value: 1,
|
||||
max_value: 3001,
|
||||
num_rows: 10,
|
||||
}),
|
||||
5,
|
||||
);
|
||||
test_stats_ser_deser_aux(
|
||||
&(Stats {
|
||||
gcd: NonZeroU64::new(1_000).unwrap(),
|
||||
min_value: 1,
|
||||
max_value: 3001,
|
||||
num_rows: 10,
|
||||
}),
|
||||
5,
|
||||
);
|
||||
test_stats_ser_deser_aux(
|
||||
&(Stats {
|
||||
gcd: NonZeroU64::new(1).unwrap(),
|
||||
min_value: 0,
|
||||
max_value: 0,
|
||||
num_rows: 0,
|
||||
}),
|
||||
4,
|
||||
);
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/u64_based/bitpacked.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
use std::io::{self, Write};
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use fastdivide::DividerU64;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
|
||||
use crate::{ColumnValues, RowId};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct BitpackedReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
impl ColumnValues for BitpackedReader {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
self.stats.min_value + self.stats.gcd.get() * self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
#[inline]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> RowId {
|
||||
self.stats.num_rows
|
||||
}
|
||||
}
|
||||
|
||||
fn num_bits(stats: &Stats) -> u8 {
|
||||
compute_num_bits(stats.amplitude() / stats.gcd)
|
||||
}
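
// Worked example for the estimator below: with `min_value = 0`, `gcd = 1000`,
// `max_value = 6000` and `num_rows = 80`, the amplitude divided by the gcd is 6,
// so `num_bits` is 3 and the bitpacked payload takes (80 * 3 + 7) / 8 = 30 bytes
// on top of the serialized `Stats` header.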
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BitpackedCodecEstimator;
|
||||
|
||||
impl ColumnCodecEstimator for BitpackedCodecEstimator {
|
||||
fn collect(&mut self, _value: u64) {}
|
||||
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64> {
|
||||
let num_bits_per_value = num_bits(stats);
|
||||
Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64) + 7) / 8)
|
||||
}
|
||||
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
vals: &mut dyn Iterator<Item = u64>,
|
||||
wrt: &mut dyn Write,
|
||||
) -> io::Result<()> {
|
||||
stats.serialize(wrt)?;
|
||||
let num_bits = num_bits(stats);
|
||||
let mut bit_packer = BitPacker::new();
|
||||
let divider = DividerU64::divide_by(stats.gcd.get());
|
||||
for val in vals {
|
||||
bit_packer.write(divider.divide(val - stats.min_value), num_bits, wrt)?;
|
||||
}
|
||||
bit_packer.close(wrt)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitpackedCodec;
|
||||
|
||||
impl ColumnCodec for BitpackedCodec {
|
||||
type Reader = BitpackedReader;
|
||||
type Estimator = BitpackedCodecEstimator;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let stats = Stats::deserialize(&mut data)?;
|
||||
let num_bits = num_bits(&stats);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
stats,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::tests::create_and_validate;
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple() {
|
||||
create_and_validate::<BitpackedCodec>(&[4, 3, 12], "name");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple_gcd() {
|
||||
create_and_validate::<BitpackedCodec>(&[1000, 2000, 3000], "name");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = crate::column_values::u64_based::tests::get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate::<BitpackedCodec>(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate::<BitpackedCodec>(&data, name);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bitpacked_fast_field_rand() {
|
||||
for _ in 0..500 {
|
||||
let mut data = (0..1 + rand::random::<u8>() as usize)
|
||||
.map(|_| rand::random::<i64>() as u64 / 2)
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate::<BitpackedCodec>(&data, "rand");
|
||||
data.reverse();
|
||||
create_and_validate::<BitpackedCodec>(&data, "rand");
|
||||
}
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/u64_based/blockwise_linear.rs (new file, 281 lines)
@@ -0,0 +1,281 @@
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
use std::{io, iter};
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, DeserializeFrom, OwnedBytes};
|
||||
use fastdivide::DividerU64;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::u64_based::line::Line;
|
||||
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
|
||||
use crate::column_values::{ColumnValues, VecColumn};
|
||||
use crate::MonotonicallyMappableToU64;
|
||||
|
||||
const BLOCK_SIZE: u32 = 512u32;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct Block {
|
||||
line: Line,
|
||||
bit_unpacker: BitUnpacker,
|
||||
data_start_offset: usize,
|
||||
}
|
||||
|
||||
impl BinarySerializable for Block {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.line.serialize(writer)?;
|
||||
self.bit_unpacker.bit_width().serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let line = Line::deserialize(reader)?;
|
||||
let bit_width = u8::deserialize(reader)?;
|
||||
Ok(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_num_blocks(num_vals: u32) -> u32 {
|
||||
(num_vals + BLOCK_SIZE - 1) / BLOCK_SIZE
|
||||
}
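
// Worked example: with `BLOCK_SIZE = 512`, a column of 1300 rows is split into
// (1300 + 511) / 512 = 3 blocks; the last block holds the remaining 276 values.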
|
||||
|
||||
pub struct BlockwiseLinearEstimator {
|
||||
block: Vec<u64>,
|
||||
values_num_bytes: u64,
|
||||
meta_num_bytes: u64,
|
||||
}
|
||||
|
||||
impl Default for BlockwiseLinearEstimator {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
block: Vec::with_capacity(BLOCK_SIZE as usize),
|
||||
values_num_bytes: 0u64,
|
||||
meta_num_bytes: 0u64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockwiseLinearEstimator {
|
||||
fn flush_block_estimate(&mut self) {
|
||||
if self.block.is_empty() {
|
||||
return;
|
||||
}
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let mut max_value = 0u64;
|
||||
for (i, buffer_val) in self.block.iter().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
let val = buffer_val.wrapping_sub(interpolated_val);
|
||||
max_value = val.max(max_value);
|
||||
}
|
||||
let bit_width = compute_num_bits(max_value) as usize;
|
||||
self.values_num_bytes += (bit_width * self.block.len() + 7) as u64 / 8;
|
||||
self.meta_num_bytes += 1 + line.num_bytes();
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnCodecEstimator for BlockwiseLinearEstimator {
|
||||
fn collect(&mut self, value: u64) {
|
||||
self.block.push(value);
|
||||
if self.block.len() == BLOCK_SIZE as usize {
|
||||
self.flush_block_estimate();
|
||||
self.block.clear();
|
||||
}
|
||||
}
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64> {
|
||||
let mut estimate = 4 + stats.num_bytes() + self.meta_num_bytes + self.values_num_bytes;
|
||||
if stats.gcd.get() > 1 {
|
||||
let estimate_gain_from_gcd =
|
||||
(stats.gcd.get() as f32).log2().floor() * stats.num_rows as f32 / 8.0f32;
|
||||
estimate = estimate.saturating_sub(estimate_gain_from_gcd as u64);
|
||||
}
|
||||
Some(estimate)
|
||||
}
|
||||
|
||||
fn finalize(&mut self) {
|
||||
self.flush_block_estimate();
|
||||
}
|
||||
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
mut vals: &mut dyn Iterator<Item = u64>,
|
||||
wrt: &mut dyn Write,
|
||||
) -> io::Result<()> {
|
||||
stats.serialize(wrt)?;
|
||||
let mut buffer = Vec::with_capacity(BLOCK_SIZE as usize);
|
||||
let num_blocks = compute_num_blocks(stats.num_rows) as usize;
|
||||
let mut blocks = Vec::with_capacity(num_blocks);
|
||||
|
||||
let mut bit_packer = BitPacker::new();
|
||||
|
||||
let gcd_divider = DividerU64::divide_by(stats.gcd.get());
|
||||
|
||||
for _ in 0..num_blocks {
|
||||
buffer.clear();
|
||||
buffer.extend(
|
||||
(&mut vals)
|
||||
.map(MonotonicallyMappableToU64::to_u64)
|
||||
.take(BLOCK_SIZE as usize),
|
||||
);
|
||||
|
||||
for buffer_val in buffer.iter_mut() {
|
||||
*buffer_val = gcd_divider.divide(*buffer_val - stats.min_value);
|
||||
}
|
||||
|
||||
let mut line = Line::train(&VecColumn::from(&buffer));
|
||||
|
||||
assert!(!buffer.is_empty());
|
||||
|
||||
for (i, buffer_val) in buffer.iter_mut().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
|
||||
}
|
||||
|
||||
let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();
|
||||
|
||||
for &buffer_val in &buffer {
|
||||
bit_packer.write(buffer_val, bit_width, wrt)?;
|
||||
}
|
||||
|
||||
blocks.push(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
});
|
||||
}
|
||||
|
||||
bit_packer.close(wrt)?;
|
||||
|
||||
assert_eq!(blocks.len(), num_blocks);
|
||||
|
||||
let mut counting_wrt = CountingWriter::wrap(wrt);
|
||||
for block in &blocks {
|
||||
block.serialize(&mut counting_wrt)?;
|
||||
}
|
||||
let footer_len = counting_wrt.written_bytes();
|
||||
(footer_len as u32).serialize(&mut counting_wrt)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BlockwiseLinearCodec;
|
||||
|
||||
impl ColumnCodec<u64> for BlockwiseLinearCodec {
|
||||
type Reader = BlockwiseLinearReader;
|
||||
|
||||
type Estimator = BlockwiseLinearEstimator;
|
||||
|
||||
fn load(mut bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let stats = Stats::deserialize(&mut bytes)?;
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
let footer_offset = bytes.len() - 4 - footer_len as usize;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let num_blocks = compute_num_blocks(stats.num_rows);
|
||||
let mut blocks: Vec<Block> = iter::repeat_with(|| Block::deserialize(&mut footer))
|
||||
.take(num_blocks as usize)
|
||||
.collect::<io::Result<_>>()?;
|
||||
let mut start_offset = 0;
|
||||
for block in &mut blocks {
|
||||
block.data_start_offset = start_offset;
|
||||
start_offset += (block.bit_unpacker.bit_width() as usize) * BLOCK_SIZE as usize / 8;
|
||||
}
|
||||
Ok(BlockwiseLinearReader {
|
||||
blocks: blocks.into_boxed_slice().into(),
|
||||
data,
|
||||
stats,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockwiseLinearReader {
|
||||
blocks: Arc<[Block]>,
|
||||
data: OwnedBytes,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
impl ColumnValues for BlockwiseLinearReader {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> u64 {
|
||||
let block_id = (idx / BLOCK_SIZE as u32) as usize;
|
||||
let idx_within_block = idx % (BLOCK_SIZE as u32);
|
||||
let block = &self.blocks[block_id];
|
||||
let interpoled_val: u64 = block.line.eval(idx_within_block);
|
||||
let block_bytes = &self.data[block.data_start_offset..];
|
||||
let bitpacked_diff = block.bit_unpacker.get(idx_within_block, block_bytes);
|
||||
// TODO optimize me! the line parameters could be tweaked to include the multiplication and
|
||||
// remove the dependency.
|
||||
self.stats.min_value
|
||||
+ self
|
||||
.stats
|
||||
.gcd
|
||||
.get()
|
||||
.wrapping_mul(interpoled_val.wrapping_add(bitpacked_diff))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.stats.num_rows
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::tests::create_and_validate;
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple() {
|
||||
create_and_validate::<BlockwiseLinearCodec>(
|
||||
&[11, 20, 40, 20, 10, 10, 10, 10, 10, 10],
|
||||
"simple test",
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple_gcd() {
|
||||
let (_, actual_compression_rate) = create_and_validate::<BlockwiseLinearCodec>(
|
||||
&[10, 20, 40, 20, 10, 10, 10, 10, 10, 10],
|
||||
"name",
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(actual_compression_rate, 0.175);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = crate::column_values::u64_based::tests::get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, name);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockwise_linear_fast_field_rand() {
|
||||
for _ in 0..500 {
|
||||
let mut data = (0..1 + rand::random::<u8>() as usize)
|
||||
.map(|_| rand::random::<i64>() as u64 / 2)
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "rand");
|
||||
data.reverse();
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "rand");
|
||||
}
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/u64_based/line.rs (new file, 223 lines)
@@ -0,0 +1,223 @@
use std::io;
|
||||
use std::num::NonZeroU32;
|
||||
|
||||
use common::{BinarySerializable, VInt};
|
||||
|
||||
use crate::column_values::ColumnValues;
|
||||
|
||||
const MID_POINT: u64 = (1u64 << 32) - 1u64;
|
||||
|
||||
/// `Line` describes a line function `y = ax + b` using integer
|
||||
/// arithmetics.
|
||||
///
|
||||
/// The slope is in fact a decimal split into a 32 bit integer value,
|
||||
/// and a 32-bit decimal value.
|
||||
///
|
||||
/// The multiplication then becomes.
|
||||
/// `y = m * x >> 32 + b`
|
||||
#[derive(Debug, Clone, Copy, Default)]
|
||||
pub struct Line {
|
||||
pub(crate) slope: u64,
|
||||
pub(crate) intercept: u64,
|
||||
}
|
||||
|
||||
/// Compute the line slope.
|
||||
///
|
||||
/// This function has the nice property of being
|
||||
/// invariant by translation.
|
||||
/// `
|
||||
/// compute_slope(y0, y1)
|
||||
/// = compute_slope(y0 + X % 2^64, y1 + X % 2^64)
|
||||
/// `
|
||||
fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
|
||||
let dy = y1.wrapping_sub(y0);
|
||||
let sign = dy <= (1 << 63);
|
||||
let abs_dy = if sign {
|
||||
y1.wrapping_sub(y0)
|
||||
} else {
|
||||
y0.wrapping_sub(y1)
|
||||
};
|
||||
if abs_dy >= 1 << 32 {
|
||||
// This is outside of realm we handle.
|
||||
// Let's just bail.
|
||||
return 0u64;
|
||||
}
|
||||
|
||||
let abs_slope = (abs_dy << 32) / num_vals.get() as u64;
|
||||
if sign {
|
||||
abs_slope
|
||||
} else {
|
||||
// The complement does indeed create the
|
||||
// opposite decreasing slope...
|
||||
//
|
||||
// Intuitively (without the bitshifts and % u64::MAX)
|
||||
// ```
|
||||
// (x + shift)*(u64::MAX - abs_slope)
|
||||
// - (x * (u64::MAX - abs_slope))
|
||||
// = - shift * abs_slope
|
||||
// ```
|
||||
u64::MAX - abs_slope
|
||||
}
|
||||
}
|
||||
|
||||
impl Line {
|
||||
#[inline(always)]
|
||||
pub fn eval(&self, x: u32) -> u64 {
|
||||
let linear_part = ((x as u64).wrapping_mul(self.slope) >> 32) as i32 as u64;
|
||||
self.intercept.wrapping_add(linear_part)
|
||||
}
|
||||
|
||||
// Same as train, but the intercept is only estimated from provided sample positions
|
||||
pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
|
||||
let first_val = sample_positions_and_values[0].1;
|
||||
let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
|
||||
let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
|
||||
Self::train_from(
|
||||
first_val,
|
||||
last_val,
|
||||
num_vals as u32,
|
||||
sample_positions_and_values.iter().cloned(),
|
||||
)
|
||||
}
|
||||
|
||||
// Intercept is only computed from provided positions
|
||||
pub fn train_from(
|
||||
first_val: u64,
|
||||
last_val: u64,
|
||||
num_vals: u32,
|
||||
positions_and_values: impl Iterator<Item = (u64, u64)>,
|
||||
) -> Self {
|
||||
// TODO replace with let else
|
||||
let idx_last_val = if let Some(idx_last_val) = NonZeroU32::new(num_vals - 1) {
|
||||
idx_last_val
|
||||
} else {
|
||||
return Line::default();
|
||||
};
|
||||
|
||||
let y0 = first_val;
|
||||
let y1 = last_val;
|
||||
|
||||
// We first independently pick our slope.
|
||||
let slope = compute_slope(y0, y1, idx_last_val);
|
||||
|
||||
// We picked our slope. Note that it does not have to be perfect.
|
||||
// Now we need to compute the best intercept.
|
||||
//
|
||||
// Intuitively, the best intercept is such that line passes through one of the
|
||||
// `(i, ys[])`.
|
||||
//
|
||||
// The best intercept therefore has the form
|
||||
// `y[i] - line.eval(i)` (using wrapping arithmetics).
|
||||
// In other words, the best intercept is one of the `y - Line::eval(ys[i])`
|
||||
// and our task is just to pick the one that minimizes our error.
|
||||
//
|
||||
// Without sorting our values, this is a difficult problem.
|
||||
// We however rely on the following trick...
|
||||
//
|
||||
// We only focus on the case where the interpolation is half decent.
|
||||
// If the line interpolation is doing its job on a dataset suited for it,
|
||||
// we can hope that the maximum error won't be larger than `u64::MAX / 2`.
|
||||
//
|
||||
// In other words, even without the intercept the values `y - Line::eval(ys[i])` will all be
|
||||
// within an interval that takes less than half of the modulo space of `u64`.
|
||||
//
|
||||
// Our task is therefore to identify this interval.
|
||||
// Here we simply translate all of our values by `y0 - 2^63` and pick the min.
|
||||
let mut line = Line {
|
||||
slope,
|
||||
intercept: 0,
|
||||
};
|
||||
let heuristic_shift = y0.wrapping_sub(MID_POINT);
|
||||
line.intercept = positions_and_values
|
||||
.map(|(pos, y)| y.wrapping_sub(line.eval(pos as u32)))
|
||||
.min_by_key(|&val| val.wrapping_sub(heuristic_shift))
|
||||
.unwrap_or(0u64); //< Never happens.
|
||||
line
|
||||
}
|
||||
|
||||
/// Returns a line that attempts to approximate a function
/// f: i in [0..ys.num_vals()) -> ys[i].
|
||||
///
|
||||
/// - The approximation is always lower than the actual value.
|
||||
/// Or more rigorously, formally `f(i).wrapping_sub(ys[i])` is small
|
||||
/// for any i in [0..ys.len()).
|
||||
/// - It computes without panicking for any input values.
|
||||
///
|
||||
/// This function is only invariant under translation if all of the
/// `ys` are packed into half of the space. (See heuristic below.)
|
||||
/// TODO USE array
|
||||
pub fn train(ys: &dyn ColumnValues) -> Self {
|
||||
let first_val = ys.iter().next().unwrap();
|
||||
let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
|
||||
Self::train_from(
|
||||
first_val,
|
||||
last_val,
|
||||
ys.num_vals(),
|
||||
ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
|
||||
)
|
||||
}
|
||||
}
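
// Worked example of the fixed-point evaluation above: the slope is a 32.32 value,
// so a slope of `1 << 31` means "+0.5 per position". With `intercept = 100`,
// eval(4) = 100 + ((4 * (1 << 31)) >> 32) = 100 + 2 = 102. A decreasing line is
// represented through the wrapping complement (`u64::MAX - abs_slope`).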
|
||||
|
||||
impl BinarySerializable for Line {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.slope).serialize(writer)?;
|
||||
VInt(self.intercept).serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let slope = VInt::deserialize(reader)?.0;
|
||||
let intercept = VInt::deserialize(reader)?.0;
|
||||
Ok(Line { slope, intercept })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::VecColumn;
|
||||
|
||||
/// Test training a line and ensuring that the maximum difference between
|
||||
/// the data points and the line is `expected`.
|
||||
///
|
||||
/// This function applies several translations to the data for better coverage.
|
||||
#[track_caller]
|
||||
fn test_line_interpol_with_translation(ys: &[u64], expected: Option<u64>) {
|
||||
let mut translations = vec![0, 100, u64::MAX / 2, u64::MAX, u64::MAX - 1];
|
||||
translations.extend_from_slice(ys);
|
||||
for translation in translations {
|
||||
let translated_ys: Vec<u64> = ys
|
||||
.iter()
|
||||
.copied()
|
||||
.map(|y| y.wrapping_add(translation))
|
||||
.collect();
|
||||
let largest_err = test_eval_max_err(&translated_ys);
|
||||
assert_eq!(largest_err, expected);
|
||||
}
|
||||
}
|
||||
|
||||
fn test_eval_max_err(ys: &[u64]) -> Option<u64> {
|
||||
let line = Line::train(&VecColumn::from(&ys));
|
||||
ys.iter()
|
||||
.enumerate()
|
||||
.map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))
|
||||
.max()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_train() {
|
||||
test_line_interpol_with_translation(&[11, 11, 11, 12, 12, 13], Some(1));
|
||||
test_line_interpol_with_translation(&[13, 12, 12, 11, 11, 11], Some(1));
|
||||
test_line_interpol_with_translation(&[13, 13, 12, 11, 11, 11], Some(1));
|
||||
test_line_interpol_with_translation(&[13, 13, 12, 11, 11, 11], Some(1));
|
||||
test_line_interpol_with_translation(&[u64::MAX - 1, 0, 0, 1], Some(1));
|
||||
test_line_interpol_with_translation(&[u64::MAX - 1, u64::MAX, 0, 1], Some(0));
|
||||
test_line_interpol_with_translation(&[0, 1, 2, 3, 5], Some(0));
|
||||
test_line_interpol_with_translation(&[1, 2, 3, 4], Some(0));
|
||||
|
||||
let data: Vec<u64> = (0..255).collect();
|
||||
test_line_interpol_with_translation(&data, Some(0));
|
||||
let data: Vec<u64> = (0..255).map(|el| el * 2).collect();
|
||||
test_line_interpol_with_translation(&data, Some(0));
|
||||
}
|
||||
}
|
||||
columnar/src/column_values/u64_based/linear.rs (new file, 277 lines)
@@ -0,0 +1,277 @@
use std::io;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use super::line::Line;
|
||||
use super::ColumnValues;
|
||||
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
|
||||
use crate::column_values::VecColumn;
|
||||
use crate::{MonotonicallyMappableToU64, RowId};
|
||||
|
||||
const HALF_SPACE: u64 = u64::MAX / 2;
|
||||
const LINE_ESTIMATION_BLOCK_LEN: usize = 512;

/// Reader for columns encoded with the linear codec: values are reconstructed as
/// `line(row_id) + bitpacked_offset(row_id)` (with wrapping arithmetic).
#[derive(Clone)]
pub struct LinearReader {
    data: OwnedBytes,
    linear_params: LinearParams,
    stats: Stats,
}

impl ColumnValues for LinearReader {
    #[inline]
    fn get_val(&self, doc: u32) -> u64 {
        let interpolated_val: u64 = self.linear_params.line.eval(doc);
        let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
        interpolated_val.wrapping_add(bitpacked_diff)
    }

    #[inline(always)]
    fn min_value(&self) -> u64 {
        self.stats.min_value
    }

    #[inline(always)]
    fn max_value(&self) -> u64 {
        self.stats.max_value
    }

    #[inline]
    fn num_vals(&self) -> u32 {
        self.stats.num_rows
    }
}

/// Fastfield serializer which tries to guess values by linear interpolation
/// and stores the differences bitpacked.
pub struct LinearCodec;
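
// On-disk layout produced by `LinearCodecEstimator::serialize` (in order): the column `Stats`,
// the `LinearParams` (line + bit width), then one bitpacked offset from the line per row.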

#[derive(Debug, Clone)]
struct LinearParams {
    line: Line,
    bit_unpacker: BitUnpacker,
}

impl BinarySerializable for LinearParams {
    fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        self.line.serialize(writer)?;
        self.bit_unpacker.bit_width().serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let line = Line::deserialize(reader)?;
        let bit_width = u8::deserialize(reader)?;
        Ok(Self {
            line,
            bit_unpacker: BitUnpacker::new(bit_width),
        })
    }
}

pub struct LinearCodecEstimator {
    block: Vec<u64>,
    line: Option<Line>,
    row_id: RowId,
    min_deviation: u64,
    max_deviation: u64,
    first_val: u64,
    last_val: u64,
}

impl Default for LinearCodecEstimator {
    fn default() -> LinearCodecEstimator {
        LinearCodecEstimator {
            block: Vec::with_capacity(LINE_ESTIMATION_BLOCK_LEN),
            line: None,
            row_id: 0,
            min_deviation: u64::MAX,
            max_deviation: u64::MIN,
            first_val: 0u64,
            last_val: 0u64,
        }
    }
}

impl ColumnCodecEstimator for LinearCodecEstimator {
    fn finalize(&mut self) {
        if let Some(line) = self.line.as_mut() {
            line.intercept = line
                .intercept
                .wrapping_add(self.min_deviation)
                .wrapping_sub(HALF_SPACE);
        }
    }

    fn estimate(&self, stats: &Stats) -> Option<u64> {
        let line = self.line?;
        let amplitude = self.max_deviation - self.min_deviation;
        let num_bits = compute_num_bits(amplitude);
        let linear_params = LinearParams {
            line,
            bit_unpacker: BitUnpacker::new(num_bits),
        };
        Some(
            stats.num_bytes()
                + linear_params.num_bytes()
                + (num_bits as u64 * stats.num_rows as u64 + 7) / 8,
        )
    }
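
    // Example: for 1_000 rows bitpacked with num_bits = 3, the payload estimated above is
    // (3 * 1_000 + 7) / 8 = 375 bytes, plus the serialized stats and linear params.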

    fn serialize(
        &self,
        stats: &Stats,
        vals: &mut dyn Iterator<Item = u64>,
        wrt: &mut dyn io::Write,
    ) -> io::Result<()> {
        stats.serialize(wrt)?;
        let line = self.line.unwrap();
        let amplitude = self.max_deviation - self.min_deviation;
        let num_bits = compute_num_bits(amplitude);
        let linear_params = LinearParams {
            line,
            bit_unpacker: BitUnpacker::new(num_bits),
        };
        linear_params.serialize(wrt)?;
        let mut bit_packer = BitPacker::new();
        for (pos, value) in vals.enumerate() {
            let calculated_value = line.eval(pos as u32);
            let offset = value.wrapping_sub(calculated_value);
            bit_packer.write(offset, num_bits, wrt)?;
        }
        bit_packer.close(wrt)?;
        Ok(())
    }

    fn collect(&mut self, value: u64) {
        if let Some(line) = self.line {
            self.collect_after_line_estimation(&line, value);
        } else {
            self.collect_before_line_estimation(value);
        }
    }
}

impl LinearCodecEstimator {
    #[inline]
    fn collect_after_line_estimation(&mut self, line: &Line, value: u64) {
        let interpolated_val: u64 = line.eval(self.row_id);
        let deviation = value.wrapping_add(HALF_SPACE).wrapping_sub(interpolated_val);
        self.min_deviation = self.min_deviation.min(deviation);
        self.max_deviation = self.max_deviation.max(deviation);
        if self.row_id == 0 {
            self.first_val = value;
        }
        self.last_val = value;
        self.row_id += 1u32;
    }

    #[inline]
    fn collect_before_line_estimation(&mut self, value: u64) {
        self.block.push(value);
        if self.block.len() == LINE_ESTIMATION_BLOCK_LEN {
            let line = Line::train(&VecColumn::from(&self.block));
            let block = std::mem::take(&mut self.block);
            for val in block {
                self.collect_after_line_estimation(&line, val);
            }
            self.line = Some(line);
        }
    }
}

impl ColumnCodec for LinearCodec {
    type Reader = LinearReader;

    type Estimator = LinearCodecEstimator;

    fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
        let stats = Stats::deserialize(&mut data)?;
        let linear_params = LinearParams::deserialize(&mut data)?;
        Ok(LinearReader {
            stats,
            linear_params,
            data,
        })
    }
}

#[cfg(test)]
mod tests {
    use rand::RngCore;

    use super::*;
    use crate::column_values::u64_based::tests::{create_and_validate, get_codec_test_datasets};

    #[test]
    fn test_compression_simple() {
        let vals = (100u64..)
            .take(super::LINE_ESTIMATION_BLOCK_LEN)
            .collect::<Vec<_>>();
        create_and_validate::<LinearCodec>(&vals, "simple monotonically large").unwrap();
    }

    #[test]
    fn test_compression() {
        let data = (10..=6_000_u64).collect::<Vec<_>>();
        let (estimate, actual_compression) =
            create_and_validate::<LinearCodec>(&data, "simple monotonically large").unwrap();
        assert_le!(actual_compression, 0.001);
        assert_le!(estimate, 0.02);
    }

    #[test]
    fn test_with_codec_datasets() {
        let data_sets = get_codec_test_datasets();
        for (mut data, name) in data_sets {
            create_and_validate::<LinearCodec>(&data, name);
            data.reverse();
            create_and_validate::<LinearCodec>(&data, name);
        }
    }

    #[test]
    fn linear_interpol_fast_field_test_large_amplitude() {
        let data = vec![
            i64::MAX as u64 / 2,
            i64::MAX as u64 / 3,
            i64::MAX as u64 / 2,
        ];
        create_and_validate::<LinearCodec>(&data, "large amplitude");
    }

    #[test]
    fn overflow_error_test() {
        let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
        create_and_validate::<LinearCodec>(&data, "overflow test");
    }

    #[test]
    fn linear_interpol_fast_concave_data() {
        let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
        create_and_validate::<LinearCodec>(&data, "concave data");
    }

    #[test]
    fn linear_interpol_fast_convex_data() {
        let data = vec![0, 40, 60, 70, 75, 77];
        create_and_validate::<LinearCodec>(&data, "convex data");
    }

    #[test]
    fn linear_interpol_fast_field_test_simple() {
        let data = (10..=20_u64).collect::<Vec<_>>();
        create_and_validate::<LinearCodec>(&data, "simple monotonically");
    }

    #[test]
    fn linear_interpol_fast_field_rand() {
        let mut rng = rand::thread_rng();
        for _ in 0..50 {
            let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
            create_and_validate::<LinearCodec>(&data, "random");
            data.reverse();
            create_and_validate::<LinearCodec>(&data, "random");
        }
    }
}
186 columnar/src/column_values/u64_based/mod.rs Normal file
@@ -0,0 +1,186 @@
mod bitpacked;
mod blockwise_linear;
mod line;
mod linear;
mod stats_collector;

use std::io;
use std::io::Write;
use std::sync::Arc;

use common::{BinarySerializable, OwnedBytes};

use crate::column_values::monotonic_mapping::{
    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
};
use crate::column_values::u64_based::bitpacked::BitpackedCodec;
use crate::column_values::u64_based::blockwise_linear::BlockwiseLinearCodec;
use crate::column_values::u64_based::linear::LinearCodec;
use crate::column_values::u64_based::stats_collector::StatsCollector;
use crate::column_values::{monotonic_map_column, Stats};
use crate::iterable::Iterable;
use crate::{ColumnValues, MonotonicallyMappableToU64};
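
/// Estimates the encoded size of a column for one codec and serializes it.
///
/// An estimator is fed every value once via `collect`, then `finalize` is called;
/// `estimate` then returns the projected size in bytes, or `None` if the codec
/// cannot encode this data.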
pub trait ColumnCodecEstimator<T = u64>: 'static {
    fn collect(&mut self, value: u64);
    fn estimate(&self, stats: &Stats) -> Option<u64>;
    fn finalize(&mut self) {}
    fn serialize(
        &self,
        stats: &Stats,
        vals: &mut dyn Iterator<Item = T>,
        wrt: &mut dyn io::Write,
    ) -> io::Result<()>;
}

pub trait ColumnCodec<T: PartialOrd = u64> {
    type Reader: ColumnValues<T> + 'static;
    type Estimator: ColumnCodecEstimator + Default;

    fn load(bytes: OwnedBytes) -> io::Result<Self::Reader>;

    fn estimator() -> Self::Estimator {
        Self::Estimator::default()
    }

    fn boxed_estimator() -> Box<dyn ColumnCodecEstimator> {
        Box::new(Self::estimator())
    }
}

/// Available codecs to use to encode the u64 (via [`MonotonicallyMappableToU64`]) converted data.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
pub enum CodecType {
    /// Bitpack all values in the value range. The number of bits is defined by the amplitude
    /// `column.max_value() - column.min_value()`.
    Bitpacked = 0u8,
    /// Linear interpolation puts a line between the first and last value and then bitpacks the
    /// values by the offset from the line. The number of bits is defined by the max deviation
    /// from the line.
    Linear = 1u8,
    /// Same as [`CodecType::Linear`], but encodes in blocks of 512 elements.
    BlockwiseLinear = 2u8,
}

pub const ALL_U64_CODEC_TYPES: [CodecType; 3] = [
    CodecType::Bitpacked,
    CodecType::Linear,
    CodecType::BlockwiseLinear,
];

impl CodecType {
    fn to_code(self) -> u8 {
        self as u8
    }

    fn try_from_code(code: u8) -> Option<CodecType> {
        match code {
            0u8 => Some(CodecType::Bitpacked),
            1u8 => Some(CodecType::Linear),
            2u8 => Some(CodecType::BlockwiseLinear),
            _ => None,
        }
    }

    fn load<T: MonotonicallyMappableToU64>(
        &self,
        bytes: OwnedBytes,
    ) -> io::Result<Arc<dyn ColumnValues<T>>> {
        match self {
            CodecType::Bitpacked => load_specific_codec::<BitpackedCodec, T>(bytes),
            CodecType::Linear => load_specific_codec::<LinearCodec, T>(bytes),
            CodecType::BlockwiseLinear => load_specific_codec::<BlockwiseLinearCodec, T>(bytes),
        }
    }
}

fn load_specific_codec<C: ColumnCodec, T: MonotonicallyMappableToU64>(
    bytes: OwnedBytes,
) -> io::Result<Arc<dyn ColumnValues<T>>> {
    let reader = C::load(bytes)?;
    let reader_typed = monotonic_map_column(
        reader,
        StrictlyMonotonicMappingInverter::from(StrictlyMonotonicMappingToInternal::<T>::new()),
    );
    Ok(Arc::new(reader_typed))
}

impl CodecType {
    pub fn estimator(&self) -> Box<dyn ColumnCodecEstimator> {
        match self {
            CodecType::Bitpacked => BitpackedCodec::boxed_estimator(),
            CodecType::Linear => LinearCodec::boxed_estimator(),
            CodecType::BlockwiseLinear => BlockwiseLinearCodec::boxed_estimator(),
        }
    }
}
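
/// Serializes a u64-convertible column, trying each codec in `codec_types` and keeping the one
/// with the smallest estimated footprint. The output layout is
/// `[codec code: u8][stats][codec-specific payload]`.
///
/// `vals` is a factory returning a fresh iterator, because the values are scanned twice:
/// once to collect stats and estimates, once to actually serialize.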
pub fn serialize_u64_based_column_values<T: MonotonicallyMappableToU64, F, I>(
    vals: F,
    codec_types: &[CodecType],
    wrt: &mut dyn Write,
) -> io::Result<()>
where
    I: Iterator<Item = T>,
    F: Fn() -> I,
{
    let mut stats_collector = StatsCollector::default();
    let mut estimators: Vec<(CodecType, Box<dyn ColumnCodecEstimator>)> =
        Vec::with_capacity(codec_types.len());
    for &codec_type in codec_types {
        estimators.push((codec_type, codec_type.estimator()));
    }
    for val in vals() {
        let val_u64 = val.to_u64();
        stats_collector.collect(val_u64);
        for (_, estimator) in &mut estimators {
            estimator.collect(val_u64);
        }
    }
    for (_, estimator) in &mut estimators {
        estimator.finalize();
    }
    let stats = stats_collector.stats();
    let (_, best_codec, best_codec_estimator) = estimators
        .into_iter()
        .flat_map(|(codec_type, estimator)| {
            let num_bytes = estimator.estimate(&stats)?;
            Some((num_bytes, codec_type, estimator))
        })
        .min_by_key(|(num_bytes, _, _)| *num_bytes)
        .ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, "No available applicable codec.")
        })?;
    best_codec.to_code().serialize(wrt)?;
    best_codec_estimator.serialize(
        &stats,
        &mut vals().map(MonotonicallyMappableToU64::to_u64),
        wrt,
    )?;
    Ok(())
}

pub fn load_u64_based_column_values<T: MonotonicallyMappableToU64>(
    mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn ColumnValues<T>>> {
    let codec_type: CodecType = bytes
        .get(0)
        .copied()
        .and_then(CodecType::try_from_code)
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Failed to read codec type"))?;
    bytes.advance(1);
    codec_type.load(bytes)
}

/// Helper function to serialize a column (autodetect from all codecs) and then open it.
pub fn serialize_and_load_u64_based_column_values<T: MonotonicallyMappableToU64>(
    vals: &dyn Iterable,
    codec_types: &[CodecType],
) -> Arc<dyn ColumnValues<T>> {
    let mut buffer = Vec::new();
    serialize_u64_based_column_values(|| vals.boxed_iter(), codec_types, &mut buffer).unwrap();
    load_u64_based_column_values::<T>(OwnedBytes::new(buffer)).unwrap()
}

#[cfg(test)]
mod tests;
200 columnar/src/column_values/u64_based/stats_collector.rs Normal file
@@ -0,0 +1,200 @@
use std::num::NonZeroU64;

use fastdivide::DividerU64;

use crate::column_values::Stats;
use crate::RowId;

/// Compute the gcd of two non-zero numbers.
///
/// It is recommended, but not required, to feed values such that `large >= small`.
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
    loop {
        let rem: u64 = large.get() % small;
        if let Some(new_small) = NonZeroU64::new(rem) {
            (large, small) = (small, new_small);
        } else {
            return small;
        }
    }
}

#[derive(Default)]
pub struct StatsCollector {
    min_max_opt: Option<(u64, u64)>,
    num_rows: RowId,
    // We measure the GCD of the differences between the values and the first value.
    // (This yields the same GCD as measuring the differences to the minimal value.)
    //
    // This way, we can compress i64 values converted to u64 (e.g. timestamps that were
    // supplied in seconds, only to be converted to microseconds).
    increment_gcd_opt: Option<(NonZeroU64, DividerU64)>,
    first_value_opt: Option<u64>,
}

impl StatsCollector {
    pub fn stats(&self) -> Stats {
        let (min_value, max_value) = self.min_max_opt.unwrap_or((0u64, 0u64));
        let increment_gcd = if let Some((increment_gcd, _)) = self.increment_gcd_opt {
            increment_gcd
        } else {
            NonZeroU64::new(1u64).unwrap()
        };
        Stats {
            min_value,
            max_value,
            num_rows: self.num_rows,
            gcd: increment_gcd,
        }
    }
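
    // Incrementally maintains the GCD of `value - first_value` over all collected values.
    // `DividerU64` (from the `fastdivide` crate) caches a fast division by the current GCD,
    // so the full Euclidean `compute_gcd` only runs when a value breaks the current GCD.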
    #[inline]
    fn update_increment_gcd(&mut self, value: u64) {
        let Some(first_value) = self.first_value_opt else {
            // We set the first value and just quit.
            self.first_value_opt = Some(value);
            return;
        };
        let Some(non_zero_value) = NonZeroU64::new(value.abs_diff(first_value)) else {
            // We can simply skip 0 values.
            return;
        };
        let Some((gcd, gcd_divider)) = self.increment_gcd_opt else {
            self.set_increment_gcd(non_zero_value);
            return;
        };
        if gcd.get() == 1 {
            // The GCD cannot get any lower: no further updates are needed.
            return;
        }
        let remainder =
            non_zero_value.get() - (gcd_divider.divide(non_zero_value.get())) * gcd.get();
        if remainder == 0 {
            return;
        }
        let new_gcd = compute_gcd(non_zero_value, gcd);
        self.set_increment_gcd(new_gcd);
    }

    fn set_increment_gcd(&mut self, gcd: NonZeroU64) {
        let new_divider = DividerU64::divide_by(gcd.get());
        self.increment_gcd_opt = Some((gcd, new_divider));
    }

    pub fn collect(&mut self, value: u64) {
        self.min_max_opt = Some(if let Some((min, max)) = self.min_max_opt {
            (min.min(value), max.max(value))
        } else {
            (value, value)
        });
        self.num_rows += 1;
        self.update_increment_gcd(value);
    }
}

#[cfg(test)]
mod tests {
    use std::num::NonZeroU64;

    use crate::column_values::u64_based::stats_collector::{compute_gcd, StatsCollector};
    use crate::column_values::u64_based::Stats;

    fn compute_stats(vals: impl Iterator<Item = u64>) -> Stats {
        let mut stats_collector = StatsCollector::default();
        for val in vals {
            stats_collector.collect(val);
        }
        stats_collector.stats()
    }

    fn find_gcd(vals: impl Iterator<Item = u64>) -> u64 {
        compute_stats(vals).gcd.get()
    }

    #[test]
    fn test_compute_gcd() {
        let test_compute_gcd_aux = |large, small, expected| {
            let large = NonZeroU64::new(large).unwrap();
            let small = NonZeroU64::new(small).unwrap();
            let expected = NonZeroU64::new(expected).unwrap();
            assert_eq!(compute_gcd(small, large), expected);
            assert_eq!(compute_gcd(large, small), expected);
        };
        test_compute_gcd_aux(1, 4, 1);
        test_compute_gcd_aux(2, 4, 2);
        test_compute_gcd_aux(10, 25, 5);
        test_compute_gcd_aux(25, 25, 25);
    }

    #[test]
    fn test_gcd() {
        assert_eq!(find_gcd([0].into_iter()), 1);
        assert_eq!(find_gcd([0, 10].into_iter()), 10);
        assert_eq!(find_gcd([10, 0].into_iter()), 10);
        assert_eq!(find_gcd([].into_iter()), 1);
        assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), 5);
        assert_eq!(find_gcd([15, 16, 10].into_iter()), 1);
        assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), 5);
        assert_eq!(find_gcd([0, 0].into_iter()), 1);
        assert_eq!(find_gcd([1, 10, 4, 1, 7, 10].into_iter()), 3);
        assert_eq!(find_gcd([1, 10, 0, 4, 1, 7, 10].into_iter()), 1);
    }

    #[test]
    fn test_stats() {
        assert_eq!(
            compute_stats([].into_iter()),
            Stats {
                gcd: NonZeroU64::new(1).unwrap(),
                min_value: 0,
                max_value: 0,
                num_rows: 0
            }
        );
        assert_eq!(
            compute_stats([0, 1].into_iter()),
            Stats {
                gcd: NonZeroU64::new(1).unwrap(),
                min_value: 0,
                max_value: 1,
                num_rows: 2
            }
        );
        assert_eq!(
            compute_stats([0, 1].into_iter()),
            Stats {
                gcd: NonZeroU64::new(1).unwrap(),
                min_value: 0,
                max_value: 1,
                num_rows: 2
            }
        );
        assert_eq!(
            compute_stats([10, 20, 30].into_iter()),
            Stats {
                gcd: NonZeroU64::new(10).unwrap(),
                min_value: 10,
                max_value: 30,
                num_rows: 3
            }
        );
        assert_eq!(
            compute_stats([10, 50, 10, 30].into_iter()),
            Stats {
                gcd: NonZeroU64::new(20).unwrap(),
                min_value: 10,
                max_value: 50,
                num_rows: 4
            }
        );
        assert_eq!(
            compute_stats([10, 0, 30].into_iter()),
            Stats {
                gcd: NonZeroU64::new(10).unwrap(),
                min_value: 0,
                max_value: 30,
                num_rows: 3
            }
        );
    }
}
338 columnar/src/column_values/u64_based/tests.rs Normal file
@@ -0,0 +1,338 @@
use proptest::prelude::*;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};

#[test]
fn test_serialize_and_load_simple() {
    let mut buffer = Vec::new();
    let vals = &[1u64, 2u64, 5u64];
    serialize_u64_based_column_values(
        || vals.iter().cloned(),
        &[CodecType::Bitpacked, CodecType::BlockwiseLinear],
        &mut buffer,
    )
    .unwrap();
    assert_eq!(buffer.len(), 7);
    let col = load_u64_based_column_values::<u64>(OwnedBytes::new(buffer)).unwrap();
    assert_eq!(col.num_vals(), 3);
    assert_eq!(col.get_val(0), 1);
    assert_eq!(col.get_val(1), 2);
    assert_eq!(col.get_val(2), 5);
}
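
/// Serializes `vals` with the given codec, loads them back, checks that every value round-trips
/// and that `get_docids_for_value_range` finds the expected rows, then returns the
/// `(estimated, actual)` compression rates as a fraction of the raw 8 bytes per value
/// (`None` if the codec is not applicable).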
pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
    vals: &[u64],
    name: &str,
) -> Option<(f32, f32)> {
    let mut stats_collector = StatsCollector::default();
    let mut codec_estimator: TColumnCodec::Estimator = Default::default();

    for val in vals.boxed_iter() {
        stats_collector.collect(val);
        codec_estimator.collect(val);
    }
    codec_estimator.finalize();
    let stats = stats_collector.stats();
    let estimation = codec_estimator.estimate(&stats)?;

    let mut buffer = Vec::new();
    codec_estimator
        .serialize(&stats, vals.boxed_iter().as_mut(), &mut buffer)
        .unwrap();

    let actual_compression = buffer.len() as u64;

    let reader = TColumnCodec::load(OwnedBytes::new(buffer)).unwrap();
    assert_eq!(reader.num_vals(), vals.len() as u32);
    for (doc, orig_val) in vals.iter().copied().enumerate() {
        let val = reader.get_val(doc as u32);
        assert_eq!(
            val, orig_val,
            "val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data `{vals:?}`",
        );
    }

    if !vals.is_empty() {
        let test_rand_idx = rand::thread_rng().gen_range(0..=vals.len() - 1);
        let expected_positions: Vec<u32> = vals
            .iter()
            .enumerate()
            .filter(|(_, el)| **el == vals[test_rand_idx])
            .map(|(pos, _)| pos as u32)
            .collect();
        let mut positions = Vec::new();
        reader.get_docids_for_value_range(
            vals[test_rand_idx]..=vals[test_rand_idx],
            0..vals.len() as u32,
            &mut positions,
        );
        assert_eq!(expected_positions, positions);
    }
    dbg!(estimation);
    dbg!(actual_compression);
    if actual_compression > 20 {
        assert!(relative_difference(estimation, actual_compression) < 0.10f32);
    }
    Some((
        compression_rate(estimation, stats.num_rows),
        compression_rate(actual_compression, stats.num_rows),
    ))
}

fn compression_rate(num_bytes: u64, num_values: u32) -> f32 {
    num_bytes as f32 / (num_values as f32 * 8.0)
}

fn relative_difference(left: u64, right: u64) -> f32 {
    let left = left as f32;
    let right = right as f32;
    2.0f32 * (left - right).abs() / (left + right)
}

proptest! {
    #![proptest_config(ProptestConfig::with_cases(100))]

    #[test]
    fn test_proptest_small_bitpacked(data in proptest::collection::vec(num_strategy(), 1..10)) {
        create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
    }

    #[test]
    fn test_proptest_small_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
        create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
    }

    #[test]
    fn test_proptest_small_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
        create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
    }
}

proptest! {
    #![proptest_config(ProptestConfig::with_cases(10))]

    #[test]
    fn test_proptest_large_bitpacked(data in proptest::collection::vec(num_strategy(), 1..6000)) {
        create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
    }

    #[test]
    fn test_proptest_large_linear(data in proptest::collection::vec(num_strategy(), 1..6000)) {
        create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
    }

    #[test]
    fn test_proptest_large_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..6000)) {
        create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
    }
}

fn num_strategy() -> impl Strategy<Value = u64> {
    prop_oneof![
        1 => prop::num::u64::ANY.prop_map(|num| u64::MAX - (num % 10) ),
        1 => prop::num::u64::ANY.prop_map(|num| num % 10 ),
        20 => prop::num::u64::ANY,
    ]
}

pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
    let mut data_and_names = vec![];

    let data = (10..=10_000_u64).collect::<Vec<_>>();
    data_and_names.push((data, "simple monotonically increasing"));

    data_and_names.push((
        vec![5, 6, 7, 8, 9, 10, 99, 100],
        "offset in linear interpol",
    ));
    data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
    data_and_names.push((vec![10], "single value"));

    data_and_names.push((
        vec![1572656989877777, 1170935903116329, 720575940379279, 0],
        "overflow error",
    ));

    data_and_names
}

fn test_codec<C: ColumnCodec>() {
    let codec_name = std::any::type_name::<C>();
    for (data, dataset_name) in get_codec_test_datasets() {
        let estimate_actual_opt: Option<(f32, f32)> =
            tests::create_and_validate::<C>(&data, dataset_name);
        let result = if let Some((estimate, actual)) = estimate_actual_opt {
            format!("Estimate `{estimate}` Actual `{actual}`")
        } else {
            "Disabled".to_string()
        };
        println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
    }
}

#[test]
fn test_codec_bitpacking() {
    test_codec::<BitpackedCodec>();
}

#[test]
fn test_codec_interpolation() {
    test_codec::<LinearCodec>();
}

#[test]
fn test_codec_multi_interpolation() {
    test_codec::<BlockwiseLinearCodec>();
}

use super::*;

fn estimate<C: ColumnCodec>(vals: &[u64]) -> Option<f32> {
    let mut stats_collector = StatsCollector::default();
    let mut estimator = C::Estimator::default();
    for &val in vals {
        stats_collector.collect(val);
        estimator.collect(val);
    }
    estimator.finalize();
    let stats = stats_collector.stats();
    let num_bytes = estimator.estimate(&stats)?;
    if stats.num_rows == 0 {
        return None;
    }
    Some(num_bytes as f32 / (8.0 * stats.num_rows as f32))
}

#[test]
fn estimation_good_interpolation_case() {
    let data = (10..=20000_u64).collect::<Vec<_>>();

    let linear_interpol_estimation = estimate::<LinearCodec>(&data).unwrap();
    assert_le!(linear_interpol_estimation, 0.01);

    let multi_linear_interpol_estimation = estimate::<BlockwiseLinearCodec>(&data).unwrap();
    assert_le!(multi_linear_interpol_estimation, 0.2);
    assert_lt!(linear_interpol_estimation, multi_linear_interpol_estimation);

    let bitpacked_estimation = estimate::<BitpackedCodec>(&data).unwrap();
    assert_lt!(linear_interpol_estimation, bitpacked_estimation);
}

#[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
    let mut data: Vec<u64> = (201..=20000_u64).collect();
    data.push(1_000_000);

    // In this case, linear interpolation should in fact not be worse than bitpacking,
    // but the estimator adds some threshold, which leads to a worse estimate.
    let linear_interpol_estimation = estimate::<LinearCodec>(&data[..]).unwrap();
    assert_le!(linear_interpol_estimation, 0.35);

    let bitpacked_estimation = estimate::<BitpackedCodec>(&data).unwrap();
    assert_le!(bitpacked_estimation, 0.32);
    assert_le!(bitpacked_estimation, linear_interpol_estimation);
}

#[test]
fn test_fast_field_codec_type_to_code() {
    let mut count_codec = 0;
    for code in 0..=255 {
        if let Some(codec_type) = CodecType::try_from_code(code) {
            assert_eq!(codec_type.to_code(), code);
            count_codec += 1;
        }
    }
    assert_eq!(count_codec, 3);
}

fn test_fastfield_gcd_i64_with_codec(codec_type: CodecType, num_vals: usize) -> io::Result<()> {
    let mut vals: Vec<i64> = (-4..=(num_vals as i64) - 5).map(|val| val * 1000).collect();
    let mut buffer: Vec<u8> = Vec::new();
    crate::column_values::serialize_u64_based_column_values(
        || vals.iter().cloned(),
        &[codec_type],
        &mut buffer,
    )?;
    let buffer = OwnedBytes::new(buffer);
    let column = crate::column_values::load_u64_based_column_values::<i64>(buffer.clone())?;
    assert_eq!(column.get_val(0), -4000i64);
    assert_eq!(column.get_val(1), -3000i64);
    assert_eq!(column.get_val(2), -2000i64);
    assert_eq!(column.max_value(), (num_vals as i64 - 5) * 1000);
    assert_eq!(column.min_value(), -4000i64);

    // Can't apply gcd
    let mut buffer_without_gcd = Vec::new();
    vals.pop();
    vals.push(1001i64);
    crate::column_values::serialize_u64_based_column_values(
        || vals.iter().cloned(),
        &[codec_type],
        &mut buffer_without_gcd,
    )?;
    let buffer_without_gcd = OwnedBytes::new(buffer_without_gcd);
    assert!(buffer_without_gcd.len() > buffer.len());

    Ok(())
}

#[test]
fn test_fastfield_gcd_i64() -> io::Result<()> {
    for &codec_type in &[
        CodecType::Bitpacked,
        CodecType::BlockwiseLinear,
        CodecType::Linear,
    ] {
        test_fastfield_gcd_i64_with_codec(codec_type, 5500)?;
    }
    Ok(())
}

fn test_fastfield_gcd_u64_with_codec(codec_type: CodecType, num_vals: usize) -> io::Result<()> {
    let mut vals: Vec<u64> = (1..=num_vals).map(|i| i as u64 * 1000u64).collect();
    let mut buffer: Vec<u8> = Vec::new();
    crate::column_values::serialize_u64_based_column_values(
        || vals.iter().cloned(),
        &[codec_type],
        &mut buffer,
    )?;
    let buffer = OwnedBytes::new(buffer);
    let column = crate::column_values::load_u64_based_column_values::<u64>(buffer.clone())?;
    assert_eq!(column.get_val(0), 1000u64);
    assert_eq!(column.get_val(1), 2000u64);
    assert_eq!(column.get_val(2), 3000u64);
    assert_eq!(column.max_value(), num_vals as u64 * 1000);
    assert_eq!(column.min_value(), 1000u64);

    // Can't apply gcd
    let mut buffer_without_gcd = Vec::new();
    vals.pop();
    vals.push(1001u64);
    crate::column_values::serialize_u64_based_column_values(
        || vals.iter().cloned(),
        &[codec_type],
        &mut buffer_without_gcd,
    )?;
    let buffer_without_gcd = OwnedBytes::new(buffer_without_gcd);
    assert!(buffer_without_gcd.len() > buffer.len());
    Ok(())
}

#[test]
fn test_fastfield_gcd_u64() -> io::Result<()> {
    for &codec_type in &[
        CodecType::Bitpacked,
        CodecType::BlockwiseLinear,
        CodecType::Linear,
    ] {
        test_fastfield_gcd_u64_with_codec(codec_type, 5500)?;
    }
    Ok(())
}

#[test]
pub fn test_fastfield2() {
    let test_fastfield = crate::column_values::serialize_and_load_u64_based_column_values::<u64>(
        &&[100u64, 200u64, 300u64][..],
        &ALL_U64_CODEC_TYPES,
    );
    assert_eq!(test_fastfield.get_val(0), 100);
    assert_eq!(test_fastfield.get_val(1), 200);
    assert_eq!(test_fastfield.get_val(2), 300);
}
224 columnar/src/columnar/column_type.rs Normal file
@@ -0,0 +1,224 @@
use std::fmt::Debug;
use std::net::Ipv6Addr;

use crate::value::NumericalType;
use crate::InvalidData;

/// Represents the type of a column. A column type can be encoded on 6 bits.
///
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)]
#[repr(u8)]
pub enum ColumnType {
    I64 = 0u8,
    U64 = 1u8,
    F64 = 2u8,
    Bytes = 10u8,
    Str = 14u8,
    Bool = 18u8,
    IpAddr = 22u8,
    DateTime = 26u8,
}

#[cfg(test)]
const COLUMN_TYPES: [ColumnType; 8] = [
    ColumnType::I64,
    ColumnType::U64,
    ColumnType::F64,
    ColumnType::Bytes,
    ColumnType::Str,
    ColumnType::Bool,
    ColumnType::IpAddr,
    ColumnType::DateTime,
];

impl ColumnType {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
        use ColumnType::*;
        match code {
            0u8 => Ok(I64),
            1u8 => Ok(U64),
            2u8 => Ok(F64),
            10u8 => Ok(Bytes),
            14u8 => Ok(Str),
            18u8 => Ok(Bool),
            22u8 => Ok(IpAddr),
            26u8 => Ok(Self::DateTime),
            _ => Err(InvalidData),
        }
    }
}

impl From<NumericalType> for ColumnType {
    fn from(numerical_type: NumericalType) -> Self {
        match numerical_type {
            NumericalType::I64 => ColumnType::I64,
            NumericalType::U64 => ColumnType::U64,
            NumericalType::F64 => ColumnType::F64,
        }
    }
}

impl ColumnType {
    pub fn numerical_type(&self) -> Option<NumericalType> {
        match self {
            ColumnType::I64 => Some(NumericalType::I64),
            ColumnType::U64 => Some(NumericalType::U64),
            ColumnType::F64 => Some(NumericalType::F64),
            ColumnType::Bytes
            | ColumnType::Str
            | ColumnType::Bool
            | ColumnType::IpAddr
            | ColumnType::DateTime => None,
        }
    }
}

// TODO remove if possible
pub trait HasAssociatedColumnType: 'static + Debug + Send + Sync + Copy + PartialOrd {
    fn column_type() -> ColumnType;
    fn default_value() -> Self;
}

impl HasAssociatedColumnType for u64 {
    fn column_type() -> ColumnType {
        ColumnType::U64
    }

    fn default_value() -> Self {
        0u64
    }
}

impl HasAssociatedColumnType for i64 {
    fn column_type() -> ColumnType {
        ColumnType::I64
    }

    fn default_value() -> Self {
        0i64
    }
}

impl HasAssociatedColumnType for f64 {
    fn column_type() -> ColumnType {
        ColumnType::F64
    }

    fn default_value() -> Self {
        Default::default()
    }
}

impl HasAssociatedColumnType for bool {
    fn column_type() -> ColumnType {
        ColumnType::Bool
    }
    fn default_value() -> Self {
        Default::default()
    }
}

impl HasAssociatedColumnType for crate::DateTime {
    fn column_type() -> ColumnType {
        ColumnType::DateTime
    }
    fn default_value() -> Self {
        Default::default()
    }
}

impl HasAssociatedColumnType for Ipv6Addr {
    fn column_type() -> ColumnType {
        ColumnType::IpAddr
    }

    fn default_value() -> Self {
        Ipv6Addr::from([0u8; 16])
    }
}

/// Column types are grouped into different categories that
/// correspond to the different types of `JsonValue` types.
///
/// The columnar writer will apply coercion rules to make sure that
/// at most one column exists per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
#[repr(u8)]
pub enum ColumnTypeCategory {
    Bool,
    Str,
    Numerical,
    DateTime,
    Bytes,
    IpAddr,
}

impl From<ColumnType> for ColumnTypeCategory {
    fn from(column_type: ColumnType) -> Self {
        match column_type {
            ColumnType::I64 => ColumnTypeCategory::Numerical,
            ColumnType::U64 => ColumnTypeCategory::Numerical,
            ColumnType::F64 => ColumnTypeCategory::Numerical,
            ColumnType::Bytes => ColumnTypeCategory::Bytes,
            ColumnType::Str => ColumnTypeCategory::Str,
            ColumnType::Bool => ColumnTypeCategory::Bool,
            ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
            ColumnType::DateTime => ColumnTypeCategory::DateTime,
        }
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::*;
    use crate::Cardinality;

    #[test]
    fn test_column_type_to_code() {
        let mut column_type_set: HashSet<ColumnType> = HashSet::new();
        for code in u8::MIN..=u8::MAX {
            if let Ok(column_type) = ColumnType::try_from_code(code) {
                assert_eq!(column_type.to_code(), code);
                assert!(column_type_set.insert(column_type));
            }
        }
        assert_eq!(column_type_set.len(), super::COLUMN_TYPES.len());
    }

    #[test]
    fn test_column_category_sort_consistent_with_column_type_sort() {
        // This is a very important property because we need to serialize columns
        // in the right order.
        let mut column_types: Vec<ColumnType> = super::COLUMN_TYPES.iter().copied().collect();
        column_types.sort_by_key(|col| col.to_code());
        let column_categories: Vec<ColumnTypeCategory> = column_types
            .into_iter()
            .map(ColumnTypeCategory::from)
            .collect();
        for (prev, next) in column_categories.iter().zip(column_categories.iter()) {
            assert!(prev <= next);
        }
    }

    #[test]
    fn test_cardinality_to_code() {
        let mut num_cardinality = 0;
        for code in u8::MIN..=u8::MAX {
            if let Ok(cardinality) = Cardinality::try_from_code(code) {
                assert_eq!(cardinality.to_code(), code);
                num_cardinality += 1;
            }
        }
        assert_eq!(num_cardinality, 3);
    }
}
73 columnar/src/columnar/format_version.rs Normal file
@@ -0,0 +1,73 @@
use crate::InvalidData;

pub const VERSION_FOOTER_NUM_BYTES: usize = MAGIC_BYTES.len() + std::mem::size_of::<u32>();

/// We end the file with these 4 magic bytes to help identify that
/// this is indeed a columnar file.
const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 066];
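
/// Returns the 8-byte footer: the format version encoded as a little-endian `u32`,
/// followed by the 4 magic bytes.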
pub fn footer() -> [u8; VERSION_FOOTER_NUM_BYTES] {
    let mut footer_bytes = [0u8; VERSION_FOOTER_NUM_BYTES];
    footer_bytes[0..4].copy_from_slice(&Version::V1.to_bytes());
    footer_bytes[4..8].copy_from_slice(&MAGIC_BYTES[..]);
    footer_bytes
}

pub fn parse_footer(footer_bytes: [u8; VERSION_FOOTER_NUM_BYTES]) -> Result<Version, InvalidData> {
    if footer_bytes[4..8] != MAGIC_BYTES {
        return Err(InvalidData);
    }
    Version::try_from_bytes(footer_bytes[0..4].try_into().unwrap())
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u32)]
pub enum Version {
    V1 = 1u32,
}

impl Version {
    fn to_bytes(&self) -> [u8; 4] {
        (*self as u32).to_le_bytes()
    }

    fn try_from_bytes(bytes: [u8; 4]) -> Result<Version, InvalidData> {
        let code = u32::from_le_bytes(bytes);
        match code {
            1u32 => Ok(Version::V1),
            _ => Err(InvalidData),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::*;

    #[test]
    fn test_footer_deserialization() {
        let parsed_version: Version = parse_footer(footer()).unwrap();
        assert_eq!(Version::V1, parsed_version);
    }

    #[test]
    fn test_version_serialization() {
        let version_to_tests: Vec<u32> = [0, 1 << 8, 1 << 16, 1 << 24]
            .iter()
            .copied()
            .flat_map(|offset| (0..255).map(move |el| el + offset))
            .collect();
        let mut valid_versions: HashSet<u32> = HashSet::default();
        for &i in &version_to_tests {
            let version_res = Version::try_from_bytes(i.to_le_bytes());
            if let Ok(version) = version_res {
                assert_eq!(version, Version::V1);
                assert_eq!(version.to_bytes(), i.to_le_bytes());
                valid_versions.insert(i);
            }
        }
        assert_eq!(valid_versions.len(), 1);
    }
}
114 columnar/src/columnar/merge/merge_dict_column.rs Normal file
@@ -0,0 +1,114 @@
use std::io::{self, Write};

use common::CountingWriter;
use itertools::Itertools;
use sstable::{SSTable, TermOrdinal};

use super::term_merger::TermMerger;
use crate::column_index::{serialize_column_index, SerializableColumnIndex};
use crate::column_values::{serialize_u64_based_column_values, CodecType};
use crate::BytesColumn;

// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
// Column: [Column Index, Column Values, column index num bytes U32::LE]
pub fn merge_bytes_or_str_column(
    column_index: SerializableColumnIndex<'_>,
    bytes_columns: &[BytesColumn],
    output: &mut impl Write,
) -> io::Result<()> {
    // Serialize dict and generate mapping for values
    let mut output = CountingWriter::wrap(output);
    let term_ord_mapping = serialize_merged_dict(bytes_columns, &mut output)?;
    let dictionary_num_bytes: u32 = output.written_bytes() as u32;
    let output = output.finish();

    serialize_bytes_or_str_column(column_index, bytes_columns, &term_ord_mapping, output)?;

    output.write_all(&dictionary_num_bytes.to_le_bytes())?;
    Ok(())
}

fn serialize_bytes_or_str_column(
    column_index: SerializableColumnIndex<'_>,
    bytes_columns: &[BytesColumn],
    term_ord_mapping: &TermOrdinalMapping,
    output: &mut impl Write,
) -> io::Result<()> {
    let column_index_num_bytes = serialize_column_index(column_index, output)?;

    let column_values = move || {
        let iter = bytes_columns
            .iter()
            .enumerate()
            .flat_map(|(segment_ord, byte_column)| {
                let segment_ord = term_ord_mapping.get_segment(segment_ord);
                byte_column
                    .ords()
                    .values
                    .iter()
                    .map(move |term_ord| segment_ord[term_ord as usize])
            });
        iter
    };

    serialize_u64_based_column_values(
        column_values,
        &[CodecType::Bitpacked, CodecType::BlockwiseLinear],
        output,
    )?;

    output.write_all(&column_index_num_bytes.to_le_bytes())?;

    Ok(())
}

fn serialize_merged_dict(
    bytes_columns: &[BytesColumn],
    output: &mut impl Write,
) -> io::Result<TermOrdinalMapping> {
    let mut term_ord_mapping = TermOrdinalMapping::default();

    let mut field_term_streams = Vec::new();
    for column in bytes_columns {
        term_ord_mapping.add_segment(column.dictionary.num_terms());
        let terms = column.dictionary.stream()?;
        field_term_streams.push(terms);
    }

    let mut merged_terms = TermMerger::new(field_term_streams);
    let mut sstable_builder = sstable::VoidSSTable::writer(output);

    let mut current_term_ord = 0;
    while merged_terms.advance() {
        let term_bytes: &[u8] = merged_terms.key();

        sstable_builder.insert(term_bytes, &())?;
        for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
            term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
        }
        current_term_ord += 1;
    }
    sstable_builder.finish()?;

    Ok(term_ord_mapping)
}
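
/// For each input segment, maps the old term ordinals to their ordinal in the merged dictionary.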
#[derive(Default)]
struct TermOrdinalMapping {
    per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
}

impl TermOrdinalMapping {
    fn add_segment(&mut self, max_term_ord: usize) {
        self.per_segment_new_term_ordinals
            .push(vec![TermOrdinal::default(); max_term_ord as usize]);
    }

    fn register_from_to(&mut self, segment_ord: usize, from_ord: TermOrdinal, to_ord: TermOrdinal) {
        self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord;
    }

    fn get_segment(&self, segment_ord: usize) -> &[TermOrdinal] {
        &(self.per_segment_new_term_ordinals[segment_ord])[..]
    }
}
60 columnar/src/columnar/merge/merge_mapping.rs Normal file
@@ -0,0 +1,60 @@
use std::ops::Range;

use crate::{column, ColumnarReader, RowId};

pub struct StackMergeOrder {
    // This list does not start at 0. Its first element is the number of
    // rows in the first columnar.
    cumulated_row_ids: Vec<RowId>,
}

impl StackMergeOrder {
    pub fn from_columnars(columnars: &[&ColumnarReader]) -> StackMergeOrder {
        let mut cumulated_row_ids: Vec<RowId> = Vec::with_capacity(columnars.len());
        let mut cumulated_row_id = 0;
        for columnar in columnars {
            cumulated_row_id += columnar.num_rows();
            cumulated_row_ids.push(cumulated_row_id);
        }
        StackMergeOrder { cumulated_row_ids }
    }

    pub fn num_rows(&self) -> RowId {
        self.cumulated_row_ids.last().copied().unwrap_or(0)
    }

    pub fn offset(&self, columnar_id: usize) -> RowId {
        if columnar_id == 0 {
            return 0;
        }
        self.cumulated_row_ids[columnar_id - 1]
    }
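
    /// Row range, in the merged columnar, occupied by the `columnar_id`-th input.
    /// For example, stacking columnars with 3 and 4 rows yields `0..3` and `3..7`.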
    pub fn columnar_range(&self, columnar_id: usize) -> Range<RowId> {
        self.offset(columnar_id)..self.offset(columnar_id + 1)
    }
}

pub enum MergeRowOrder {
    /// Columnar tables are simply stacked one above the other.
    /// If the i-th columnar_readers has n_rows_i rows, then
    /// in the resulting columnar,
    /// rows [0..n_row_0) contain the rows of columnar_readers[0], in order,
    /// rows [n_row_0..n_row_0 + n_row_1) contain the rows of columnar_readers[1], in order,
    /// ..
    Stack(StackMergeOrder),
    /// Some more complex mapping, that can interleave rows from the different readers and
    /// possibly drop rows.
    Complex(()),
}

impl MergeRowOrder {
    pub fn num_rows(&self) -> RowId {
        match self {
            MergeRowOrder::Stack(stack_row_order) => stack_row_order.num_rows(),
            MergeRowOrder::Complex(_) => {
                todo!()
            }
        }
    }
}
231 columnar/src/columnar/merge/mod.rs Normal file
@@ -0,0 +1,231 @@
mod merge_dict_column;
mod merge_mapping;
mod term_merger;

// mod sorted_doc_id_column;

use std::collections::{BTreeMap, HashMap, HashSet};
use std::io;
use std::net::Ipv6Addr;
use std::sync::Arc;

pub use merge_mapping::{MergeRowOrder, StackMergeOrder};

use super::writer::ColumnarSerializer;
use crate::column::{serialize_column_mappable_to_u128, serialize_column_mappable_to_u64};
use crate::columnar::column_type::ColumnTypeCategory;
use crate::columnar::merge::merge_dict_column::merge_bytes_or_str_column;
use crate::columnar::writer::CompatibleNumericalTypes;
use crate::columnar::ColumnarReader;
use crate::dynamic_column::DynamicColumn;
use crate::{
    BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues,
    NumericalType, NumericalValue,
};
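
/// Merges several columnar readers into a single columnar written to `output`.
///
/// Columns are grouped by (column name, column type); numerical columns sharing a name are
/// first coerced to a common numerical type so that they can be merged together.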
pub fn merge_columnar(
|
||||
columnar_readers: &[&ColumnarReader],
|
||||
mapping: MergeRowOrder,
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer = ColumnarSerializer::new(output);
|
||||
|
||||
let columns_to_merge = group_columns_for_merge(columnar_readers)?;
|
||||
for ((column_name, column_type), columns) in columns_to_merge {
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name.as_bytes(), column_type);
|
||||
merge_column(column_type, columns, &mapping, &mut column_serializer)?;
|
||||
}
|
||||
serializer.finalize(mapping.num_rows())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Column<u64>> {
|
||||
match dynamic_column {
|
||||
DynamicColumn::Bool(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::I64(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::U64(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::F64(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::DateTime(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::IpAddr(_) | DynamicColumn::Bytes(_) | DynamicColumn::Str(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn merge_column(
|
||||
column_type: ColumnType,
|
||||
columns: Vec<Option<DynamicColumn>>,
|
||||
merge_row_order: &MergeRowOrder,
|
||||
wrt: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
match column_type {
|
||||
ColumnType::I64
|
||||
| ColumnType::U64
|
||||
| ColumnType::F64
|
||||
| ColumnType::DateTime
|
||||
| ColumnType::Bool => {
|
||||
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
|
||||
let mut column_values: Vec<Arc<dyn ColumnValues>> = Vec::with_capacity(columns.len());
|
||||
for dynamic_column_opt in columns {
|
||||
if let Some(Column { idx, values }) =
|
||||
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
|
||||
{
|
||||
column_indexes.push(Some(idx));
|
||||
column_values.push(values);
|
||||
} else {
|
||||
column_indexes.push(None);
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
|
||||
let stacked_columns_iterable = || column_values
|
||||
.iter()
|
||||
.flat_map(|column| column.iter());
|
||||
serialize_column_mappable_to_u64(merged_column_index, &stacked_columns_iterable, wrt)?;
|
||||
}
|
||||
ColumnType::IpAddr => {
|
||||
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
|
||||
let mut column_values: Vec<Arc<dyn ColumnValues<Ipv6Addr>>> =
|
||||
Vec::with_capacity(columns.len());
|
||||
let mut num_values = 0;
|
||||
for dynamic_column_opt in columns {
|
||||
if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt {
|
||||
num_values += values.num_vals();
|
||||
column_indexes.push(Some(idx));
|
||||
column_values.push(values);
|
||||
} else {
|
||||
column_indexes.push(None);
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
|
||||
serialize_column_mappable_to_u128(
|
||||
merged_column_index,
|
||||
&|| {
|
||||
column_values
|
||||
.iter()
|
||||
.flat_map(|column_value| column_value.iter())
|
||||
},
|
||||
num_values,
|
||||
wrt,
|
||||
)?;
|
||||
}
|
||||
ColumnType::Bytes | ColumnType::Str => {
|
||||
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
|
||||
let mut bytes_columns: Vec<BytesColumn> = Vec::with_capacity(columns.len());
|
||||
for dynamic_column_opt in columns {
|
||||
match dynamic_column_opt {
|
||||
Some(DynamicColumn::Str(str_column)) => {
|
||||
column_indexes.push(Some(str_column.term_ord_column.idx.clone()));
|
||||
bytes_columns.push(str_column.into());
|
||||
}
|
||||
Some(DynamicColumn::Bytes(bytes_column)) => {
|
||||
column_indexes.push(Some(bytes_column.term_ord_column.idx.clone()));
|
||||
bytes_columns.push(bytes_column);
|
||||
}
|
||||
_ => column_indexes.push(None),
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
|
||||
merge_bytes_or_str_column(merged_column_index, &bytes_columns, wrt)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn group_columns_for_merge(
|
||||
columnar_readers: &[&ColumnarReader],
|
||||
) -> io::Result<BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>>> {
|
||||
// Each column name may have multiple types of column associated.
|
||||
// For merging we are interested in the same column type category since they can be merged.
|
||||
let mut columns_grouped: HashMap<(String, ColumnTypeCategory), Vec<Option<DynamicColumn>>> =
|
||||
HashMap::new();
|
||||
|
||||
let num_columnars = columnar_readers.len();
|
||||
|
||||
for (columnar_id, columnar_reader) in columnar_readers.iter().enumerate() {
|
||||
let column_name_and_handle = columnar_reader.list_columns()?;
|
||||
for (column_name, handle) in column_name_and_handle {
|
||||
let column_type_category: ColumnTypeCategory = handle.column_type().into();
|
||||
let columns = columns_grouped
|
||||
.entry((column_name, column_type_category))
|
||||
.or_insert_with(|| vec![None; num_columnars]);
|
||||
let column = handle.open()?;
|
||||
columns[columnar_id] = Some(column);
|
||||
}
|
||||
}
|
||||
|
||||
let mut merge_columns: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
BTreeMap::default();
|
||||
|
||||
for ((column_name, col_category), mut columns) in columns_grouped {
|
||||
if col_category == ColumnTypeCategory::Numerical {
|
||||
coerce_numerical_columns_to_same_type(&mut columns);
|
||||
}
|
||||
let column_type = columns
|
||||
.iter()
|
||||
.flatten()
|
||||
.map(|col| col.column_type())
|
||||
.next()
|
||||
.unwrap();
|
||||
merge_columns.insert((column_name, column_type), columns);
|
||||
}
|
||||
|
||||
Ok(merge_columns)
|
||||
}
|
||||
|
||||
/// Coerce a set of numerical columns to the same type.
|
||||
///
|
||||
/// If all columns are already from the same type, keep this type
|
||||
/// (even if they could all be coerced to i64).
|
||||
fn coerce_numerical_columns_to_same_type(columns: &mut [Option<DynamicColumn>]) {
|
||||
let mut column_types: HashSet<NumericalType> = HashSet::default();
|
||||
let mut compatible_numerical_types = CompatibleNumericalTypes::default();
|
||||
for column in columns.iter().flatten() {
|
||||
let min_value: NumericalValue;
|
||||
let max_value: NumericalValue;
|
||||
match column {
|
||||
DynamicColumn::I64(column) => {
|
||||
min_value = column.min_value().into();
|
||||
max_value = column.max_value().into();
|
||||
}
|
||||
DynamicColumn::U64(column) => {
|
||||
min_value = column.min_value().into();
|
||||
max_value = column.min_value().into();
|
||||
}
|
||||
DynamicColumn::F64(column) => {
|
||||
min_value = column.min_value().into();
|
||||
max_value = column.min_value().into();
|
||||
}
|
||||
DynamicColumn::Bool(_)
|
||||
| DynamicColumn::IpAddr(_)
|
||||
| DynamicColumn::DateTime(_)
|
||||
| DynamicColumn::Bytes(_)
|
||||
| DynamicColumn::Str(_) => {
|
||||
panic!("We expected only numerical columns.");
|
||||
}
|
||||
}
|
||||
column_types.insert(column.column_type().numerical_type().unwrap());
|
||||
compatible_numerical_types.accept_value(min_value);
|
||||
compatible_numerical_types.accept_value(max_value);
|
||||
}
|
||||
if column_types.len() <= 1 {
|
||||
// No need to do anything. The columns are already all from the same type.
|
||||
// This is necessary to let use force a given type.
|
||||
|
||||
// TODO This works in a world where we do not allow a change of schema,
|
||||
// but in the future, we will have to pass some kind of schema to enforce
|
||||
// the logic.
|
||||
return;
|
||||
}
|
||||
let coerce_type = compatible_numerical_types.to_numerical_type();
|
||||
for column_opt in columns.iter_mut() {
|
||||
if let Some(column) = column_opt.take() {
|
||||
*column_opt = column.coerce_numerical(coerce_type);
|
||||
}
|
||||
}
|
||||
}
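// Illustrative note (not part of the original diff): with the rules above, merging
// an i64 column {-1} with a u64 column {2} coerces both columns to i64, merging an
// i64 column {1} with a u64 column {u64::MAX} coerces both to u64, and mixing
// negative i64 values with u64 values above i64::MAX falls back to f64
// (see the merge tests and `CompatibleNumericalTypes` below).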
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
107  columnar/src/columnar/merge/sorted_doc_id_column.rs  Normal file
@@ -0,0 +1,107 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use itertools::Itertools;
|
||||
|
||||
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
|
||||
use crate::SegmentReader;
|
||||
|
||||
pub(crate) struct RemappedDocIdColumn<'a> {
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
fast_field_readers: Vec<Arc<dyn Column<u64>>>,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
num_vals: u32,
|
||||
}
|
||||
|
||||
fn compute_min_max_val(
|
||||
u64_reader: &dyn Column<u64>,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> Option<(u64, u64)> {
|
||||
if segment_reader.max_doc() == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
if segment_reader.alive_bitset().is_none() {
|
||||
// no deleted documents,
|
||||
// we can use the previous min_val, max_val.
|
||||
return Some((u64_reader.min_value(), u64_reader.max_value()));
|
||||
}
|
||||
// some deleted documents,
|
||||
// we need to recompute the max / min
|
||||
segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(|doc_id| u64_reader.get_val(doc_id))
|
||||
.minmax()
|
||||
.into_option()
|
||||
}
|
||||
|
||||
impl<'a> RemappedDocIdColumn<'a> {
|
||||
pub(crate) fn new(
|
||||
readers: &'a [SegmentReader],
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
field: &str,
|
||||
) -> Self {
|
||||
let (min_value, max_value) = readers
|
||||
.iter()
|
||||
.filter_map(|reader| {
|
||||
let u64_reader: Arc<dyn Column<u64>> =
|
||||
reader.fast_fields().typed_fast_field_reader(field).expect(
|
||||
"Failed to find a reader for single fast field. This is a tantivy bug and \
|
||||
it should never happen.",
|
||||
);
|
||||
compute_min_max_val(&*u64_reader, reader)
|
||||
})
|
||||
.reduce(|a, b| (a.0.min(b.0), a.1.max(b.1)))
|
||||
.expect("Unexpected error, empty readers in IndexMerger");
|
||||
|
||||
let fast_field_readers = readers
|
||||
.iter()
|
||||
.map(|reader| {
|
||||
let u64_reader: Arc<dyn Column<u64>> =
|
||||
reader.fast_fields().typed_fast_field_reader(field).expect(
|
||||
"Failed to find a reader for single fast field. This is a tantivy bug and \
|
||||
it should never happen.",
|
||||
);
|
||||
u64_reader
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
RemappedDocIdColumn {
|
||||
doc_id_mapping,
|
||||
fast_field_readers,
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals: doc_id_mapping.len() as u32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Column for RemappedDocIdColumn<'a> {
|
||||
fn get_val(&self, _doc: u32) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(
|
||||
self.doc_id_mapping
|
||||
.iter_old_doc_addrs()
|
||||
.map(|old_doc_addr| {
|
||||
let fast_field_reader =
|
||||
&self.fast_field_readers[old_doc_addr.segment_ord as usize];
|
||||
fast_field_reader.get_val(old_doc_addr.doc_id)
|
||||
}),
|
||||
)
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.num_vals
|
||||
}
|
||||
}
|
||||
169  columnar/src/columnar/merge/sorted_doc_id_multivalue_column.rs  Normal file
@@ -0,0 +1,169 @@
|
||||
use std::cmp;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use super::flat_map_with_buffer::FlatMapWithBufferIter;
|
||||
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
|
||||
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
|
||||
use crate::{DocAddress, SegmentReader};
|
||||
|
||||
pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
num_vals: u32,
|
||||
}
|
||||
|
||||
impl<'a> RemappedDocIdMultiValueColumn<'a> {
|
||||
pub(crate) fn new(
|
||||
readers: &'a [SegmentReader],
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
field: &str,
|
||||
) -> Self {
|
||||
// Our values are bitpacked and we need to know what should be
|
||||
// our bitwidth and our minimum value before serializing any values.
|
||||
//
|
||||
// Computing those is non-trivial if some documents are deleted.
|
||||
// We go through a complete first pass to compute the minimum and the
|
||||
// maximum value and initialize our Serializer.
|
||||
let mut num_vals = 0;
|
||||
let mut min_value = u64::MAX;
|
||||
let mut max_value = u64::MIN;
|
||||
let mut vals = Vec::new();
|
||||
let mut fast_field_readers = Vec::with_capacity(readers.len());
|
||||
for reader in readers {
|
||||
let ff_reader: MultiValuedFastFieldReader<u64> = reader
|
||||
.fast_fields()
|
||||
.typed_fast_field_multi_reader::<u64>(field)
|
||||
.expect(
|
||||
"Failed to find multivalued fast field reader. This is a bug in tantivy. \
|
||||
Please report.",
|
||||
);
|
||||
for doc in reader.doc_ids_alive() {
|
||||
ff_reader.get_vals(doc, &mut vals);
|
||||
for &val in &vals {
|
||||
min_value = cmp::min(val, min_value);
|
||||
max_value = cmp::max(val, max_value);
|
||||
}
|
||||
num_vals += vals.len();
|
||||
}
|
||||
fast_field_readers.push(ff_reader);
|
||||
// TODO optimize when no deletes
|
||||
}
|
||||
if min_value > max_value {
|
||||
min_value = 0;
|
||||
max_value = 0;
|
||||
}
|
||||
RemappedDocIdMultiValueColumn {
|
||||
doc_id_mapping,
|
||||
fast_field_readers,
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals: num_vals as u32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
|
||||
fn get_val(&self, _pos: u32) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(
|
||||
self.doc_id_mapping
|
||||
.iter_old_doc_addrs()
|
||||
.flat_map_with_buffer(|old_doc_addr: DocAddress, buffer| {
|
||||
let ff_reader = &self.fast_field_readers[old_doc_addr.segment_ord as usize];
|
||||
ff_reader.get_vals(old_doc_addr.doc_id, buffer);
|
||||
}),
|
||||
)
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a> {
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
multi_value_length_readers: Vec<&'a MultiValueIndex>,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
num_vals: u32,
|
||||
}
|
||||
|
||||
impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
|
||||
pub(crate) fn new(
|
||||
segment_and_ff_readers: &'a [(&'a SegmentReader, &'a MultiValueIndex)],
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
) -> Self {
|
||||
// We go through a complete first pass to compute the minimum and the
|
||||
// maximum value and initialize our Column.
|
||||
let mut num_vals = 0;
|
||||
let min_value = 0;
|
||||
let mut max_value = 0;
|
||||
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
|
||||
for segment_and_ff_reader in segment_and_ff_readers {
|
||||
let segment_reader = segment_and_ff_reader.0;
|
||||
let multi_value_length_reader = segment_and_ff_reader.1;
|
||||
if !segment_reader.has_deletes() {
|
||||
max_value += multi_value_length_reader.total_num_vals() as u64;
|
||||
} else {
|
||||
for doc in segment_reader.doc_ids_alive() {
|
||||
max_value += multi_value_length_reader.num_vals_for_doc(doc) as u64;
|
||||
}
|
||||
}
|
||||
num_vals += segment_reader.num_docs();
|
||||
multi_value_length_readers.push(multi_value_length_reader);
|
||||
}
|
||||
// The value range is always get_val(doc)..get_val(doc + 1)
|
||||
num_vals += 1;
|
||||
Self {
|
||||
doc_id_mapping,
|
||||
multi_value_length_readers,
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
|
||||
fn get_val(&self, _pos: u32) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
let mut offset = 0;
|
||||
Box::new(
|
||||
std::iter::once(0).chain(self.doc_id_mapping.iter_old_doc_addrs().map(
|
||||
move |old_doc_addr| {
|
||||
let ff_reader =
|
||||
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
|
||||
offset += ff_reader.num_vals_for_doc(old_doc_addr.doc_id);
|
||||
offset as u64
|
||||
},
|
||||
)),
|
||||
)
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.num_vals
|
||||
}
|
||||
}
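// Illustrative note (not part of the original diff): this column encodes the start
// offset of each document's value range. For remapped documents with respectively
// 2, 0 and 3 values, `iter()` yields 0, 2, 2, 5, and `num_vals()` is the number of
// documents plus one.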
|
||||
107  columnar/src/columnar/merge/term_merger.rs  Normal file
@@ -0,0 +1,107 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
|
||||
use sstable::TermOrdinal;
|
||||
|
||||
use crate::Streamer;
|
||||
|
||||
pub struct HeapItem<'a> {
|
||||
pub streamer: Streamer<'a>,
|
||||
pub segment_ord: usize,
|
||||
}
|
||||
|
||||
impl<'a> PartialEq for HeapItem<'a> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.segment_ord == other.segment_ord
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Eq for HeapItem<'a> {}
|
||||
|
||||
impl<'a> PartialOrd for HeapItem<'a> {
|
||||
fn partial_cmp(&self, other: &HeapItem<'a>) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Ord for HeapItem<'a> {
|
||||
fn cmp(&self, other: &HeapItem<'a>) -> Ordering {
|
||||
(&other.streamer.key(), &other.segment_ord).cmp(&(&self.streamer.key(), &self.segment_ord))
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a list of sorted term streams,
|
||||
/// returns an iterator over sorted unique terms.
|
||||
///
|
||||
/// The item yielded is actually a pair with
/// - the term
/// - a slice with the ordinals of the segments containing
///   the term.
|
||||
pub struct TermMerger<'a> {
|
||||
heap: BinaryHeap<HeapItem<'a>>,
|
||||
current_streamers: Vec<HeapItem<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> TermMerger<'a> {
|
||||
/// Stream of merged term dictionary
|
||||
pub fn new(streams: Vec<Streamer<'a>>) -> TermMerger<'a> {
|
||||
TermMerger {
|
||||
heap: BinaryHeap::new(),
|
||||
current_streamers: streams
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(ord, streamer)| HeapItem {
|
||||
streamer,
|
||||
segment_ord: ord,
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn matching_segments<'b: 'a>(
|
||||
&'b self,
|
||||
) -> impl 'b + Iterator<Item = (usize, TermOrdinal)> {
|
||||
self.current_streamers
|
||||
.iter()
|
||||
.map(|heap_item| (heap_item.segment_ord, heap_item.streamer.term_ord()))
|
||||
}
|
||||
|
||||
fn advance_segments(&mut self) {
|
||||
let streamers = &mut self.current_streamers;
|
||||
let heap = &mut self.heap;
|
||||
for mut heap_item in streamers.drain(..) {
|
||||
if heap_item.streamer.advance() {
|
||||
heap.push(heap_item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance the term iterator to the next term.
|
||||
/// Returns true if there is another term,
/// false if there is none.
|
||||
pub fn advance(&mut self) -> bool {
|
||||
self.advance_segments();
|
||||
if let Some(head) = self.heap.pop() {
|
||||
self.current_streamers.push(head);
|
||||
while let Some(next_streamer) = self.heap.peek() {
|
||||
if self.current_streamers[0].streamer.key() != next_streamer.streamer.key() {
|
||||
break;
|
||||
}
|
||||
let next_heap_it = self.heap.pop().unwrap(); // safe : we peeked beforehand
|
||||
self.current_streamers.push(next_heap_it);
|
||||
}
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current term.
|
||||
///
|
||||
/// This method may be called
|
||||
/// if and only if advance() has been called before
|
||||
/// and "true" was returned.
|
||||
pub fn key(&self) -> &[u8] {
|
||||
self.current_streamers[0].streamer.key()
|
||||
}
|
||||
}
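// Hypothetical usage sketch (not part of the original diff), assuming one `Streamer`
// per segment term dictionary:
//
//     let mut term_merger = TermMerger::new(streams);
//     while term_merger.advance() {
//         let term: &[u8] = term_merger.key();
//         for (segment_ord, term_ord) in term_merger.matching_segments() {
//             // remap `term_ord` from segment `segment_ord` to the merged ordinal of `term`
//         }
//     }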
|
||||
252  columnar/src/columnar/merge/tests.rs  Normal file
@@ -0,0 +1,252 @@
|
||||
use super::*;
|
||||
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};
|
||||
|
||||
fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
|
||||
column_name: &str,
|
||||
vals: &[T],
|
||||
) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_column_type(column_name, T::column_type(), false);
|
||||
for (row_id, val) in vals.iter().copied().enumerate() {
|
||||
dataframe_writer.record_numerical(row_id as RowId, column_name, val.into());
|
||||
}
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(vals.len() as RowId, &mut buffer)
|
||||
.unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_coercion_to_u64() {
|
||||
// i64 type
|
||||
let columnar1 = make_columnar("numbers", &[1i64]);
|
||||
// u64 type
|
||||
let columnar2 = make_columnar("numbers", &[u64::MAX]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_no_coercion_if_all_the_same() {
|
||||
let columnar1 = make_columnar("numbers", &[1u64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_coercion_to_i64() {
|
||||
let columnar1 = make_columnar("numbers", &[-1i64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_missing_column() {
|
||||
let columnar1 = make_columnar("numbers", &[-1i64]);
|
||||
let columnar2 = make_columnar("numbers2", &[2u64]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 2);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
|
||||
{
|
||||
let columns = column_map
|
||||
.get(&("numbers".to_string(), ColumnType::I64))
|
||||
.unwrap();
|
||||
assert!(columns[0].is_some());
|
||||
assert!(columns[1].is_none());
|
||||
}
|
||||
{
|
||||
let columns = column_map
|
||||
.get(&("numbers2".to_string(), ColumnType::U64))
|
||||
.unwrap();
|
||||
assert!(columns[0].is_none());
|
||||
assert!(columns[1].is_some());
|
||||
}
|
||||
}
|
||||
|
||||
fn make_numerical_columnar_multiple_columns(
|
||||
columns: &[(&str, &[&[NumericalValue]])],
|
||||
) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (column_name, column_values) in columns {
|
||||
for (row_id, vals) in column_values.iter().enumerate() {
|
||||
for val in vals.iter() {
|
||||
dataframe_writer.record_numerical(row_id as u32, column_name, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
let num_rows = columns
|
||||
.iter()
|
||||
.map(|(_, val_rows)| val_rows.len() as RowId)
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
fn make_byte_columnar_multiple_columns(columns: &[(&str, &[&[&[u8]]])]) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (column_name, column_values) in columns {
|
||||
for (row_id, vals) in column_values.iter().enumerate() {
|
||||
for val in vals.iter() {
|
||||
dataframe_writer.record_bytes(row_id as u32, column_name, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
let num_rows = columns
|
||||
.iter()
|
||||
.map(|(_, val_rows)| val_rows.len() as RowId)
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (column_name, column_values) in columns {
|
||||
for (row_id, vals) in column_values.iter().enumerate() {
|
||||
for val in vals.iter() {
|
||||
dataframe_writer.record_str(row_id as u32, column_name, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
let num_rows = columns
|
||||
.iter()
|
||||
.map(|(_, val_rows)| val_rows.len() as RowId)
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_columnar_numbers() {
|
||||
let columnar1 =
|
||||
make_numerical_columnar_multiple_columns(&[("numbers", &[&[NumericalValue::from(-1f64)]])]);
|
||||
let columnar2 = make_numerical_columnar_multiple_columns(&[(
|
||||
"numbers",
|
||||
&[&[], &[NumericalValue::from(-3f64)]],
|
||||
)]);
|
||||
let mut buffer = Vec::new();
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let stack_merge_order = StackMergeOrder::from_columnars(columnars);
|
||||
crate::columnar::merge_columnar(
|
||||
columnars,
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("numbers").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::F64(vals) = dynamic_column else { panic!() };
|
||||
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(vals.first(0u32), Some(-1f64));
|
||||
assert_eq!(vals.first(1u32), None);
|
||||
assert_eq!(vals.first(2u32), Some(-3f64));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_columnar_texts() {
|
||||
let columnar1 = make_text_columnar_multiple_columns(&[("texts", &[&["a"]])]);
|
||||
let columnar2 = make_text_columnar_multiple_columns(&[("texts", &[&[], &["b"]])]);
|
||||
let mut buffer = Vec::new();
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let stack_merge_order = StackMergeOrder::from_columnars(columnars);
|
||||
crate::columnar::merge_columnar(
|
||||
columnars,
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("texts").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
|
||||
let get_str_for_ord = |ord| {
|
||||
let mut out = String::new();
|
||||
vals.ord_to_str(ord, &mut out).unwrap();
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(vals.dictionary.num_terms(), 2);
|
||||
assert_eq!(get_str_for_ord(0), "a");
|
||||
assert_eq!(get_str_for_ord(1), "b");
|
||||
|
||||
let get_str_for_row = |row_id| {
|
||||
let term_ords: Vec<u64> = vals.term_ords(row_id).collect();
|
||||
assert!(term_ords.len() <= 1);
|
||||
let mut out = String::new();
|
||||
if term_ords.len() == 1 {
|
||||
vals.ord_to_str(term_ords[0], &mut out).unwrap();
|
||||
}
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(get_str_for_row(0), "a");
|
||||
assert_eq!(get_str_for_row(1), "");
|
||||
assert_eq!(get_str_for_row(2), "b");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_columnar_byte() {
|
||||
let columnar1 = make_byte_columnar_multiple_columns(&[("bytes", &[&[b"bbbb"], &[b"baaa"]])]);
|
||||
let columnar2 = make_byte_columnar_multiple_columns(&[("bytes", &[&[], &[b"a"]])]);
|
||||
let mut buffer = Vec::new();
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let stack_merge_order = StackMergeOrder::from_columnars(columnars);
|
||||
crate::columnar::merge_columnar(
|
||||
columnars,
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 4);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("bytes").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
|
||||
let get_bytes_for_ord = |ord| {
|
||||
let mut out = Vec::new();
|
||||
vals.ord_to_bytes(ord, &mut out).unwrap();
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(vals.dictionary.num_terms(), 3);
|
||||
assert_eq!(get_bytes_for_ord(0), b"a");
|
||||
assert_eq!(get_bytes_for_ord(1), b"baaa");
|
||||
assert_eq!(get_bytes_for_ord(2), b"bbbb");
|
||||
|
||||
let get_bytes_for_row = |row_id| {
|
||||
let term_ords: Vec<u64> = vals.term_ords(row_id).collect();
|
||||
assert!(term_ords.len() <= 1);
|
||||
let mut out = Vec::new();
|
||||
if term_ords.len() == 1 {
|
||||
vals.ord_to_bytes(term_ords[0], &mut out).unwrap();
|
||||
}
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(get_bytes_for_row(0), b"bbbb");
|
||||
assert_eq!(get_bytes_for_row(1), b"baaa");
|
||||
assert_eq!(get_bytes_for_row(2), b"");
|
||||
assert_eq!(get_bytes_for_row(3), b"a");
|
||||
}
|
||||
1  columnar/src/columnar/merge_index.rs  Normal file
@@ -0,0 +1 @@
|
||||
|
||||
11  columnar/src/columnar/mod.rs  Normal file
@@ -0,0 +1,11 @@
|
||||
mod column_type;
|
||||
mod format_version;
|
||||
mod merge;
|
||||
mod merge_index;
|
||||
mod reader;
|
||||
mod writer;
|
||||
|
||||
pub use column_type::{ColumnType, HasAssociatedColumnType};
|
||||
pub use merge::{merge_columnar, MergeRowOrder, StackMergeOrder};
|
||||
pub use reader::ColumnarReader;
|
||||
pub use writer::ColumnarWriter;
|
||||
171  columnar/src/columnar/reader/mod.rs  Normal file
@@ -0,0 +1,171 @@
|
||||
use std::{io, mem};
|
||||
|
||||
use common::file_slice::FileSlice;
|
||||
use common::BinarySerializable;
|
||||
use sstable::{Dictionary, RangeSSTable};
|
||||
|
||||
use crate::columnar::{format_version, ColumnType};
|
||||
use crate::dynamic_column::DynamicColumnHandle;
|
||||
use crate::RowId;
|
||||
|
||||
fn io_invalid_data(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::InvalidData, msg)
|
||||
}
|
||||
|
||||
/// The ColumnarReader makes it possible to access a set of columns
|
||||
/// associated to field names.
|
||||
#[derive(Clone)]
|
||||
pub struct ColumnarReader {
|
||||
column_dictionary: Dictionary<RangeSSTable>,
|
||||
column_data: FileSlice,
|
||||
num_rows: RowId,
|
||||
}
|
||||
|
||||
impl ColumnarReader {
|
||||
/// Opens a new Columnar file.
|
||||
pub fn open<F>(file_slice: F) -> io::Result<ColumnarReader>
|
||||
where FileSlice: From<F> {
|
||||
Self::open_inner(file_slice.into())
|
||||
}
|
||||
|
||||
fn open_inner(file_slice: FileSlice) -> io::Result<ColumnarReader> {
|
||||
let (file_slice_without_sstable_len, footer_slice) = file_slice
|
||||
.split_from_end(mem::size_of::<u64>() + 4 + format_version::VERSION_FOOTER_NUM_BYTES);
|
||||
let footer_bytes = footer_slice.read_bytes()?;
|
||||
let sstable_len = u64::deserialize(&mut &footer_bytes[0..8])?;
|
||||
let num_rows = u32::deserialize(&mut &footer_bytes[8..12])?;
|
||||
let version_footer_bytes: [u8; format_version::VERSION_FOOTER_NUM_BYTES] =
|
||||
footer_bytes[12..].try_into().unwrap();
|
||||
let _version = format_version::parse_footer(version_footer_bytes)?;
|
||||
let (column_data, sstable) =
|
||||
file_slice_without_sstable_len.split_from_end(sstable_len as usize);
|
||||
let column_dictionary = Dictionary::open(sstable)?;
|
||||
Ok(ColumnarReader {
|
||||
column_dictionary,
|
||||
column_data,
|
||||
num_rows,
|
||||
})
|
||||
}
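// Layout note (not part of the original diff): read from the end of the file, the
// footer parsed above is laid out as
// [.. column data .. | sstable | sstable_len: u64 | num_rows: u32 | version footer].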
|
||||
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.num_rows
|
||||
}
|
||||
|
||||
// TODO Add unit tests
|
||||
pub fn list_columns(&self) -> io::Result<Vec<(String, DynamicColumnHandle)>> {
|
||||
let mut stream = self.column_dictionary.stream()?;
|
||||
let mut results = Vec::new();
|
||||
while stream.advance() {
|
||||
let key_bytes: &[u8] = stream.key();
|
||||
let column_code: u8 = key_bytes.last().cloned().unwrap();
|
||||
let column_type: ColumnType = ColumnType::try_from_code(column_code)
|
||||
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
|
||||
let range = stream.value().clone();
|
||||
let column_name =
|
||||
// The last two bytes are respectively the 0u8 separator and the column_type.
|
||||
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 2]).to_string();
|
||||
let file_slice = self
|
||||
.column_data
|
||||
.slice(range.start as usize..range.end as usize);
|
||||
let column_handle = DynamicColumnHandle {
|
||||
file_slice,
|
||||
column_type,
|
||||
};
|
||||
results.push((column_name, column_handle));
|
||||
}
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
/// Get all columns for the given column name.
|
||||
///
|
||||
/// There can be more than one column associated to a given column name, provided they have
|
||||
/// different types.
|
||||
pub fn read_columns(&self, column_name: &str) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
// Each column is associated with a given `column_key`,
// that starts with `column_name\0column_header`.
|
||||
//
|
||||
// Listing the columns associated to the given column name is therefore equivalent to
|
||||
// listing `column_key` with the prefix `column_name\0`.
|
||||
//
|
||||
// This is in turn equivalent to searching for the range
|
||||
// `[column_name\0 .. column_name\1)`.
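// For instance (illustrative, not part of the original diff), a u64 column named
// "price" is stored under the key `b"price\0"` followed by the u64 column type code,
// so the range scan below returns every typed variant of the "price" column.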
|
||||
|
||||
// TODO can we get some more generic `prefix(..)` logic in the dictionary?
|
||||
let mut start_key = column_name.to_string();
|
||||
start_key.push('\0');
|
||||
let mut end_key = column_name.to_string();
|
||||
end_key.push(1u8 as char);
|
||||
let mut stream = self
|
||||
.column_dictionary
|
||||
.range()
|
||||
.ge(start_key.as_bytes())
|
||||
.lt(end_key.as_bytes())
|
||||
.into_stream()?;
|
||||
let mut results = Vec::new();
|
||||
while stream.advance() {
|
||||
let key_bytes: &[u8] = stream.key();
|
||||
assert!(key_bytes.starts_with(start_key.as_bytes()));
|
||||
let column_code: u8 = key_bytes.last().cloned().unwrap();
|
||||
let column_type = ColumnType::try_from_code(column_code)
|
||||
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
|
||||
let range = stream.value().clone();
|
||||
let file_slice = self
|
||||
.column_data
|
||||
.slice(range.start as usize..range.end as usize);
|
||||
let dynamic_column_handle = DynamicColumnHandle {
|
||||
file_slice,
|
||||
column_type,
|
||||
};
|
||||
results.push(dynamic_column_handle);
|
||||
}
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
/// Return the number of columns in the columnar.
|
||||
pub fn num_columns(&self) -> usize {
|
||||
self.column_dictionary.num_terms()
|
||||
}
|
||||
}
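// Hypothetical usage sketch (not part of the original diff):
//
//     let columnar = ColumnarReader::open(buffer)?;
//     for (column_name, handle) in columnar.list_columns()? {
//         let _dynamic_column = handle.open()?;
//     }
//     let _price_handles = columnar.read_columns("price")?;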
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{ColumnType, ColumnarReader, ColumnarWriter};
|
||||
|
||||
#[test]
|
||||
fn test_list_columns() {
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_column_type("col1", ColumnType::Str, false);
|
||||
columnar_writer.record_column_type("col2", ColumnType::U64, false);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(1, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
let columns = columnar.list_columns().unwrap();
|
||||
assert_eq!(columns.len(), 2);
|
||||
assert_eq!(&columns[0].0, "col1");
|
||||
assert_eq!(columns[0].1.column_type(), ColumnType::Str);
|
||||
assert_eq!(&columns[1].0, "col2");
|
||||
assert_eq!(columns[1].1.column_type(), ColumnType::U64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_list_columns_strict_typing_prevents_coercion() {
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_column_type("count", ColumnType::U64, false);
|
||||
columnar_writer.record_numerical(1, "count", 1u64);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(2, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
let columns = columnar.list_columns().unwrap();
|
||||
assert_eq!(columns.len(), 1);
|
||||
assert_eq!(&columns[0].0, "count");
|
||||
assert_eq!(columns[0].1.column_type(), ColumnType::U64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Input type forbidden")]
|
||||
fn test_list_columns_strict_typing_panics_on_wrong_types() {
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_column_type("count", ColumnType::U64, false);
|
||||
columnar_writer.record_numerical(1, "count", 1i64);
|
||||
}
|
||||
}
|
||||
360  columnar/src/columnar/writer/column_operation.rs  Normal file
@@ -0,0 +1,360 @@
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use crate::dictionary::UnorderedId;
|
||||
use crate::utils::{place_bits, pop_first_byte, select_bits};
|
||||
use crate::value::NumericalValue;
|
||||
use crate::{InvalidData, NumericalType, RowId};
|
||||
|
||||
/// When we build a columnar dataframe, we first just group
/// all mutations per column, and append them to an append-only buffer
/// in the stacker.
///
/// These `ColumnOperation<T>` are therefore serialized/deserialized
/// in memory.
///
/// We represent all of these operations as `ColumnOperation`.
|
||||
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
|
||||
pub(super) enum ColumnOperation<T> {
|
||||
NewDoc(RowId),
|
||||
Value(T),
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
struct ColumnOperationMetadata {
|
||||
op_type: ColumnOperationType,
|
||||
len: u8,
|
||||
}
|
||||
|
||||
impl ColumnOperationMetadata {
|
||||
fn to_code(self) -> u8 {
|
||||
place_bits::<0, 6>(self.len) | place_bits::<6, 8>(self.op_type.to_code())
|
||||
}
|
||||
|
||||
fn try_from_code(code: u8) -> Result<Self, InvalidData> {
|
||||
let len = select_bits::<0, 6>(code);
|
||||
let typ_code = select_bits::<6, 8>(code);
|
||||
let column_type = ColumnOperationType::try_from_code(typ_code)?;
|
||||
Ok(ColumnOperationMetadata {
|
||||
op_type: column_type,
|
||||
len,
|
||||
})
|
||||
}
|
||||
}
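// Layout note (not part of the original diff): the metadata byte packs the symbol
// length in its 6 lower bits and the operation type in its 2 upper bits, which is
// why a symbol length must fit within 6 bits.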
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
#[repr(u8)]
|
||||
enum ColumnOperationType {
|
||||
NewDoc = 0u8,
|
||||
AddValue = 1u8,
|
||||
}
|
||||
|
||||
impl ColumnOperationType {
|
||||
pub fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub fn try_from_code(code: u8) -> Result<Self, InvalidData> {
|
||||
match code {
|
||||
0 => Ok(Self::NewDoc),
|
||||
1 => Ok(Self::AddValue),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<V: SymbolValue> ColumnOperation<V> {
|
||||
pub(super) fn serialize(self) -> impl AsRef<[u8]> {
|
||||
let mut minibuf = MiniBuffer::default();
|
||||
let column_op_metadata = match self {
|
||||
ColumnOperation::NewDoc(new_doc) => {
|
||||
let symbol_len = new_doc.serialize(&mut minibuf.bytes[1..]);
|
||||
ColumnOperationMetadata {
|
||||
op_type: ColumnOperationType::NewDoc,
|
||||
len: symbol_len,
|
||||
}
|
||||
}
|
||||
ColumnOperation::Value(val) => {
|
||||
let symbol_len = val.serialize(&mut minibuf.bytes[1..]);
|
||||
ColumnOperationMetadata {
|
||||
op_type: ColumnOperationType::AddValue,
|
||||
len: symbol_len,
|
||||
}
|
||||
}
|
||||
};
|
||||
minibuf.bytes[0] = column_op_metadata.to_code();
|
||||
// +1 for the metadata
|
||||
minibuf.len = 1 + column_op_metadata.len;
|
||||
minibuf
|
||||
}
|
||||
|
||||
/// Deserializes a column operation.
/// Returns None if the buffer is empty.
///
/// Panics if the payload is invalid:
/// this deserialization method is only meant for in-memory data
/// that was serialized by `serialize` above.
|
||||
pub(super) fn deserialize(bytes: &mut &[u8]) -> Option<Self> {
|
||||
let column_op_metadata_byte = pop_first_byte(bytes)?;
|
||||
let column_op_metadata = ColumnOperationMetadata::try_from_code(column_op_metadata_byte)
|
||||
.expect("Invalid op metadata byte");
|
||||
let symbol_bytes: &[u8];
|
||||
(symbol_bytes, *bytes) = bytes.split_at(column_op_metadata.len as usize);
|
||||
match column_op_metadata.op_type {
|
||||
ColumnOperationType::NewDoc => {
|
||||
let new_doc = u32::deserialize(symbol_bytes);
|
||||
Some(ColumnOperation::NewDoc(new_doc))
|
||||
}
|
||||
ColumnOperationType::AddValue => {
|
||||
let value = V::deserialize(symbol_bytes);
|
||||
Some(ColumnOperation::Value(value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<T> for ColumnOperation<T> {
|
||||
fn from(value: T) -> Self {
|
||||
ColumnOperation::Value(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Serialization trait very local to the writer.
// As we write fast fields, we accumulate them in memory.
// In order to limit memory usage, and in order
// to benefit from the stacker, we do this by serializing our data
// as "Symbols".
|
||||
#[allow(clippy::from_over_into)]
|
||||
pub(super) trait SymbolValue: Clone + Copy {
|
||||
// Serializes the symbol into the given buffer.
|
||||
// Returns the number of bytes written into the buffer.
|
||||
/// # Panics
|
||||
/// May not exceed 9 bytes
|
||||
fn serialize(self, buffer: &mut [u8]) -> u8;
|
||||
// Panics if invalid
|
||||
fn deserialize(bytes: &[u8]) -> Self;
|
||||
}
|
||||
|
||||
impl SymbolValue for bool {
|
||||
fn serialize(self, buffer: &mut [u8]) -> u8 {
|
||||
buffer[0] = u8::from(self);
|
||||
1u8
|
||||
}
|
||||
|
||||
fn deserialize(bytes: &[u8]) -> Self {
|
||||
bytes[0] == 1u8
|
||||
}
|
||||
}
|
||||
|
||||
impl SymbolValue for Ipv6Addr {
|
||||
fn serialize(self, buffer: &mut [u8]) -> u8 {
|
||||
buffer[0..16].copy_from_slice(&self.octets());
|
||||
16
|
||||
}
|
||||
|
||||
fn deserialize(bytes: &[u8]) -> Self {
|
||||
let octets: [u8; 16] = bytes[0..16].try_into().unwrap();
|
||||
Ipv6Addr::from(octets)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct MiniBuffer {
|
||||
pub bytes: [u8; 17],
|
||||
pub len: u8,
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for MiniBuffer {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
&self.bytes[..self.len as usize]
|
||||
}
|
||||
}
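// Note (not part of the original diff): 17 bytes is enough for the largest symbol,
// i.e. 1 metadata byte followed by up to 16 bytes of payload (an `Ipv6Addr`).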
|
||||
|
||||
impl SymbolValue for NumericalValue {
|
||||
fn deserialize(mut bytes: &[u8]) -> Self {
|
||||
let type_code = pop_first_byte(&mut bytes).unwrap();
|
||||
let symbol_type = NumericalType::try_from_code(type_code).unwrap();
|
||||
let mut octet: [u8; 8] = [0u8; 8];
|
||||
octet[..bytes.len()].copy_from_slice(bytes);
|
||||
match symbol_type {
|
||||
NumericalType::U64 => {
|
||||
let val: u64 = u64::from_le_bytes(octet);
|
||||
NumericalValue::U64(val)
|
||||
}
|
||||
NumericalType::I64 => {
|
||||
let encoded: u64 = u64::from_le_bytes(octet);
|
||||
let val: i64 = decode_zig_zag(encoded);
|
||||
NumericalValue::I64(val)
|
||||
}
|
||||
NumericalType::F64 => {
|
||||
debug_assert_eq!(bytes.len(), 8);
|
||||
let val: f64 = f64::from_le_bytes(octet);
|
||||
NumericalValue::F64(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// F64: Serialize with a fixed size of 9 bytes
|
||||
/// U64: Serialize without leading zeroes
|
||||
/// I64: ZigZag encoded and serialize without leading zeroes
|
||||
fn serialize(self, output: &mut [u8]) -> u8 {
|
||||
match self {
|
||||
NumericalValue::F64(val) => {
|
||||
output[0] = NumericalType::F64 as u8;
|
||||
output[1..9].copy_from_slice(&val.to_le_bytes());
|
||||
9u8
|
||||
}
|
||||
NumericalValue::U64(val) => {
|
||||
let len = compute_num_bytes_for_u64(val) as u8;
|
||||
output[0] = NumericalType::U64 as u8;
|
||||
output[1..9].copy_from_slice(&val.to_le_bytes());
|
||||
len + 1u8
|
||||
}
|
||||
NumericalValue::I64(val) => {
|
||||
let zig_zag_encoded = encode_zig_zag(val);
|
||||
let len = compute_num_bytes_for_u64(zig_zag_encoded) as u8;
|
||||
output[0] = NumericalType::I64 as u8;
|
||||
output[1..9].copy_from_slice(&zig_zag_encoded.to_le_bytes());
|
||||
len + 1u8
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SymbolValue for u32 {
|
||||
fn serialize(self, output: &mut [u8]) -> u8 {
|
||||
let len = compute_num_bytes_for_u64(self as u64);
|
||||
output[0..4].copy_from_slice(&self.to_le_bytes());
|
||||
len as u8
|
||||
}
|
||||
|
||||
fn deserialize(bytes: &[u8]) -> Self {
|
||||
let mut quartet: [u8; 4] = [0u8; 4];
|
||||
quartet[..bytes.len()].copy_from_slice(bytes);
|
||||
u32::from_le_bytes(quartet)
|
||||
}
|
||||
}
|
||||
|
||||
impl SymbolValue for UnorderedId {
|
||||
fn serialize(self, output: &mut [u8]) -> u8 {
|
||||
self.0.serialize(output)
|
||||
}
|
||||
|
||||
fn deserialize(bytes: &[u8]) -> Self {
|
||||
UnorderedId(u32::deserialize(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_num_bytes_for_u64(val: u64) -> usize {
|
||||
let msb = (64u32 - val.leading_zeros()) as usize;
|
||||
(msb + 7) / 8
|
||||
}
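// Illustrative values (see the test below): 0 -> 0 bytes, 255 -> 1 byte,
// 256 -> 2 bytes, 1 << 16 -> 3 bytes.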
|
||||
|
||||
fn encode_zig_zag(n: i64) -> u64 {
|
||||
((n << 1) ^ (n >> 63)) as u64
|
||||
}
|
||||
|
||||
fn decode_zig_zag(n: u64) -> i64 {
|
||||
((n >> 1) as i64) ^ (-((n & 1) as i64))
|
||||
}
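// Illustrative mapping (see the tests below): 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4,
// so values with a small absolute value stay small once zigzag-encoded.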
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[track_caller]
|
||||
fn test_zig_zag_aux(val: i64) {
|
||||
let encoded = super::encode_zig_zag(val);
|
||||
assert_eq!(decode_zig_zag(encoded), val);
|
||||
if let Some(abs_val) = val.checked_abs() {
|
||||
let abs_val = abs_val as u64;
|
||||
assert!(encoded <= abs_val * 2);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zig_zag() {
|
||||
assert_eq!(encode_zig_zag(0i64), 0u64);
|
||||
assert_eq!(encode_zig_zag(-1i64), 1u64);
|
||||
assert_eq!(encode_zig_zag(1i64), 2u64);
|
||||
test_zig_zag_aux(0i64);
|
||||
test_zig_zag_aux(i64::MIN);
|
||||
test_zig_zag_aux(i64::MAX);
|
||||
}
|
||||
|
||||
use proptest::prelude::any;
|
||||
use proptest::proptest;
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_proptest_zig_zag(val in any::<i64>()) {
|
||||
test_zig_zag_aux(val);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_op_metadata_byte_serialization() {
|
||||
for len in 0..=15 {
|
||||
for op_type in [ColumnOperationType::AddValue, ColumnOperationType::NewDoc] {
|
||||
let column_op_metadata = ColumnOperationMetadata { op_type, len };
|
||||
let column_op_metadata_code = column_op_metadata.to_code();
|
||||
let serdeser_metadata =
|
||||
ColumnOperationMetadata::try_from_code(column_op_metadata_code).unwrap();
|
||||
assert_eq!(column_op_metadata, serdeser_metadata);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn ser_deser_symbol(column_op: ColumnOperation<NumericalValue>) {
|
||||
let buf = column_op.serialize();
|
||||
let mut buffer = buf.as_ref().to_vec();
|
||||
buffer.extend_from_slice(b"234234");
|
||||
let mut bytes = &buffer[..];
|
||||
let serdeser_symbol = ColumnOperation::deserialize(&mut bytes).unwrap();
|
||||
assert_eq!(bytes.len() + buf.as_ref().len() as usize, buffer.len());
|
||||
assert_eq!(column_op, serdeser_symbol);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_num_bytes_for_u64() {
|
||||
assert_eq!(compute_num_bytes_for_u64(0), 0);
|
||||
assert_eq!(compute_num_bytes_for_u64(1), 1);
|
||||
assert_eq!(compute_num_bytes_for_u64(255), 1);
|
||||
assert_eq!(compute_num_bytes_for_u64(256), 2);
|
||||
assert_eq!(compute_num_bytes_for_u64((1 << 16) - 1), 2);
|
||||
assert_eq!(compute_num_bytes_for_u64(1 << 16), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_symbol_serialization() {
|
||||
ser_deser_symbol(ColumnOperation::NewDoc(0));
|
||||
ser_deser_symbol(ColumnOperation::NewDoc(3));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(0i64)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(1i64)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(257u64)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(-257i64)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(i64::MIN)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(0u64)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MIN)));
|
||||
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MAX)));
|
||||
}
|
||||
|
||||
fn test_column_operation_unordered_aux(val: u32, expected_len: usize) {
|
||||
let column_op = ColumnOperation::Value(UnorderedId(val));
|
||||
let minibuf = column_op.serialize();
|
||||
assert_eq!(minibuf.as_ref().len() as usize, expected_len);
|
||||
let mut buf = minibuf.as_ref().to_vec();
|
||||
buf.extend_from_slice(&[2, 2, 2, 2, 2, 2]);
|
||||
let mut cursor = &buf[..];
|
||||
let column_op_serdeser: ColumnOperation<UnorderedId> =
|
||||
ColumnOperation::deserialize(&mut cursor).unwrap();
|
||||
assert_eq!(column_op_serdeser, ColumnOperation::Value(UnorderedId(val)));
|
||||
assert_eq!(cursor.len() + expected_len, buf.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_operation_unordered() {
|
||||
test_column_operation_unordered_aux(300u32, 3);
|
||||
test_column_operation_unordered_aux(1u32, 2);
|
||||
test_column_operation_unordered_aux(0u32, 1);
|
||||
}
|
||||
}
|
||||
336  columnar/src/columnar/writer/column_writers.rs  Normal file
@@ -0,0 +1,336 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use stacker::{ExpUnrolledLinkedList, MemoryArena};
|
||||
|
||||
use crate::columnar::writer::column_operation::{ColumnOperation, SymbolValue};
|
||||
use crate::dictionary::{DictionaryBuilder, UnorderedId};
|
||||
use crate::{Cardinality, NumericalType, NumericalValue, RowId};
|
||||
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
|
||||
#[repr(u8)]
|
||||
enum DocumentStep {
|
||||
Same = 0,
|
||||
Next = 1,
|
||||
Skipped = 2,
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn delta_with_last_doc(last_doc_opt: Option<u32>, doc: u32) -> DocumentStep {
|
||||
let expected_next_doc = last_doc_opt.map(|last_doc| last_doc + 1).unwrap_or(0u32);
|
||||
match doc.cmp(&expected_next_doc) {
|
||||
Ordering::Less => DocumentStep::Same,
|
||||
Ordering::Equal => DocumentStep::Next,
|
||||
Ordering::Greater => DocumentStep::Skipped,
|
||||
}
|
||||
}
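// Illustrative values (see the tests below): (None, 0) -> Next, (Some(1), 1) -> Same,
// (Some(1), 2) -> Next, (Some(1), 3) -> Skipped.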
|
||||
|
||||
#[derive(Copy, Clone, Default)]
|
||||
pub struct ColumnWriter {
|
||||
// Detected cardinality of the column so far.
|
||||
cardinality: Cardinality,
|
||||
// Last document inserted.
|
||||
// None if no doc has been added yet.
|
||||
last_doc_opt: Option<u32>,
|
||||
// Buffer containing the serialized values.
|
||||
values: ExpUnrolledLinkedList,
|
||||
}
|
||||
|
||||
impl ColumnWriter {
|
||||
/// Returns an iterator over the Symbol that have been recorded
|
||||
/// for the given column.
|
||||
pub(super) fn operation_iterator<'a, V: SymbolValue>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
|
||||
buffer.clear();
|
||||
self.values.read_to_end(arena, buffer);
|
||||
let mut cursor: &[u8] = &buffer[..];
|
||||
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
|
||||
}
|
||||
|
||||
/// Records a value for the given document.
|
||||
///
|
||||
/// This function will also update the cardinality of the column
|
||||
/// if necessary.
|
||||
pub(super) fn record<S: SymbolValue>(&mut self, doc: RowId, value: S, arena: &mut MemoryArena) {
|
||||
// Difference between `doc` and the last doc.
|
||||
match delta_with_last_doc(self.last_doc_opt, doc) {
|
||||
DocumentStep::Same => {
|
||||
// This doc is the same as the last encountered document.
|
||||
self.cardinality = Cardinality::Multivalued;
|
||||
}
|
||||
DocumentStep::Next => {
|
||||
self.last_doc_opt = Some(doc);
|
||||
self.write_symbol::<S>(ColumnOperation::NewDoc(doc), arena);
|
||||
}
|
||||
DocumentStep::Skipped => {
|
||||
self.cardinality = self.cardinality.max(Cardinality::Optional);
|
||||
self.last_doc_opt = Some(doc);
|
||||
self.write_symbol::<S>(ColumnOperation::NewDoc(doc), arena);
|
||||
}
|
||||
}
|
||||
self.write_symbol(ColumnOperation::Value(value), arena);
|
||||
}
|
||||
|
||||
// Get the cardinality.
|
||||
// The overall number of docs in the column is necessary to
|
||||
// deal with the case where all docs contain 1 value, except some documents
|
||||
// at the end of the column.
|
||||
pub(crate) fn get_cardinality(&self, num_docs: RowId) -> Cardinality {
|
||||
match delta_with_last_doc(self.last_doc_opt, num_docs) {
|
||||
DocumentStep::Same | DocumentStep::Next => self.cardinality,
|
||||
DocumentStep::Skipped => self.cardinality.max(Cardinality::Optional),
|
||||
}
|
||||
}
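// Note (not part of the original diff): if the last recorded doc is not the last doc
// of the column (e.g. last doc 5 with `num_docs` = 10), the trailing documents carry
// no value, so the cardinality is bumped to at least `Optional`.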
|
||||
|
||||
/// Appends a new symbol to the `ColumnWriter`.
|
||||
fn write_symbol<V: SymbolValue>(
|
||||
&mut self,
|
||||
column_operation: ColumnOperation<V>,
|
||||
arena: &mut MemoryArena,
|
||||
) {
|
||||
self.values
|
||||
.writer(arena)
|
||||
.extend_from_slice(column_operation.serialize().as_ref());
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Default)]
|
||||
pub(crate) struct NumericalColumnWriter {
|
||||
compatible_numerical_types: CompatibleNumericalTypes,
|
||||
column_writer: ColumnWriter,
|
||||
}
|
||||
|
||||
impl NumericalColumnWriter {
|
||||
pub fn force_numerical_type(&mut self, numerical_type: NumericalType) {
|
||||
assert!(self
|
||||
.compatible_numerical_types
|
||||
.is_type_accepted(numerical_type));
|
||||
self.compatible_numerical_types = CompatibleNumericalTypes::StaticType(numerical_type);
|
||||
}
|
||||
}
|
||||
|
||||
/// State used to store what types are still acceptable
|
||||
/// after having seen a set of numerical values.
|
||||
#[derive(Clone, Copy)]
|
||||
pub(crate) enum CompatibleNumericalTypes {
|
||||
Dynamic {
|
||||
all_values_within_i64_range: bool,
|
||||
all_values_within_u64_range: bool,
|
||||
},
|
||||
StaticType(NumericalType),
|
||||
}
|
||||
|
||||
impl Default for CompatibleNumericalTypes {
|
||||
fn default() -> CompatibleNumericalTypes {
|
||||
CompatibleNumericalTypes::Dynamic {
|
||||
all_values_within_i64_range: true,
|
||||
all_values_within_u64_range: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CompatibleNumericalTypes {
|
||||
pub fn is_type_accepted(&self, numerical_type: NumericalType) -> bool {
|
||||
match self {
|
||||
CompatibleNumericalTypes::Dynamic {
|
||||
all_values_within_i64_range,
|
||||
all_values_within_u64_range,
|
||||
} => match numerical_type {
|
||||
NumericalType::I64 => *all_values_within_i64_range,
|
||||
NumericalType::U64 => *all_values_within_u64_range,
|
||||
NumericalType::F64 => true,
|
||||
},
|
||||
CompatibleNumericalTypes::StaticType(static_numerical_type) => {
|
||||
*static_numerical_type == numerical_type
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn accept_value(&mut self, numerical_value: NumericalValue) {
|
||||
match self {
|
||||
CompatibleNumericalTypes::Dynamic {
|
||||
all_values_within_i64_range,
|
||||
all_values_within_u64_range,
|
||||
} => match numerical_value {
|
||||
NumericalValue::I64(val_i64) => {
|
||||
let value_within_u64_range = val_i64 >= 0i64;
|
||||
*all_values_within_u64_range &= value_within_u64_range;
|
||||
}
|
||||
NumericalValue::U64(val_u64) => {
|
||||
let value_within_i64_range = val_u64 < i64::MAX as u64;
|
||||
*all_values_within_i64_range &= value_within_i64_range;
|
||||
}
|
||||
NumericalValue::F64(_) => {
|
||||
*all_values_within_i64_range = false;
|
||||
*all_values_within_u64_range = false;
|
||||
}
|
||||
},
|
||||
CompatibleNumericalTypes::StaticType(typ) => {
|
||||
assert_eq!(
|
||||
numerical_value.numerical_type(),
|
||||
*typ,
|
||||
"Input type forbidden. This column has been forced to type {typ:?}, received \
|
||||
{numerical_value:?}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_numerical_type(self) -> NumericalType {
|
||||
for numerical_type in [NumericalType::I64, NumericalType::U64] {
|
||||
if self.is_type_accepted(numerical_type) {
|
||||
return numerical_type;
|
||||
}
|
||||
}
|
||||
NumericalType::F64
|
||||
}
|
||||
}
|
||||
|
||||
impl NumericalColumnWriter {
|
||||
pub fn column_type_and_cardinality(&self, num_docs: RowId) -> (NumericalType, Cardinality) {
|
||||
let numerical_type = self.compatible_numerical_types.to_numerical_type();
|
||||
let cardinality = self.column_writer.get_cardinality(num_docs);
|
||||
(numerical_type, cardinality)
|
||||
}
|
||||
|
||||
pub fn record_numerical_value(
|
||||
&mut self,
|
||||
doc: RowId,
|
||||
value: NumericalValue,
|
||||
arena: &mut MemoryArena,
|
||||
) {
|
||||
self.compatible_numerical_types.accept_value(value);
|
||||
self.column_writer.record(doc, value, arena);
|
||||
}
|
||||
|
||||
pub(super) fn operation_iterator<'a>(
|
||||
self,
|
||||
arena: &MemoryArena,
|
||||
buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
|
||||
self.column_writer.operation_iterator(arena, buffer)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub(crate) struct StrOrBytesColumnWriter {
|
||||
pub(crate) dictionary_id: u32,
|
||||
pub(crate) column_writer: ColumnWriter,
|
||||
// If true, when facing a multivalued cardinality,
|
||||
// values associated to a given document will be sorted.
|
||||
//
|
||||
// This is useful for facets.
|
||||
//
|
||||
// If false, the order of appearance in the document will be
|
||||
// observed.
|
||||
pub(crate) sort_values_within_row: bool,
|
||||
}
|
||||
|
||||
impl StrOrBytesColumnWriter {
|
||||
pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrOrBytesColumnWriter {
|
||||
StrOrBytesColumnWriter {
|
||||
dictionary_id,
|
||||
column_writer: Default::default(),
|
||||
sort_values_within_row: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn record_bytes(
|
||||
&mut self,
|
||||
doc: RowId,
|
||||
bytes: &[u8],
|
||||
dictionaries: &mut [DictionaryBuilder],
|
||||
arena: &mut MemoryArena,
|
||||
) {
|
||||
let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
|
||||
self.column_writer.record(doc, unordered_id, arena);
|
||||
}
|
||||
|
||||
pub(super) fn operation_iterator<'a>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
byte_buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
|
||||
self.column_writer.operation_iterator(arena, byte_buffer)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_delta_with_last_doc() {
|
||||
assert_eq!(delta_with_last_doc(None, 0u32), DocumentStep::Next);
|
||||
assert_eq!(delta_with_last_doc(None, 1u32), DocumentStep::Skipped);
|
||||
assert_eq!(delta_with_last_doc(None, 2u32), DocumentStep::Skipped);
|
||||
assert_eq!(delta_with_last_doc(Some(0u32), 0u32), DocumentStep::Same);
|
||||
assert_eq!(delta_with_last_doc(Some(1u32), 1u32), DocumentStep::Same);
|
||||
assert_eq!(delta_with_last_doc(Some(1u32), 2u32), DocumentStep::Next);
|
||||
assert_eq!(delta_with_last_doc(Some(1u32), 3u32), DocumentStep::Skipped);
|
||||
assert_eq!(delta_with_last_doc(Some(1u32), 4u32), DocumentStep::Skipped);
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn test_column_writer_coercion_iter_aux(
|
||||
values: impl Iterator<Item = NumericalValue>,
|
||||
expected_numerical_type: NumericalType,
|
||||
) {
|
||||
let mut compatible_numerical_types = CompatibleNumericalTypes::default();
|
||||
for value in values {
|
||||
compatible_numerical_types.accept_value(value);
|
||||
}
|
||||
assert_eq!(
|
||||
compatible_numerical_types.to_numerical_type(),
|
||||
expected_numerical_type
|
||||
);
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn test_column_writer_coercion_aux(
|
||||
values: &[NumericalValue],
|
||||
expected_numerical_type: NumericalType,
|
||||
) {
|
||||
test_column_writer_coercion_iter_aux(values.iter().copied(), expected_numerical_type);
|
||||
test_column_writer_coercion_iter_aux(values.iter().rev().copied(), expected_numerical_type);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_writer_coercion() {
|
||||
test_column_writer_coercion_aux(&[], NumericalType::I64);
|
||||
test_column_writer_coercion_aux(&[1i64.into()], NumericalType::I64);
|
||||
test_column_writer_coercion_aux(&[1u64.into()], NumericalType::I64);
|
||||
// We don't detect exact integer at the moment. We could!
|
||||
test_column_writer_coercion_aux(&[1f64.into()], NumericalType::F64);
|
||||
test_column_writer_coercion_aux(&[u64::MAX.into()], NumericalType::U64);
|
||||
test_column_writer_coercion_aux(&[(i64::MAX as u64).into()], NumericalType::U64);
|
||||
test_column_writer_coercion_aux(&[(1u64 << 63).into()], NumericalType::U64);
|
||||
test_column_writer_coercion_aux(&[1i64.into(), 1u64.into()], NumericalType::I64);
|
||||
test_column_writer_coercion_aux(&[u64::MAX.into(), (-1i64).into()], NumericalType::F64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_compatible_numerical_types_static_incompatible_type() {
|
||||
let mut compatible_numerical_types =
|
||||
CompatibleNumericalTypes::StaticType(NumericalType::U64);
|
||||
compatible_numerical_types.accept_value(NumericalValue::I64(1i64));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compatible_numerical_types_static_different_type_forbidden() {
|
||||
let mut compatible_numerical_types =
|
||||
CompatibleNumericalTypes::StaticType(NumericalType::U64);
|
||||
compatible_numerical_types.accept_value(NumericalValue::U64(u64::MAX));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compatible_numerical_types_static() {
|
||||
for typ in [NumericalType::I64, NumericalType::I64, NumericalType::F64] {
|
||||
let compatible_numerical_types = CompatibleNumericalTypes::StaticType(typ);
|
||||
assert_eq!(compatible_numerical_types.to_numerical_type(), typ);
|
||||
}
|
||||
}
|
||||
}
|
||||
786  columnar/src/columnar/writer/mod.rs  Normal file
@@ -0,0 +1,786 @@
|
||||
mod column_operation;
|
||||
mod column_writers;
|
||||
mod serializer;
|
||||
mod value_index;
|
||||
|
||||
use std::io;
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use column_operation::ColumnOperation;
|
||||
pub(crate) use column_writers::CompatibleNumericalTypes;
|
||||
use common::CountingWriter;
|
||||
pub(crate) use serializer::ColumnarSerializer;
|
||||
use stacker::{Addr, ArenaHashMap, MemoryArena};
|
||||
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::column_values::{
|
||||
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
|
||||
};
|
||||
use crate::columnar::column_type::{ColumnType, ColumnTypeCategory};
|
||||
use crate::columnar::writer::column_writers::{
|
||||
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
|
||||
};
|
||||
use crate::columnar::writer::value_index::{IndexBuilder, PreallocatedIndexBuilders, OptionalIndexBuilder};
|
||||
use crate::dictionary::{DictionaryBuilder, TermIdMapping, UnorderedId};
|
||||
use crate::value::{Coerce, NumericalType, NumericalValue};
|
||||
use crate::{Cardinality, RowId};
|
||||
|
||||
/// This is a set of buffers that are used to temporarily write the values into before passing them
|
||||
/// to the fast field codecs.
|
||||
#[derive(Default)]
|
||||
struct SpareBuffers {
|
||||
value_index_builders: PreallocatedIndexBuilders,
|
||||
u64_values: Vec<u64>,
|
||||
ip_addr_values: Vec<Ipv6Addr>,
|
||||
}
|
||||
|
||||
/// Makes it possible to create a new columnar.
|
||||
///
|
||||
/// ```rust
|
||||
/// use tantivy_columnar::ColumnarWriter;
|
||||
///
|
||||
/// let mut columnar_writer = ColumnarWriter::default();
|
||||
/// columnar_writer.record_str(0u32 /* doc id */, "product_name", "Red backpack");
|
||||
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10u64);
|
||||
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
|
||||
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh we ended up mixing integer and floats.
|
||||
/// let mut wrt: Vec<u8> = Vec::new();
|
||||
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
|
||||
/// ```
|
||||
pub struct ColumnarWriter {
|
||||
numerical_field_hash_map: ArenaHashMap,
|
||||
datetime_field_hash_map: ArenaHashMap,
|
||||
bool_field_hash_map: ArenaHashMap,
|
||||
ip_addr_field_hash_map: ArenaHashMap,
|
||||
bytes_field_hash_map: ArenaHashMap,
|
||||
str_field_hash_map: ArenaHashMap,
|
||||
arena: MemoryArena,
|
||||
// Dictionaries used to store dictionary-encoded values.
|
||||
dictionaries: Vec<DictionaryBuilder>,
|
||||
buffers: SpareBuffers,
|
||||
}
|
||||
|
||||
impl Default for ColumnarWriter {
|
||||
fn default() -> Self {
|
||||
ColumnarWriter {
|
||||
numerical_field_hash_map: ArenaHashMap::new(10_000),
|
||||
bool_field_hash_map: ArenaHashMap::new(10_000),
|
||||
ip_addr_field_hash_map: ArenaHashMap::new(10_000),
|
||||
bytes_field_hash_map: ArenaHashMap::new(10_000),
|
||||
str_field_hash_map: ArenaHashMap::new(10_000),
|
||||
datetime_field_hash_map: ArenaHashMap::new(10_000),
|
||||
dictionaries: Vec::new(),
|
||||
arena: MemoryArena::default(),
|
||||
buffers: SpareBuffers::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn mutate_or_create_column<V, TMutator>(
|
||||
arena_hash_map: &mut ArenaHashMap,
|
||||
column_name: &str,
|
||||
updater: TMutator,
|
||||
) where
|
||||
V: Copy + 'static,
|
||||
TMutator: FnMut(Option<V>) -> V,
|
||||
{
|
||||
assert!(
|
||||
!column_name.as_bytes().contains(&0u8),
|
||||
"key may not contain the 0 byte"
|
||||
);
|
||||
arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
|
||||
}
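// Added note (not in the original source): the 0-byte restriction matters because
// the columnar serializer later builds each column key as `column_name\0type_code`
// (see `prepare_key` in serializer.rs), so a NUL byte inside the column name would
// make that key ambiguous.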
|
||||
|
||||
impl ColumnarWriter {
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
// TODO add dictionary builders.
|
||||
self.arena.mem_usage()
|
||||
+ self.numerical_field_hash_map.mem_usage()
|
||||
+ self.bool_field_hash_map.mem_usage()
|
||||
+ self.bytes_field_hash_map.mem_usage()
|
||||
+ self.str_field_hash_map.mem_usage()
|
||||
+ self.ip_addr_field_hash_map.mem_usage()
|
||||
+ self.datetime_field_hash_map.mem_usage()
|
||||
}
|
||||
|
||||
/// Records a column type.
///
/// This is useful to bypass the coercion process, to make sure a column is
/// present in the resulting columnar even if it is empty, or to set
/// `sort_values_within_row`.
///
/// `sort_values_within_row` is only allowed for `Bytes` or `Str` columns.
pub fn record_column_type(
|
||||
&mut self,
|
||||
column_name: &str,
|
||||
column_type: ColumnType,
|
||||
sort_values_within_row: bool,
|
||||
) {
|
||||
if sort_values_within_row {
|
||||
assert!(
|
||||
column_type == ColumnType::Bytes || column_type == ColumnType::Str,
|
||||
"sort_values_within_row is only allowed for Bytes and Str columns",
|
||||
);
|
||||
}
|
||||
match column_type {
|
||||
ColumnType::Str | ColumnType::Bytes => {
|
||||
let (hash_map, dictionaries) = (
|
||||
if column_type == ColumnType::Str {
|
||||
&mut self.str_field_hash_map
|
||||
} else {
|
||||
&mut self.bytes_field_hash_map
|
||||
},
|
||||
&mut self.dictionaries,
|
||||
);
|
||||
mutate_or_create_column(
|
||||
hash_map,
|
||||
column_name,
|
||||
|column_opt: Option<StrOrBytesColumnWriter>| {
|
||||
let mut column_writer = if let Some(column_writer) = column_opt {
|
||||
column_writer
|
||||
} else {
|
||||
let dictionary_id = dictionaries.len() as u32;
|
||||
dictionaries.push(DictionaryBuilder::default());
|
||||
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
|
||||
};
|
||||
column_writer.sort_values_within_row = sort_values_within_row;
|
||||
column_writer
|
||||
},
|
||||
);
|
||||
}
|
||||
ColumnType::Bool => {
|
||||
mutate_or_create_column(
|
||||
&mut self.bool_field_hash_map,
|
||||
column_name,
|
||||
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
ColumnType::DateTime => {
|
||||
mutate_or_create_column(
|
||||
&mut self.datetime_field_hash_map,
|
||||
column_name,
|
||||
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
|
||||
let numerical_type = column_type.numerical_type().unwrap();
|
||||
mutate_or_create_column(
|
||||
&mut self.numerical_field_hash_map,
|
||||
column_name,
|
||||
|column_opt: Option<NumericalColumnWriter>| {
|
||||
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
|
||||
column.force_numerical_type(numerical_type);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
ColumnType::IpAddr => mutate_or_create_column(
|
||||
&mut self.ip_addr_field_hash_map,
|
||||
column_name,
|
||||
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record_numerical<T: Into<NumericalValue> + Copy>(
|
||||
&mut self,
|
||||
doc: RowId,
|
||||
column_name: &str,
|
||||
numerical_value: T,
|
||||
) {
|
||||
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
|
||||
mutate_or_create_column(
|
||||
hash_map,
|
||||
column_name,
|
||||
|column_opt: Option<NumericalColumnWriter>| {
|
||||
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record_numerical_value(doc, numerical_value.into(), arena);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
|
||||
assert!(
|
||||
!column_name.as_bytes().contains(&0u8),
|
||||
"key may not contain the 0 byte"
|
||||
);
|
||||
let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(doc, ip_addr, arena);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
|
||||
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
|
||||
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(doc, val, arena);
|
||||
column
|
||||
});
|
||||
}
|
||||
|
||||
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: crate::DateTime) {
|
||||
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
|
||||
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
|
||||
let mut column: ColumnWriter = column_opt.unwrap_or_default();
|
||||
column.record(doc, NumericalValue::I64(datetime.timestamp_micros), arena);
|
||||
column
|
||||
});
|
||||
}
|
||||
|
||||
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
|
||||
let (hash_map, arena, dictionaries) = (
|
||||
&mut self.str_field_hash_map,
|
||||
&mut self.arena,
|
||||
&mut self.dictionaries,
|
||||
);
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<StrOrBytesColumnWriter>| {
|
||||
let mut column: StrOrBytesColumnWriter = column_opt.unwrap_or_else(|| {
|
||||
// Each column has its own dictionary
|
||||
let dictionary_id = dictionaries.len() as u32;
|
||||
dictionaries.push(DictionaryBuilder::default());
|
||||
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
|
||||
});
|
||||
column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
|
||||
assert!(
|
||||
!column_name.as_bytes().contains(&0u8),
|
||||
"key may not contain the 0 byte"
|
||||
);
|
||||
let (hash_map, arena, dictionaries) = (
|
||||
&mut self.bytes_field_hash_map,
|
||||
&mut self.arena,
|
||||
&mut self.dictionaries,
|
||||
);
|
||||
hash_map.mutate_or_create(
|
||||
column_name.as_bytes(),
|
||||
|column_opt: Option<StrOrBytesColumnWriter>| {
|
||||
let mut column: StrOrBytesColumnWriter = column_opt.unwrap_or_else(|| {
|
||||
// Each column has its own dictionary
|
||||
let dictionary_id = dictionaries.len() as u32;
|
||||
dictionaries.push(DictionaryBuilder::default());
|
||||
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
|
||||
});
|
||||
column.record_bytes(doc, value, dictionaries, arena);
|
||||
column
|
||||
},
|
||||
);
|
||||
}
|
||||
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
|
||||
let mut serializer = ColumnarSerializer::new(wrt);
|
||||
let mut columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
|
||||
.numerical_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Numerical, addr))
|
||||
.collect();
|
||||
columns.extend(
|
||||
self.bytes_field_hash_map
|
||||
.iter()
|
||||
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bytes, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.str_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Str, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.bool_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Bool, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.ip_addr_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::IpAddr, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.datetime_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::DateTime, addr)),
|
||||
);
|
||||
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
|
||||
|
||||
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
|
||||
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
|
||||
for (column_name, column_type, addr) in columns {
|
||||
match column_type {
|
||||
ColumnTypeCategory::Bool => {
|
||||
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
|
||||
let cardinality = column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, ColumnType::Bool);
|
||||
serialize_bool_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::IpAddr => {
|
||||
let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr);
|
||||
let cardinality = column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, ColumnType::IpAddr);
|
||||
serialize_ip_addr_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::Bytes | ColumnTypeCategory::Str => {
|
||||
let (column_type, str_column_writer): (ColumnType, StrOrBytesColumnWriter) =
|
||||
if column_type == ColumnTypeCategory::Bytes {
|
||||
(ColumnType::Bytes, self.bytes_field_hash_map.read(addr))
|
||||
} else {
|
||||
(ColumnType::Str, self.str_field_hash_map.read(addr))
|
||||
};
|
||||
let dictionary_builder =
|
||||
&dictionaries[str_column_writer.dictionary_id as usize];
|
||||
let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, column_type);
|
||||
serialize_bytes_or_str_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
str_column_writer.sort_values_within_row,
|
||||
dictionary_builder,
|
||||
str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::Numerical => {
|
||||
let numerical_column_writer: NumericalColumnWriter =
|
||||
self.numerical_field_hash_map.read(addr);
|
||||
let (numerical_type, cardinality) =
|
||||
numerical_column_writer.column_type_and_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, ColumnType::from(numerical_type));
|
||||
serialize_numerical_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
numerical_type,
|
||||
numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::DateTime => {
|
||||
let column_writer: ColumnWriter = self.datetime_field_hash_map.read(addr);
|
||||
let cardinality = column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, ColumnType::DateTime);
|
||||
serialize_numerical_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
NumericalType::I64,
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
};
|
||||
}
|
||||
serializer.finalize(num_docs)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
|
||||
// Column: [Column Index, Column Values, column index num bytes U32::LE]
|
||||
fn serialize_bytes_or_str_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
sort_values_within_row: bool,
|
||||
dictionary_builder: &DictionaryBuilder,
|
||||
operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
wrt: impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
..
|
||||
} = buffers;
|
||||
let mut counting_writer = CountingWriter::wrap(wrt);
|
||||
let term_id_mapping: TermIdMapping = dictionary_builder.serialize(&mut counting_writer)?;
|
||||
let dictionary_num_bytes: u32 = counting_writer.written_bytes() as u32;
|
||||
let mut wrt = counting_writer.finish();
|
||||
let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
|
||||
// We map unordered ids to ordered ids.
|
||||
match symbol {
|
||||
ColumnOperation::Value(unordered_id) => {
|
||||
let ordered_id = term_id_mapping.to_ord(unordered_id);
|
||||
ColumnOperation::Value(ordered_id.0 as u64)
|
||||
}
|
||||
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
|
||||
}
|
||||
});
|
||||
send_to_serialize_column_mappable_to_u64(
|
||||
operation_iterator,
|
||||
cardinality,
|
||||
num_docs,
|
||||
sort_values_within_row,
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
&mut wrt,
|
||||
)?;
|
||||
wrt.write_all(&dictionary_num_bytes.to_le_bytes()[..])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_numerical_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
numerical_type: NumericalType,
|
||||
op_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
wrt: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
..
|
||||
} = buffers;
|
||||
match numerical_type {
|
||||
NumericalType::I64 => {
|
||||
send_to_serialize_column_mappable_to_u64(
|
||||
coerce_numerical_symbol::<i64>(op_iterator),
|
||||
cardinality,
|
||||
num_docs,
|
||||
false,
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
wrt,
|
||||
)?;
|
||||
}
|
||||
NumericalType::U64 => {
|
||||
send_to_serialize_column_mappable_to_u64(
|
||||
coerce_numerical_symbol::<u64>(op_iterator),
|
||||
cardinality,
|
||||
num_docs,
|
||||
false,
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
wrt,
|
||||
)?;
|
||||
}
|
||||
NumericalType::F64 => {
|
||||
send_to_serialize_column_mappable_to_u64(
|
||||
coerce_numerical_symbol::<f64>(op_iterator),
|
||||
cardinality,
|
||||
num_docs,
|
||||
false,
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
wrt,
|
||||
)?;
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_bool_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
column_operations_it: impl Iterator<Item = ColumnOperation<bool>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
wrt: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
..
|
||||
} = buffers;
|
||||
send_to_serialize_column_mappable_to_u64(
|
||||
column_operations_it.map(|bool_column_operation| match bool_column_operation {
|
||||
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
|
||||
ColumnOperation::Value(bool_val) => ColumnOperation::Value(bool_val.to_u64()),
|
||||
}),
|
||||
cardinality,
|
||||
num_docs,
|
||||
false,
|
||||
value_index_builders,
|
||||
u64_values,
|
||||
wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_ip_addr_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
column_operations_it: impl Iterator<Item = ColumnOperation<Ipv6Addr>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
wrt: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
value_index_builders,
|
||||
ip_addr_values,
|
||||
..
|
||||
} = buffers;
|
||||
send_to_serialize_column_mappable_to_u128(
|
||||
column_operations_it,
|
||||
cardinality,
|
||||
num_docs,
|
||||
value_index_builders,
|
||||
ip_addr_values,
|
||||
wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn send_to_serialize_column_mappable_to_u128<
|
||||
T: Copy + Ord + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU128 + PartialOrd,
|
||||
>(
|
||||
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
|
||||
cardinality: Cardinality,
|
||||
num_rows: RowId,
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<T>,
|
||||
mut wrt: impl io::Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
for<'a> VecColumn<'a, T>: ColumnValues<T>,
|
||||
{
|
||||
values.clear();
|
||||
// TODO: split index and values
|
||||
let serializable_column_index = match cardinality {
|
||||
Cardinality::Full => {
|
||||
consume_operation_iterator(
|
||||
op_iterator,
|
||||
value_index_builders.borrow_required_index_builder(),
|
||||
values,
|
||||
);
|
||||
SerializableColumnIndex::Full
|
||||
}
|
||||
Cardinality::Optional => {
|
||||
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
|
||||
consume_operation_iterator(op_iterator, optional_index_builder, values);
|
||||
let non_null_rows: &[u32] = optional_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Optional {
|
||||
num_rows,
|
||||
non_null_row_ids: Box::new(|| Box::new(non_null_rows.iter().copied())),
|
||||
}
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
|
||||
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Multivalued(Box::new(|| Box::new(multivalued_index.iter().copied())))
|
||||
}
|
||||
};
|
||||
crate::column::serialize_column_mappable_to_u128(
|
||||
serializable_column_index,
|
||||
&|| values.iter().copied(),
|
||||
values.len() as u32,
|
||||
&mut wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut Vec<u64>) {
|
||||
let mut start_index: usize = 0;
|
||||
for end_index in multivalued_index.iter().copied() {
|
||||
let end_index = end_index as usize;
|
||||
values[start_index..end_index].sort_unstable();
|
||||
start_index = end_index;
|
||||
}
|
||||
}
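// Added worked example (not in the original source): with a multivalued index of
// [0, 2, 5] and values [9, 4, 7, 6, 1], the loop sorts values[0..0] (empty),
// then values[0..2] into [4, 9], then values[2..5] into [1, 6, 7], leaving the
// buffer as [4, 9, 1, 6, 7] with each row's values sorted in place.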
|
||||
|
||||
fn send_to_serialize_column_mappable_to_u64<'a>(
|
||||
op_iterator: impl Iterator<Item = ColumnOperation<u64>>,
|
||||
cardinality: Cardinality,
|
||||
num_rows: RowId,
|
||||
sort_values_within_row: bool,
|
||||
value_index_builders: &'a mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<u64>,
|
||||
mut wrt: impl io::Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
for<'b> VecColumn<'b, u64>: ColumnValues<u64>,
|
||||
{
|
||||
values.clear();
|
||||
let serializable_column_index = match cardinality {
|
||||
Cardinality::Full => {
|
||||
consume_operation_iterator(
|
||||
op_iterator,
|
||||
value_index_builders.borrow_required_index_builder(),
|
||||
values,
|
||||
);
|
||||
SerializableColumnIndex::Full
|
||||
}
|
||||
Cardinality::Optional => {
|
||||
let optional_index_builder: &'a mut OptionalIndexBuilder = value_index_builders.borrow_optional_index_builder();
|
||||
consume_operation_iterator(op_iterator, optional_index_builder, values);
|
||||
let optional_index: &'a [u32] = optional_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Optional {
|
||||
non_null_row_ids: Box::new(move || Box::new(optional_index.iter().copied())),
|
||||
num_rows,
|
||||
}
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
|
||||
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
if sort_values_within_row {
|
||||
sort_values_within_row_in_place(multivalued_index, values);
|
||||
}
|
||||
SerializableColumnIndex::Multivalued(Box::new(|| Box::new(multivalued_index.iter().copied())))
|
||||
}
|
||||
};
|
||||
crate::column::serialize_column_mappable_to_u64(
|
||||
serializable_column_index,
|
||||
&|| values.iter().copied(),
|
||||
&mut wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn coerce_numerical_symbol<T>(
|
||||
operation_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
|
||||
) -> impl Iterator<Item = ColumnOperation<u64>>
|
||||
where T: Coerce + MonotonicallyMappableToU64 {
|
||||
operation_iterator.map(|symbol| match symbol {
|
||||
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
|
||||
ColumnOperation::Value(numerical_value) => {
|
||||
ColumnOperation::Value(T::coerce(numerical_value).to_u64())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn consume_operation_iterator<T: Ord, TIndexBuilder: IndexBuilder>(
|
||||
operation_iterator: impl Iterator<Item = ColumnOperation<T>>,
|
||||
index_builder: &mut TIndexBuilder,
|
||||
values: &mut Vec<T>,
|
||||
) {
|
||||
for symbol in operation_iterator {
|
||||
match symbol {
|
||||
ColumnOperation::NewDoc(doc) => {
|
||||
index_builder.record_row(doc);
|
||||
}
|
||||
ColumnOperation::Value(value) => {
|
||||
index_builder.record_value();
|
||||
values.push(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use stacker::MemoryArena;
|
||||
|
||||
use crate::columnar::writer::column_operation::ColumnOperation;
|
||||
use crate::{Cardinality, NumericalValue};
|
||||
|
||||
#[test]
|
||||
fn test_column_writer_required_simple() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut column_writer = super::ColumnWriter::default();
|
||||
column_writer.record(0u32, NumericalValue::from(14i64), &mut arena);
|
||||
column_writer.record(1u32, NumericalValue::from(15i64), &mut arena);
|
||||
column_writer.record(2u32, NumericalValue::from(-16i64), &mut arena);
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 6);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
assert!(matches!(
|
||||
symbols[1],
|
||||
ColumnOperation::Value(NumericalValue::I64(14i64))
|
||||
));
|
||||
assert!(matches!(symbols[2], ColumnOperation::NewDoc(1u32)));
|
||||
assert!(matches!(
|
||||
symbols[3],
|
||||
ColumnOperation::Value(NumericalValue::I64(15i64))
|
||||
));
|
||||
assert!(matches!(symbols[4], ColumnOperation::NewDoc(2u32)));
|
||||
assert!(matches!(
|
||||
symbols[5],
|
||||
ColumnOperation::Value(NumericalValue::I64(-16i64))
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_writer_optional_cardinality_missing_first() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut column_writer = super::ColumnWriter::default();
|
||||
column_writer.record(1u32, NumericalValue::from(15i64), &mut arena);
|
||||
column_writer.record(2u32, NumericalValue::from(-16i64), &mut arena);
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 4);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
|
||||
assert!(matches!(
|
||||
symbols[1],
|
||||
ColumnOperation::Value(NumericalValue::I64(15i64))
|
||||
));
|
||||
assert!(matches!(symbols[2], ColumnOperation::NewDoc(2u32)));
|
||||
assert!(matches!(
|
||||
symbols[3],
|
||||
ColumnOperation::Value(NumericalValue::I64(-16i64))
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_writer_optional_cardinality_missing_last() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut column_writer = super::ColumnWriter::default();
|
||||
column_writer.record(0u32, NumericalValue::from(15i64), &mut arena);
|
||||
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 2);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
assert!(matches!(
|
||||
symbols[1],
|
||||
ColumnOperation::Value(NumericalValue::I64(15i64))
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_writer_multivalued() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut column_writer = super::ColumnWriter::default();
|
||||
column_writer.record(0u32, NumericalValue::from(16i64), &mut arena);
|
||||
column_writer.record(0u32, NumericalValue::from(17i64), &mut arena);
|
||||
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 3);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
assert!(matches!(
|
||||
symbols[1],
|
||||
ColumnOperation::Value(NumericalValue::I64(16i64))
|
||||
));
|
||||
assert!(matches!(
|
||||
symbols[2],
|
||||
ColumnOperation::Value(NumericalValue::I64(17i64))
|
||||
));
|
||||
}
|
||||
}
|
||||
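A minimal usage sketch (illustrative, not part of this commit) of the numerical coercion described above. It assumes the crate is consumed as `tantivy_columnar`, as in the `ColumnarWriter` doc-comment; the function and column names are made up for the example.

use tantivy_columnar::{ColumnarWriter, NumericalValue};

fn write_columns() -> std::io::Result<Vec<u8>> {
    let mut columnar_writer = ColumnarWriter::default();
    // Values that all fit in i64: the "count" column is serialized as I64.
    columnar_writer.record_numerical(0u32, "count", 1i64);
    columnar_writer.record_numerical(1u32, "count", 1u64);
    // u64::MAX mixed with a negative value cannot fit any single integer type,
    // so the "mixed" column falls back to F64 (see the coercion tests above).
    columnar_writer.record_numerical(0u32, "mixed", u64::MAX);
    columnar_writer.record_numerical(1u32, "mixed", NumericalValue::I64(-1));
    let mut buffer: Vec<u8> = Vec::new();
    columnar_writer.serialize(2u32, &mut buffer)?;
    Ok(buffer)
}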
columnar/src/columnar/writer/serializer.rs (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use common::{BinarySerializable, CountingWriter};
|
||||
use sstable::value::RangeValueWriter;
|
||||
use sstable::RangeSSTable;
|
||||
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::RowId;
|
||||
|
||||
pub struct ColumnarSerializer<W: io::Write> {
|
||||
wrt: CountingWriter<W>,
|
||||
sstable_range: sstable::Writer<Vec<u8>, RangeValueWriter>,
|
||||
prepare_key_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Returns a key consisting of the concatenation of the column name, a 0 byte,
/// and the column type code.
fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
|
||||
buffer.clear();
|
||||
buffer.extend_from_slice(key);
|
||||
buffer.push(0u8);
|
||||
buffer.push(column_type.to_code());
|
||||
}
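// Added example (not in the original source): prepare_key(b"price", ColumnType::U64, &mut buf)
// leaves buf as b"price", a 0 separator byte, then the single code byte of
// ColumnType::U64; this is the key stored in the per-column range sstable.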
|
||||
|
||||
impl<W: io::Write> ColumnarSerializer<W> {
|
||||
pub(crate) fn new(wrt: W) -> ColumnarSerializer<W> {
|
||||
let sstable_range: sstable::Writer<Vec<u8>, RangeValueWriter> =
|
||||
sstable::Dictionary::<RangeSSTable>::builder(Vec::with_capacity(100_000)).unwrap();
|
||||
ColumnarSerializer {
|
||||
wrt: CountingWriter::wrap(wrt),
|
||||
sstable_range,
|
||||
prepare_key_buffer: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn serialize_column<'a>(
|
||||
&'a mut self,
|
||||
column_name: &[u8],
|
||||
column_type: ColumnType,
|
||||
) -> impl io::Write + 'a {
|
||||
let start_offset = self.wrt.written_bytes();
|
||||
prepare_key(column_name, column_type, &mut self.prepare_key_buffer);
|
||||
ColumnSerializer {
|
||||
columnar_serializer: self,
|
||||
start_offset,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn finalize(mut self, num_rows: RowId) -> io::Result<()> {
|
||||
let sstable_bytes: Vec<u8> = self.sstable_range.finish()?;
|
||||
let sstable_num_bytes: u64 = sstable_bytes.len() as u64;
|
||||
self.wrt.write_all(&sstable_bytes)?;
|
||||
self.wrt.write_all(&sstable_num_bytes.to_le_bytes()[..])?;
|
||||
num_rows.serialize(&mut self.wrt)?;
|
||||
self.wrt
|
||||
.write_all(&super::super::format_version::footer())?;
|
||||
self.wrt.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ColumnSerializer<'a, W: io::Write> {
|
||||
columnar_serializer: &'a mut ColumnarSerializer<W>,
|
||||
start_offset: u64,
|
||||
}
|
||||
|
||||
impl<'a, W: io::Write> Drop for ColumnSerializer<'a, W> {
|
||||
fn drop(&mut self) {
|
||||
let end_offset: u64 = self.columnar_serializer.wrt.written_bytes();
|
||||
let byte_range = self.start_offset..end_offset;
|
||||
self.columnar_serializer.sstable_range.insert_cannot_fail(
|
||||
&self.columnar_serializer.prepare_key_buffer[..],
|
||||
&byte_range,
|
||||
);
|
||||
self.columnar_serializer.prepare_key_buffer.clear();
|
||||
}
|
||||
}
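// Added note (not in the original source): the (key -> byte range) entry is only
// registered in the range sstable when this ColumnSerializer is dropped; the
// mutable borrow on the parent serializer guarantees it is dropped before the
// next column is serialized and before finalize runs.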
|
||||
|
||||
impl<'a, W: io::Write> io::Write for ColumnSerializer<'a, W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.columnar_serializer.wrt.write(buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.columnar_serializer.wrt.flush()
|
||||
}
|
||||
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
self.columnar_serializer.wrt.write_all(buf)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::columnar::column_type::ColumnType;
|
||||
|
||||
#[test]
|
||||
fn test_prepare_key_bytes() {
|
||||
let mut buffer: Vec<u8> = b"somegarbage".to_vec();
|
||||
prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
|
||||
assert_eq!(buffer.len(), 12);
|
||||
assert_eq!(&buffer[..10], b"root\0child");
|
||||
assert_eq!(buffer[10], 0u8);
|
||||
assert_eq!(buffer[11], ColumnType::Str.to_code());
|
||||
}
|
||||
}
|
||||
columnar/src/columnar/writer/value_index.rs (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
use crate::RowId;
|
||||
|
||||
/// The `IndexBuilder` interprets a sequence of
/// calls of the form:
/// (record_row, record_value+)*
/// and can then serialize the results into an index to associate doc ids with their value(s).
///
/// It has different implementations depending on whether the
/// cardinality is required, optional, or multivalued.
pub(crate) trait IndexBuilder {
|
||||
fn record_row(&mut self, doc: RowId);
|
||||
#[inline]
|
||||
fn record_value(&mut self) {}
|
||||
}
|
||||
|
||||
/// The FullIndexBuilder does nothing.
|
||||
#[derive(Default)]
|
||||
pub struct FullIndexBuilder;
|
||||
|
||||
impl IndexBuilder for FullIndexBuilder {
|
||||
#[inline(always)]
|
||||
fn record_row(&mut self, _doc: RowId) {}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct OptionalIndexBuilder {
|
||||
docs: Vec<RowId>,
|
||||
}
|
||||
|
||||
impl OptionalIndexBuilder {
|
||||
pub fn finish<'a>(&'a mut self, num_rows: RowId) -> &'a [RowId] {
|
||||
debug_assert!(self
|
||||
.docs
|
||||
.last()
|
||||
.copied()
|
||||
.map(|last_doc| last_doc < num_rows)
|
||||
.unwrap_or(true));
|
||||
&self.docs[..]
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.docs.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexBuilder for OptionalIndexBuilder {
|
||||
#[inline(always)]
|
||||
fn record_row(&mut self, doc: RowId) {
|
||||
debug_assert!(self
|
||||
.docs
|
||||
.last()
|
||||
.copied()
|
||||
.map(|prev_doc| doc > prev_doc)
|
||||
.unwrap_or(true));
|
||||
self.docs.push(doc);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MultivaluedIndexBuilder {
|
||||
start_offsets: Vec<RowId>,
|
||||
total_num_vals_seen: u32,
|
||||
}
|
||||
|
||||
impl MultivaluedIndexBuilder {
|
||||
pub fn finish(&mut self, num_docs: RowId) -> &[u32] {
|
||||
self.start_offsets
|
||||
.resize(num_docs as usize + 1, self.total_num_vals_seen);
|
||||
&self.start_offsets[..]
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.start_offsets.clear();
|
||||
self.start_offsets.push(0u32);
|
||||
self.total_num_vals_seen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexBuilder for MultivaluedIndexBuilder {
|
||||
fn record_row(&mut self, row_id: RowId) {
|
||||
self.start_offsets
|
||||
.resize(row_id as usize + 1, self.total_num_vals_seen);
|
||||
}
|
||||
|
||||
fn record_value(&mut self) {
|
||||
self.total_num_vals_seen += 1;
|
||||
}
|
||||
}
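// Added note (not in the original source): `start_offsets[row]` is the number of
// values seen before `row` starts, so row `r` owns values
// `start_offsets[r]..start_offsets[r + 1]`. For example record_row(1),
// record_value(), record_value(), record_row(2), record_value(), then finish(4)
// yields [0, 0, 2, 3, 3]: row 0 has no value, row 1 owns values 0..2,
// row 2 owns value 2..3, and row 3 owns nothing.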
|
||||
|
||||
/// The `PreallocatedIndexBuilders` struct is there to avoid allocating a
/// new index builder for every single column.
#[derive(Default)]
|
||||
pub struct PreallocatedIndexBuilders {
|
||||
required_index_builder: FullIndexBuilder,
|
||||
optional_index_builder: OptionalIndexBuilder,
|
||||
multivalued_index_builder: MultivaluedIndexBuilder,
|
||||
}
|
||||
|
||||
impl PreallocatedIndexBuilders {
|
||||
pub fn borrow_required_index_builder(&mut self) -> &mut FullIndexBuilder {
|
||||
&mut self.required_index_builder
|
||||
}
|
||||
|
||||
pub fn borrow_optional_index_builder(&mut self) -> &mut OptionalIndexBuilder {
|
||||
self.optional_index_builder.reset();
|
||||
&mut self.optional_index_builder
|
||||
}
|
||||
|
||||
pub fn borrow_multivalued_index_builder(&mut self) -> &mut MultivaluedIndexBuilder {
|
||||
self.multivalued_index_builder.reset();
|
||||
&mut self.multivalued_index_builder
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_optional_value_index_builder() {
|
||||
let mut opt_value_index_builder = OptionalIndexBuilder::default();
|
||||
opt_value_index_builder.record_row(0u32);
|
||||
opt_value_index_builder.record_value();
|
||||
assert_eq!(
|
||||
&opt_value_index_builder.finish(1u32),
|
||||
&[0]
|
||||
);
|
||||
opt_value_index_builder.reset();
|
||||
opt_value_index_builder.record_row(1u32);
|
||||
opt_value_index_builder.record_value();
|
||||
assert_eq!(
|
||||
&opt_value_index_builder.finish(2u32),
|
||||
&[1]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multivalued_value_index_builder() {
|
||||
let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
|
||||
multivalued_value_index_builder.record_row(1u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_row(2u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
assert_eq!(
|
||||
multivalued_value_index_builder
|
||||
.finish(4u32)
|
||||
.iter()
|
||||
.copied()
|
||||
.collect::<Vec<u32>>(),
|
||||
vec![0, 0, 2, 3, 3]
|
||||
);
|
||||
multivalued_value_index_builder.reset();
|
||||
multivalued_value_index_builder.record_row(2u32);
|
||||
multivalued_value_index_builder.record_value();
|
||||
multivalued_value_index_builder.record_value();
|
||||
assert_eq!(
|
||||
multivalued_value_index_builder
|
||||
.finish(4u32)
|
||||
.iter()
|
||||
.copied()
|
||||
.collect::<Vec<u32>>(),
|
||||
vec![0, 0, 0, 2, 2]
|
||||
);
|
||||
}
|
||||
}
|
||||
columnar/src/dictionary.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
use std::io;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use sstable::SSTable;
|
||||
|
||||
pub(crate) struct TermIdMapping {
|
||||
unordered_to_ord: Vec<OrderedId>,
|
||||
}
|
||||
|
||||
impl TermIdMapping {
|
||||
pub fn to_ord(&self, unordered: UnorderedId) -> OrderedId {
|
||||
self.unordered_to_ord[unordered.0 as usize]
|
||||
}
|
||||
}
|
||||
|
||||
/// When we add values, we cannot know their ordered id yet.
|
||||
/// For this reason, we temporarily assign them an `UnorderedId`
|
||||
/// that will be mapped to an `OrderedId` upon serialization.
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct UnorderedId(pub u32);
|
||||
|
||||
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
|
||||
pub struct OrderedId(pub u32);
|
||||
|
||||
/// `DictionaryBuilder` for dictionary encoding.
|
||||
///
|
||||
/// It stores the different terms encountered and assigns them a temporary value
|
||||
/// we call unordered id.
|
||||
///
|
||||
/// Upon serialization, we will sort the ids and hence build a `UnorderedId -> Term ordinal`
|
||||
/// mapping.
|
||||
#[derive(Default)]
|
||||
pub(crate) struct DictionaryBuilder {
|
||||
dict: FnvHashMap<Vec<u8>, UnorderedId>,
|
||||
}
|
||||
|
||||
impl DictionaryBuilder {
|
||||
/// Get or allocate an unordered id.
|
||||
/// (This ID is simply an auto-incremented id.)
|
||||
pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
|
||||
if let Some(term_id) = self.dict.get(term) {
|
||||
return *term_id;
|
||||
}
|
||||
let new_id = UnorderedId(self.dict.len() as u32);
|
||||
self.dict.insert(term.to_vec(), new_id);
|
||||
new_id
|
||||
}
|
||||
|
||||
/// Serialize the dictionary into an fst, and returns the
|
||||
/// `UnorderedId -> TermOrdinal` map.
|
||||
pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> =
|
||||
self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
|
||||
terms.sort_unstable_by_key(|(key, _)| *key);
|
||||
// TODO Remove the allocation.
|
||||
let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
|
||||
let mut sstable_builder = sstable::VoidSSTable::writer(wrt);
|
||||
for (ord, (key, unordered_id)) in terms.into_iter().enumerate() {
|
||||
let ordered_id = OrderedId(ord as u32);
|
||||
sstable_builder.insert(key, &())?;
|
||||
unordered_to_ord[unordered_id.0 as usize] = ordered_id;
|
||||
}
|
||||
sstable_builder.finish()?;
|
||||
Ok(TermIdMapping { unordered_to_ord })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_builder() {
|
||||
let mut dictionary_builder = DictionaryBuilder::default();
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
|
||||
let mut buffer = Vec::new();
|
||||
let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
|
||||
assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
|
||||
assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
|
||||
assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));
|
||||
}
|
||||
}
|
||||
columnar/src/dynamic_column.rs (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
use std::io;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::file_slice::FileSlice;
|
||||
use common::{HasLen, OwnedBytes};
|
||||
|
||||
use crate::column::{BytesColumn, Column, StrColumn};
|
||||
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::{Cardinality, DateTime, NumericalType};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DynamicColumn {
|
||||
Bool(Column<bool>),
|
||||
I64(Column<i64>),
|
||||
U64(Column<u64>),
|
||||
F64(Column<f64>),
|
||||
IpAddr(Column<Ipv6Addr>),
|
||||
DateTime(Column<DateTime>),
|
||||
Bytes(BytesColumn),
|
||||
Str(StrColumn),
|
||||
}
|
||||
|
||||
impl DynamicColumn {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
DynamicColumn::Bool(c) => c.get_cardinality(),
|
||||
DynamicColumn::I64(c) => c.get_cardinality(),
|
||||
DynamicColumn::U64(c) => c.get_cardinality(),
|
||||
DynamicColumn::F64(c) => c.get_cardinality(),
|
||||
DynamicColumn::IpAddr(c) => c.get_cardinality(),
|
||||
DynamicColumn::DateTime(c) => c.get_cardinality(),
|
||||
DynamicColumn::Bytes(c) => c.ords().get_cardinality(),
|
||||
DynamicColumn::Str(c) => c.ords().get_cardinality(),
|
||||
}
|
||||
}
|
||||
pub fn column_type(&self) -> ColumnType {
|
||||
match self {
|
||||
DynamicColumn::Bool(_) => ColumnType::Bool,
|
||||
DynamicColumn::I64(_) => ColumnType::I64,
|
||||
DynamicColumn::U64(_) => ColumnType::U64,
|
||||
DynamicColumn::F64(_) => ColumnType::F64,
|
||||
DynamicColumn::IpAddr(_) => ColumnType::IpAddr,
|
||||
DynamicColumn::DateTime(_) => ColumnType::DateTime,
|
||||
DynamicColumn::Bytes(_) => ColumnType::Bytes,
|
||||
DynamicColumn::Str(_) => ColumnType::Str,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn coerce_numerical(self, target_numerical_type: NumericalType) -> Option<Self> {
|
||||
match target_numerical_type {
|
||||
NumericalType::I64 => self.coerce_to_i64(),
|
||||
NumericalType::U64 => self.coerce_to_u64(),
|
||||
NumericalType::F64 => self.coerce_to_f64(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_numerical(&self) -> bool {
|
||||
self.column_type().numerical_type().is_some()
|
||||
}
|
||||
|
||||
pub fn is_f64(&self) -> bool {
|
||||
self.column_type().numerical_type() == Some(NumericalType::F64)
|
||||
}
|
||||
pub fn is_i64(&self) -> bool {
|
||||
self.column_type().numerical_type() == Some(NumericalType::I64)
|
||||
}
|
||||
pub fn is_u64(&self) -> bool {
|
||||
self.column_type().numerical_type() == Some(NumericalType::U64)
|
||||
}
|
||||
|
||||
fn coerce_to_f64(self) -> Option<DynamicColumn> {
|
||||
match self {
|
||||
DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column {
|
||||
idx: column.idx,
|
||||
values: Arc::new(monotonic_map_column(column.values, MapI64ToF64)),
|
||||
})),
|
||||
DynamicColumn::U64(column) => Some(DynamicColumn::F64(Column {
|
||||
idx: column.idx,
|
||||
values: Arc::new(monotonic_map_column(column.values, MapU64ToF64)),
|
||||
})),
|
||||
DynamicColumn::F64(_) => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
fn coerce_to_i64(self) -> Option<DynamicColumn> {
|
||||
match self {
|
||||
DynamicColumn::U64(column) => {
|
||||
if column.max_value() > i64::MAX as u64 {
|
||||
return None;
|
||||
}
|
||||
Some(DynamicColumn::I64(Column {
|
||||
idx: column.idx,
|
||||
values: Arc::new(monotonic_map_column(column.values, MapU64ToI64)),
|
||||
}))
|
||||
}
|
||||
DynamicColumn::I64(_) => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
fn coerce_to_u64(self) -> Option<DynamicColumn> {
|
||||
match self {
|
||||
DynamicColumn::I64(column) => {
|
||||
if column.min_value() < 0 {
|
||||
return None;
|
||||
}
|
||||
Some(DynamicColumn::U64(Column {
|
||||
idx: column.idx,
|
||||
values: Arc::new(monotonic_map_column(column.values, MapI64ToU64)),
|
||||
}))
|
||||
}
|
||||
DynamicColumn::U64(_) => Some(self),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
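// Added note (not in the original source): integer coercions only succeed when
// they cannot overflow (U64 -> I64 needs max_value() <= i64::MAX, I64 -> U64
// needs min_value() >= 0), while coercing to F64 always succeeds even though
// values above 2^53 lose precision.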
|
||||
|
||||
struct MapI64ToF64;
|
||||
impl StrictlyMonotonicFn<i64, f64> for MapI64ToF64 {
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: i64) -> f64 {
|
||||
inp as f64
|
||||
}
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: f64) -> i64 {
|
||||
out as i64
|
||||
}
|
||||
}
|
||||
|
||||
struct MapU64ToF64;
|
||||
impl StrictlyMonotonicFn<u64, f64> for MapU64ToF64 {
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: u64) -> f64 {
|
||||
inp as f64
|
||||
}
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: f64) -> u64 {
|
||||
out as u64
|
||||
}
|
||||
}
|
||||
|
||||
struct MapU64ToI64;
|
||||
impl StrictlyMonotonicFn<u64, i64> for MapU64ToI64 {
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: u64) -> i64 {
|
||||
inp as i64
|
||||
}
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: i64) -> u64 {
|
||||
out as u64
|
||||
}
|
||||
}
|
||||
|
||||
struct MapI64ToU64;
|
||||
impl StrictlyMonotonicFn<i64, u64> for MapI64ToU64 {
|
||||
#[inline(always)]
|
||||
fn mapping(&self, inp: i64) -> u64 {
|
||||
inp as u64
|
||||
}
|
||||
#[inline(always)]
|
||||
fn inverse(&self, out: u64) -> i64 {
|
||||
out as i64
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! static_dynamic_conversions {
|
||||
($typ:ty, $enum_name:ident) => {
|
||||
impl Into<Option<$typ>> for DynamicColumn {
|
||||
fn into(self) -> Option<$typ> {
|
||||
if let DynamicColumn::$enum_name(col) = self {
|
||||
Some(col)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<$typ> for DynamicColumn {
|
||||
fn from(typed_column: $typ) -> Self {
|
||||
DynamicColumn::$enum_name(typed_column)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
static_dynamic_conversions!(Column<bool>, Bool);
|
||||
static_dynamic_conversions!(Column<u64>, U64);
|
||||
static_dynamic_conversions!(Column<i64>, I64);
|
||||
static_dynamic_conversions!(Column<f64>, F64);
|
||||
static_dynamic_conversions!(Column<crate::DateTime>, DateTime);
|
||||
static_dynamic_conversions!(StrColumn, Str);
|
||||
static_dynamic_conversions!(BytesColumn, Bytes);
|
||||
static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DynamicColumnHandle {
|
||||
pub(crate) file_slice: FileSlice,
|
||||
pub(crate) column_type: ColumnType,
|
||||
}
|
||||
|
||||
impl DynamicColumnHandle {
|
||||
// TODO rename load
|
||||
pub fn open(&self) -> io::Result<DynamicColumn> {
|
||||
let column_bytes: OwnedBytes = self.file_slice.read_bytes()?;
|
||||
self.open_internal(column_bytes)
|
||||
}
|
||||
|
||||
// TODO rename load_async
|
||||
pub async fn open_async(&self) -> io::Result<DynamicColumn> {
|
||||
let column_bytes: OwnedBytes = self.file_slice.read_bytes_async().await?;
|
||||
self.open_internal(column_bytes)
|
||||
}
|
||||
|
||||
/// Returns the `u64` fast field reader associated with `fields` of types
/// Str, u64, i64, f64, or datetime.
///
/// If not, the fastfield reader will return the u64 value associated with the original
/// FastValue.
pub fn open_u64_lenient(&self) -> io::Result<Option<Column<u64>>> {
|
||||
let column_bytes = self.file_slice.read_bytes()?;
|
||||
match self.column_type {
|
||||
ColumnType::Str | ColumnType::Bytes => {
|
||||
let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
|
||||
Ok(Some(column.term_ord_column))
|
||||
}
|
||||
ColumnType::Bool => Ok(None),
|
||||
ColumnType::IpAddr => Ok(None),
|
||||
ColumnType::I64 | ColumnType::U64 | ColumnType::F64 | ColumnType::DateTime => {
|
||||
let column = crate::column::open_column_u64::<u64>(column_bytes)?;
|
||||
Ok(Some(column))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
|
||||
let dynamic_column: DynamicColumn = match self.column_type {
|
||||
ColumnType::Bytes => {
|
||||
crate::column::open_column_bytes::<BytesColumn>(column_bytes)?.into()
|
||||
}
|
||||
ColumnType::Str => crate::column::open_column_bytes::<StrColumn>(column_bytes)?.into(),
|
||||
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
|
||||
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
|
||||
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
|
||||
ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
|
||||
ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
|
||||
ColumnType::DateTime => {
|
||||
crate::column::open_column_u64::<crate::DateTime>(column_bytes)?.into()
|
||||
}
|
||||
};
|
||||
Ok(dynamic_column)
|
||||
}
|
||||
|
||||
pub fn num_bytes(&self) -> usize {
|
||||
self.file_slice.len()
|
||||
}
|
||||
|
||||
pub fn column_type(&self) -> ColumnType {
|
||||
self.column_type
|
||||
}
|
||||
}
|
||||
columnar/src/iterable.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::Range;
|
||||
|
||||
pub trait Iterable<T = u64> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_>;
|
||||
}
|
||||
|
||||
struct Mapped<U, Original, Transform> {
|
||||
original_iterable: Original,
|
||||
transform: Transform,
|
||||
input_type: PhantomData<U>,
|
||||
}
|
||||
|
||||
impl<U, V, Original, Transform> Iterable<V> for Mapped<U, Original, Transform>
|
||||
where
|
||||
Original: Iterable<U>,
|
||||
Transform: Fn(U) -> V,
|
||||
{
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = V> + '_> {
|
||||
Box::new(self.original_iterable.boxed_iter().map(&self.transform))
|
||||
}
|
||||
}
|
||||
|
||||
impl<U> Iterable<U> for &dyn Iterable<U> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = U> + '_> {
|
||||
(*self).boxed_iter()
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, T> Iterable<T> for F
|
||||
where F: Fn() -> Box<dyn Iterator<Item = T>>
|
||||
{
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
self()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_iterable<U, V, F, I>(
|
||||
original_iterable: impl Fn() -> I,
|
||||
transform: F,
|
||||
) -> impl Fn() -> std::iter::Map<I, F>
|
||||
where
|
||||
F: Fn(U) -> V + Clone,
|
||||
I: Iterator<Item = U>,
|
||||
{
|
||||
move || original_iterable().map(transform.clone())
|
||||
}
|
||||
|
||||
impl<'a, T: Copy> Iterable<T> for &'a [T] {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
Box::new(self.iter().copied())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy> Iterable<T> for Range<T>
|
||||
where Range<T>: Iterator<Item = T>
|
||||
{
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
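A small crate-internal sketch (illustrative, not part of this commit) showing that both plain slices and closures returning boxed iterators satisfy `Iterable`; the helper names `total` and `demo` are made up.

use crate::Iterable;

fn total(it: &dyn Iterable<u64>) -> u64 {
    it.boxed_iter().sum()
}

fn demo() {
    let vals: &[u64] = &[1, 2, 3];
    assert_eq!(total(&vals), 6);
    // A closure works too, via the blanket impl for `Fn() -> Box<dyn Iterator>`.
    let generator = || Box::new(0u64..4) as Box<dyn Iterator<Item = u64>>;
    assert_eq!(total(&generator), 6);
}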
columnar/src/lib.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate more_asserts;
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
extern crate test;
|
||||
|
||||
use std::io;
|
||||
|
||||
mod column;
|
||||
mod column_index;
|
||||
pub mod column_values;
|
||||
mod columnar;
|
||||
mod dictionary;
|
||||
mod dynamic_column;
|
||||
mod iterable;
|
||||
pub(crate) mod utils;
|
||||
mod value;
|
||||
|
||||
pub use column::{BytesColumn, Column, StrColumn};
|
||||
pub use column_index::ColumnIndex;
|
||||
pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
||||
pub use columnar::{
|
||||
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
|
||||
MergeRowOrder, StackMergeOrder,
|
||||
};
|
||||
pub(crate) use iterable::{map_iterable, Iterable};
|
||||
use sstable::VoidSSTable;
|
||||
pub use value::{NumericalType, NumericalValue};
|
||||
|
||||
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};
|
||||
|
||||
pub type RowId = u32;
|
||||
pub use sstable::Dictionary;
|
||||
pub type Streamer<'a> = sstable::Streamer<'a, VoidSSTable>;
|
||||
|
||||
#[derive(Clone, Copy, PartialOrd, PartialEq, Default, Debug)]
|
||||
pub struct DateTime {
|
||||
pub timestamp_micros: i64,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct InvalidData;
|
||||
|
||||
impl From<InvalidData> for io::Error {
|
||||
fn from(_: InvalidData) -> Self {
|
||||
io::Error::new(io::ErrorKind::InvalidData, "Invalid data")
|
||||
}
|
||||
}
|
||||
|
||||
/// Enum describing the number of values that can exist per document
|
||||
/// (or per row if you will).
|
||||
///
|
||||
/// The cardinality must fit on 2 bits.
|
||||
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[repr(u8)]
|
||||
pub enum Cardinality {
|
||||
/// All documents contain exactly one value.
|
||||
/// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
|
||||
#[default]
|
||||
Full = 0,
|
||||
/// All documents contain at most one value.
|
||||
Optional = 1,
|
||||
/// All documents may contain any number of values.
|
||||
Multivalued = 2,
|
||||
}
|
||||
|
||||
impl Cardinality {
|
||||
pub fn is_optional(&self) -> bool {
|
||||
matches!(self, Cardinality::Optional)
|
||||
}
|
||||
pub fn is_multivalue(&self) -> bool {
|
||||
matches!(self, Cardinality::Multivalued)
|
||||
}
|
||||
pub(crate) fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub(crate) fn try_from_code(code: u8) -> Result<Cardinality, InvalidData> {
|
||||
match code {
|
||||
0 => Ok(Cardinality::Full),
|
||||
1 => Ok(Cardinality::Optional),
|
||||
2 => Ok(Cardinality::Multivalued),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
columnar/src/tests.rs (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use crate::column_values::MonotonicallyMappableToU128;
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::dynamic_column::{DynamicColumn, DynamicColumnHandle};
|
||||
use crate::value::NumericalValue;
|
||||
use crate::{Cardinality, ColumnarReader, ColumnarWriter};
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_str() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_str(1u32, "my_string", "hello");
|
||||
dataframe_writer.record_str(3u32, "my_string", "helloeee");
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 158);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_bytes() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
|
||||
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 158);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_bool() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_bool(1u32, "bool.value", false);
|
||||
dataframe_writer.record_bool(3u32, "bool.value", true);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 22);
|
||||
assert_eq!(cols[0].column_type(), ColumnType::Bool);
|
||||
let dyn_bool_col = cols[0].open().unwrap();
|
||||
let DynamicColumn::Bool(bool_col) = dyn_bool_col else { panic!(); };
|
||||
let vals: Vec<Option<bool>> = (0..5).map(|row_id| bool_col.first(row_id)).collect();
|
||||
assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_u64_multivalued() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(2u32, "divisor", 2u64);
|
||||
dataframe_writer.record_numerical(3u32, "divisor", 3u64);
|
||||
dataframe_writer.record_numerical(4u32, "divisor", 2u64);
|
||||
dataframe_writer.record_numerical(5u32, "divisor", 5u64);
|
||||
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
|
||||
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(7, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 29);
|
||||
let dyn_i64_col = cols[0].open().unwrap();
|
||||
let DynamicColumn::I64(divisor_col) = dyn_i64_col else { panic!(); };
|
||||
assert_eq!(
|
||||
divisor_col.get_cardinality(),
|
||||
crate::Cardinality::Multivalued
|
||||
);
|
||||
assert_eq!(divisor_col.num_rows(), 7);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_ip_addr() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
|
||||
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 42);
|
||||
assert_eq!(cols[0].column_type(), ColumnType::IpAddr);
|
||||
let dyn_ip_col = cols[0].open().unwrap();
let DynamicColumn::IpAddr(ip_col) = dyn_ip_col else { panic!(); };
|
||||
let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect();
|
||||
assert_eq!(
|
||||
&vals,
|
||||
&[
|
||||
None,
|
||||
Some(Ipv6Addr::from_u128(1001)),
|
||||
None,
|
||||
Some(Ipv6Addr::from_u128(1050)),
|
||||
None,
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_numerical() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(1u32, "srical.value", NumericalValue::U64(12u64));
|
||||
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
|
||||
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(6, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
// Right now these 33 bytes are spent as follows
|
||||
//
|
||||
// - header 14 bytes
|
||||
// - vals 8 //< due to padding? could have been 1 byte?
|
||||
// - null footer 6 bytes
|
||||
assert_eq!(cols[0].num_bytes(), 33);
|
||||
let column = cols[0].open().unwrap();
|
||||
let DynamicColumn::I64(column_i64) = column else { panic!(); };
|
||||
assert_eq!(column_i64.idx.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(column_i64.first(0), None);
|
||||
assert_eq!(column_i64.first(1), Some(12i64));
|
||||
assert_eq!(column_i64.first(2), Some(13i64));
|
||||
assert_eq!(column_i64.first(3), None);
|
||||
assert_eq!(column_i64.first(4), Some(15i64));
|
||||
assert_eq!(column_i64.first(5), None);
|
||||
assert_eq!(column_i64.first(6), None); //< we can change the spec for that one.
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_encoded_str() {
|
||||
let mut buffer = Vec::new();
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_str(1, "my.column", "a");
|
||||
columnar_writer.record_str(3, "my.column", "c");
|
||||
columnar_writer.record_str(3, "my.column2", "different_column!");
|
||||
columnar_writer.record_str(4, "my.column", "b");
|
||||
columnar_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let col_handles = columnar_reader.read_columns("my.column").unwrap();
|
||||
assert_eq!(col_handles.len(), 1);
|
||||
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { panic!(); };
|
||||
let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect();
|
||||
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
|
||||
assert_eq!(str_col.num_rows(), 5);
|
||||
let mut term_buffer = String::new();
|
||||
let term_ords = str_col.ords();
|
||||
assert_eq!(term_ords.first(0), None);
|
||||
assert_eq!(term_ords.first(1), Some(0));
|
||||
str_col.ord_to_str(0u64, &mut term_buffer).unwrap();
|
||||
assert_eq!(term_buffer, "a");
|
||||
assert_eq!(term_ords.first(2), None);
|
||||
assert_eq!(term_ords.first(3), Some(2));
|
||||
str_col.ord_to_str(2u64, &mut term_buffer).unwrap();
|
||||
assert_eq!(term_buffer, "c");
|
||||
assert_eq!(term_ords.first(4), Some(1));
|
||||
str_col.ord_to_str(1u64, &mut term_buffer).unwrap();
|
||||
assert_eq!(term_buffer, "b");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_encoded_bytes() {
|
||||
let mut buffer = Vec::new();
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_bytes(1, "my.column", b"a");
|
||||
columnar_writer.record_bytes(3, "my.column", b"c");
|
||||
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
|
||||
columnar_writer.record_bytes(4, "my.column", b"b");
|
||||
columnar_writer.serialize(5, &mut buffer).unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let col_handles = columnar_reader.read_columns("my.column").unwrap();
|
||||
assert_eq!(col_handles.len(), 1);
|
||||
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { panic!(); };
|
||||
let index: Vec<Option<u64>> = (0..5)
|
||||
.map(|row_id| bytes_col.ords().first(row_id))
|
||||
.collect();
|
||||
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
|
||||
assert_eq!(bytes_col.num_rows(), 5);
|
||||
let mut term_buffer = Vec::new();
|
||||
let term_ords = bytes_col.ords();
|
||||
assert_eq!(term_ords.first(0), None);
|
||||
assert_eq!(term_ords.first(1), Some(0));
|
||||
bytes_col
|
||||
.dictionary
|
||||
.ord_to_term(0u64, &mut term_buffer)
|
||||
.unwrap();
|
||||
assert_eq!(term_buffer, b"a");
|
||||
assert_eq!(term_ords.first(2), None);
|
||||
assert_eq!(term_ords.first(3), Some(2));
|
||||
bytes_col
|
||||
.dictionary
|
||||
.ord_to_term(2u64, &mut term_buffer)
|
||||
.unwrap();
|
||||
assert_eq!(term_buffer, b"c");
|
||||
assert_eq!(term_ords.first(4), Some(1));
|
||||
bytes_col
|
||||
.dictionary
|
||||
.ord_to_term(1u64, &mut term_buffer)
|
||||
.unwrap();
|
||||
assert_eq!(term_buffer, b"b");
|
||||
}
|
||||
columnar/src/utils.rs (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
const fn compute_mask(num_bits: u8) -> u8 {
|
||||
if num_bits == 8 {
|
||||
u8::MAX
|
||||
} else {
|
||||
(1u8 << num_bits) - 1
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
#[must_use]
|
||||
pub(crate) fn select_bits<const START: u8, const END: u8>(code: u8) -> u8 {
|
||||
assert!(START <= END);
|
||||
assert!(END <= 8);
|
||||
let num_bits: u8 = END - START;
|
||||
let mask: u8 = compute_mask(num_bits);
|
||||
(code >> START) & mask
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
#[must_use]
|
||||
pub(crate) fn place_bits<const START: u8, const END: u8>(code: u8) -> u8 {
|
||||
assert!(START <= END);
|
||||
assert!(END <= 8);
|
||||
let num_bits: u8 = END - START;
|
||||
let mask: u8 = compute_mask(num_bits);
|
||||
assert!(code <= mask);
|
||||
code << START
|
||||
}
|
||||
|
||||
/// Pops the first byte off the front of a slice of bytes.
|
||||
#[inline(always)]
|
||||
pub fn pop_first_byte(bytes: &mut &[u8]) -> Option<u8> {
|
||||
if bytes.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let first_byte = bytes[0];
|
||||
*bytes = &bytes[1..];
|
||||
Some(first_byte)
|
||||
}
|
||||
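// A minimal illustrative sketch (hypothetical module and values) of how the
// bit helpers above compose: `place_bits` packs a small code into a byte and
// `select_bits` reads it back from the same bit range.
#[cfg(test)]
mod bit_packing_example {
    use super::{place_bits, select_bits};

    #[test]
    fn test_pack_and_unpack_two_codes() {
        // Hypothetical layout: bits [0..3) hold one code, bits [3..6) hold another.
        let packed = place_bits::<0, 3>(5u8) | place_bits::<3, 6>(2u8);
        assert_eq!(select_bits::<0, 3>(packed), 5u8);
        assert_eq!(select_bits::<3, 6>(packed), 2u8);
    }
}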
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_select_bits() {
|
||||
assert_eq!(255u8, select_bits::<0, 8>(255u8));
|
||||
assert_eq!(0u8, select_bits::<0, 0>(255u8));
|
||||
assert_eq!(8u8, select_bits::<0, 4>(8u8));
|
||||
assert_eq!(4u8, select_bits::<1, 4>(8u8));
|
||||
assert_eq!(0u8, select_bits::<1, 3>(8u8));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_place_bits() {
|
||||
assert_eq!(255u8, place_bits::<0, 8>(255u8));
|
||||
assert_eq!(4u8, place_bits::<2, 3>(1u8));
|
||||
assert_eq!(0u8, place_bits::<2, 2>(0u8));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_place_bits_overflows() {
|
||||
let _ = place_bits::<1, 4>(8u8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pop_first_byte() {
|
||||
let mut cursor: &[u8] = &b"abcd"[..];
|
||||
assert_eq!(pop_first_byte(&mut cursor), Some(b'a'));
|
||||
assert_eq!(pop_first_byte(&mut cursor), Some(b'b'));
|
||||
assert_eq!(pop_first_byte(&mut cursor), Some(b'c'));
|
||||
assert_eq!(pop_first_byte(&mut cursor), Some(b'd'));
|
||||
assert_eq!(pop_first_byte(&mut cursor), None);
|
||||
}
|
||||
}
|
||||
columnar/src/value.rs (new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
use crate::InvalidData;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Debug)]
|
||||
pub enum NumericalValue {
|
||||
I64(i64),
|
||||
U64(u64),
|
||||
F64(f64),
|
||||
}
|
||||
|
||||
impl NumericalValue {
|
||||
pub fn numerical_type(&self) -> NumericalType {
|
||||
match self {
|
||||
NumericalValue::I64(_) => NumericalType::I64,
|
||||
NumericalValue::U64(_) => NumericalType::U64,
|
||||
NumericalValue::F64(_) => NumericalType::F64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for NumericalValue {
|
||||
fn from(val: u64) -> NumericalValue {
|
||||
NumericalValue::U64(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<i64> for NumericalValue {
|
||||
fn from(val: i64) -> Self {
|
||||
NumericalValue::I64(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<f64> for NumericalValue {
|
||||
fn from(val: f64) -> Self {
|
||||
NumericalValue::F64(val)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
|
||||
#[repr(u8)]
|
||||
pub enum NumericalType {
|
||||
#[default]
|
||||
I64 = 0,
|
||||
U64 = 1,
|
||||
F64 = 2,
|
||||
}
|
||||
|
||||
impl NumericalType {
|
||||
pub fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub fn try_from_code(code: u8) -> Result<NumericalType, InvalidData> {
|
||||
match code {
|
||||
0 => Ok(NumericalType::I64),
|
||||
1 => Ok(NumericalType::U64),
|
||||
2 => Ok(NumericalType::F64),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// We voluntarily avoid using `Into` here to keep this
|
||||
/// implementation quirk as private as possible.
|
||||
///
|
||||
/// # Panics
|
||||
/// This coercion trait actually panics if it is used
|
||||
/// to convert a loose type to a stricter type.
|
||||
///
|
||||
/// The level of strictness is somewhat arbitrary:
|
||||
/// - i64
|
||||
/// - u64
|
||||
/// - f64.
|
||||
pub(crate) trait Coerce {
|
||||
fn coerce(numerical_value: NumericalValue) -> Self;
|
||||
}
|
||||
|
||||
impl Coerce for i64 {
|
||||
fn coerce(value: NumericalValue) -> Self {
|
||||
match value {
|
||||
NumericalValue::I64(val) => val,
|
||||
NumericalValue::U64(val) => val as i64,
|
||||
NumericalValue::F64(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Coerce for u64 {
|
||||
fn coerce(value: NumericalValue) -> Self {
|
||||
match value {
|
||||
NumericalValue::I64(val) => val as u64,
|
||||
NumericalValue::U64(val) => val,
|
||||
NumericalValue::F64(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Coerce for f64 {
|
||||
fn coerce(value: NumericalValue) -> Self {
|
||||
match value {
|
||||
NumericalValue::I64(val) => val as f64,
|
||||
NumericalValue::U64(val) => val as f64,
|
||||
NumericalValue::F64(val) => val,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Coerce for crate::DateTime {
|
||||
fn coerce(value: NumericalValue) -> Self {
|
||||
let timestamp_micros = i64::coerce(value);
|
||||
crate::DateTime { timestamp_micros }
|
||||
}
|
||||
}
|
||||
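// A minimal illustrative sketch (hypothetical module and values) of the `Coerce`
// trait above: integers coerce freely between i64/u64 and widen to f64, while
// coercing an f64 down to an integer type is the case that panics.
#[cfg(test)]
mod coerce_example {
    use super::{Coerce, NumericalValue};

    #[test]
    fn test_coerce_between_numerical_types() {
        assert_eq!(i64::coerce(NumericalValue::U64(12)), 12i64);
        assert_eq!(u64::coerce(NumericalValue::I64(7)), 7u64);
        assert_eq!(f64::coerce(NumericalValue::I64(-3)), -3.0f64);
    }
}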
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::NumericalType;
|
||||
|
||||
#[test]
|
||||
fn test_numerical_type_code() {
|
||||
let mut num_numerical_type = 0;
|
||||
for code in u8::MIN..=u8::MAX {
|
||||
if let Ok(numerical_type) = NumericalType::try_from_code(code) {
|
||||
assert_eq!(numerical_type.to_code(), code);
|
||||
num_numerical_type += 1;
|
||||
}
|
||||
}
|
||||
assert_eq!(num_numerical_type, 3);
|
||||
}
|
||||
}
|
||||
common/Cargo.toml (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.5.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2021"
|
||||
description = "common traits and utility functions used by multiple tantivy subcrates"
|
||||
documentation = "https://docs.rs/tantivy_common/"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
ownedbytes = { version= "0.5", path="../ownedbytes" }
|
||||
async-trait = "0.1"
|
||||
|
||||
[dev-dependencies]
|
||||
proptest = "1.0.0"
|
||||
rand = "0.8.4"
|
||||
common/src/bitset.rs (new file, 737 lines)
@@ -0,0 +1,737 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io::Write;
|
||||
use std::{fmt, io, u64};
|
||||
|
||||
use ownedbytes::OwnedBytes;
|
||||
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
pub struct TinySet(u64);
|
||||
|
||||
impl fmt::Debug for TinySet {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
self.into_iter().collect::<Vec<u32>>().fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TinySetIterator(TinySet);
|
||||
impl Iterator for TinySetIterator {
|
||||
type Item = u32;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.pop_lowest()
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoIterator for TinySet {
|
||||
type Item = u32;
|
||||
type IntoIter = TinySetIterator;
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
TinySetIterator(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl TinySet {
|
||||
pub fn serialize<T: Write>(&self, writer: &mut T) -> io::Result<()> {
|
||||
writer.write_all(self.0.to_le_bytes().as_ref())
|
||||
}
|
||||
|
||||
pub fn into_bytes(self) -> [u8; 8] {
|
||||
self.0.to_le_bytes()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn deserialize(data: [u8; 8]) -> Self {
|
||||
let val: u64 = u64::from_le_bytes(data);
|
||||
TinySet(val)
|
||||
}
|
||||
|
||||
/// Returns an empty `TinySet`.
|
||||
#[inline]
|
||||
pub fn empty() -> TinySet {
|
||||
TinySet(0u64)
|
||||
}
|
||||
|
||||
/// Returns a full `TinySet`.
|
||||
#[inline]
|
||||
pub fn full() -> TinySet {
|
||||
TinySet::empty().complement()
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.0 = 0u64;
|
||||
}
|
||||
|
||||
/// Returns the complement of the set in `[0, 64[`.
|
||||
///
|
||||
/// Be careful about making this function public, as it would break the padding handling in the last
|
||||
/// bucket.
|
||||
#[inline]
|
||||
fn complement(self) -> TinySet {
|
||||
TinySet(!self.0)
|
||||
}
|
||||
|
||||
/// Returns true iff the `TinySet` contains the element `el`.
|
||||
#[inline]
|
||||
pub fn contains(self, el: u32) -> bool {
|
||||
!self.intersect(TinySet::singleton(el)).is_empty()
|
||||
}
|
||||
|
||||
/// Returns the number of elements in the TinySet.
|
||||
#[inline]
|
||||
pub fn len(self) -> u32 {
|
||||
self.0.count_ones()
|
||||
}
|
||||
|
||||
/// Returns the intersection of `self` and `other`
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn intersect(self, other: TinySet) -> TinySet {
|
||||
TinySet(self.0 & other.0)
|
||||
}
|
||||
|
||||
/// Creates a new `TinySet` containing only one element
|
||||
/// within `[0; 64[`
|
||||
#[inline]
|
||||
pub fn singleton(el: u32) -> TinySet {
|
||||
TinySet(1u64 << u64::from(el))
|
||||
}
|
||||
|
||||
/// Insert a new element within [0..64)
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn insert(self, el: u32) -> TinySet {
|
||||
self.union(TinySet::singleton(el))
|
||||
}
|
||||
|
||||
/// Removes an element within [0..64)
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn remove(self, el: u32) -> TinySet {
|
||||
self.intersect(TinySet::singleton(el).complement())
|
||||
}
|
||||
|
||||
/// Insert a new element within [0..64)
|
||||
///
|
||||
/// returns true if the set changed
|
||||
#[inline]
|
||||
pub fn insert_mut(&mut self, el: u32) -> bool {
|
||||
let old = *self;
|
||||
*self = old.insert(el);
|
||||
old != *self
|
||||
}
|
||||
|
||||
/// Removes an element within [0..64)
|
||||
///
|
||||
/// returns true if the set changed
|
||||
#[inline]
|
||||
pub fn remove_mut(&mut self, el: u32) -> bool {
|
||||
let old = *self;
|
||||
*self = old.remove(el);
|
||||
old != *self
|
||||
}
|
||||
|
||||
/// Returns the union of two tinysets
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn union(self, other: TinySet) -> TinySet {
|
||||
TinySet(self.0 | other.0)
|
||||
}
|
||||
|
||||
/// Returns true iff the `TinySet` is empty.
|
||||
#[inline]
|
||||
pub fn is_empty(self) -> bool {
|
||||
self.0 == 0u64
|
||||
}
|
||||
|
||||
/// Returns the lowest element in the `TinySet`
|
||||
/// and removes it.
|
||||
#[inline]
|
||||
pub fn pop_lowest(&mut self) -> Option<u32> {
|
||||
if self.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let lowest = self.0.trailing_zeros();
|
||||
self.0 ^= TinySet::singleton(lowest).0;
|
||||
Some(lowest)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a `TinySet` that contains all values up
|
||||
/// to limit excluded.
|
||||
///
|
||||
/// The limit is assumed to be strictly lower than 64.
|
||||
pub fn range_lower(upper_bound: u32) -> TinySet {
|
||||
TinySet((1u64 << u64::from(upper_bound % 64u32)) - 1u64)
|
||||
}
|
||||
|
||||
/// Returns a `TinySet` that contains all values greater than
|
||||
/// or equal to the given limit, included. (and up to 63)
|
||||
///
|
||||
/// The limit is assumed to be strictly lower than 64.
|
||||
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
||||
TinySet::range_lower(from_included).complement()
|
||||
}
|
||||
}
|
||||
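// A minimal illustrative sketch (hypothetical module and values): `range_lower`
// and `range_greater_or_equal` split the `[0, 64)` domain at a pivot, so their
// union is the full set and their intersection is empty.
#[cfg(test)]
mod tinyset_range_example {
    use super::TinySet;

    #[test]
    fn test_range_split_at_pivot() {
        let low = TinySet::range_lower(10);
        let high = TinySet::range_greater_or_equal(10);
        assert_eq!(low.union(high), TinySet::full());
        assert!(low.intersect(high).is_empty());
        assert_eq!(low.len() + high.len(), 64);
    }
}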
|
||||
#[derive(Clone)]
|
||||
pub struct BitSet {
|
||||
tinysets: Box<[TinySet]>,
|
||||
len: u64,
|
||||
max_value: u32,
|
||||
}
|
||||
|
||||
fn num_buckets(max_val: u32) -> u32 {
|
||||
(max_val + 63u32) / 64u32
|
||||
}
|
||||
|
||||
impl BitSet {
|
||||
/// serialize a `BitSet`.
|
||||
pub fn serialize<T: Write>(&self, writer: &mut T) -> io::Result<()> {
|
||||
writer.write_all(self.max_value.to_le_bytes().as_ref())?;
|
||||
for tinyset in self.tinysets.iter().cloned() {
|
||||
writer.write_all(&tinyset.into_bytes())?;
|
||||
}
|
||||
writer.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a new `BitSet` that may contain elements
|
||||
/// within `[0, max_val)`.
|
||||
pub fn with_max_value(max_value: u32) -> BitSet {
|
||||
let num_buckets = num_buckets(max_value);
|
||||
let tinybitsets = vec![TinySet::empty(); num_buckets as usize].into_boxed_slice();
|
||||
BitSet {
|
||||
tinysets: tinybitsets,
|
||||
len: 0,
|
||||
max_value,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `BitSet` that may contain elements
/// within `[0, max_val)`. Initially all values will be set.
|
||||
pub fn with_max_value_and_full(max_value: u32) -> BitSet {
|
||||
let num_buckets = num_buckets(max_value);
|
||||
let mut tinybitsets = vec![TinySet::full(); num_buckets as usize].into_boxed_slice();
|
||||
|
||||
// Fix padding
|
||||
let lower = max_value % 64u32;
|
||||
if lower != 0 {
|
||||
tinybitsets[tinybitsets.len() - 1] = TinySet::range_lower(lower);
|
||||
}
|
||||
BitSet {
|
||||
tinysets: tinybitsets,
|
||||
len: max_value as u64,
|
||||
max_value,
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes all elements from the `BitSet`.
|
||||
pub fn clear(&mut self) {
|
||||
for tinyset in self.tinysets.iter_mut() {
|
||||
*tinyset = TinySet::empty();
|
||||
}
|
||||
}
|
||||
|
||||
/// Intersect with serialized bitset
|
||||
pub fn intersect_update(&mut self, other: &ReadOnlyBitSet) {
|
||||
self.intersect_update_with_iter(other.iter_tinysets());
|
||||
}
|
||||
|
||||
/// Intersect with tinysets
|
||||
fn intersect_update_with_iter(&mut self, other: impl Iterator<Item = TinySet>) {
|
||||
self.len = 0;
|
||||
for (left, right) in self.tinysets.iter_mut().zip(other) {
|
||||
*left = left.intersect(right);
|
||||
self.len += left.len() as u64;
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of elements in the `BitSet`.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.len as usize
|
||||
}
|
||||
|
||||
/// Inserts an element in the `BitSet`
|
||||
#[inline]
|
||||
pub fn insert(&mut self, el: u32) {
|
||||
// we do not check saturated els.
|
||||
let higher = el / 64u32;
|
||||
let lower = el % 64u32;
|
||||
self.len += u64::from(self.tinysets[higher as usize].insert_mut(lower));
|
||||
}
|
||||
|
||||
/// Removes an element from the `BitSet`
|
||||
#[inline]
|
||||
pub fn remove(&mut self, el: u32) {
|
||||
// we do not check saturated els.
|
||||
let higher = el / 64u32;
|
||||
let lower = el % 64u32;
|
||||
self.len -= u64::from(self.tinysets[higher as usize].remove_mut(lower));
|
||||
}
|
||||
|
||||
/// Returns true iff the element is in the `BitSet`.
|
||||
#[inline]
|
||||
pub fn contains(&self, el: u32) -> bool {
|
||||
self.tinyset(el / 64u32).contains(el % 64)
|
||||
}
|
||||
|
||||
/// Returns the index of the first non-empty bucket
/// equal to or greater than `bucket`, if any.
///
/// Reminder: the tiny set with the bucket `bucket` represents the
/// elements from `bucket * 64` to `(bucket+1) * 64`.
|
||||
pub fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
|
||||
self.tinysets[bucket as usize..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.position(|tinyset| !tinyset.is_empty())
|
||||
.map(|delta_bucket| bucket + delta_bucket as u32)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn max_value(&self) -> u32 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
/// Returns the tiny bitset representing the
|
||||
/// set restricted to the number range from
|
||||
/// `bucket * 64` to `(bucket + 1) * 64`.
|
||||
pub fn tinyset(&self, bucket: u32) -> TinySet {
|
||||
self.tinysets[bucket as usize]
|
||||
}
|
||||
}
|
||||
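// A minimal illustrative sketch (hypothetical module and values) for
// `first_non_empty_bucket`: buckets are 64 elements wide, so an element
// inserted at 130 lives in bucket 2 and is found when scanning from any
// bucket index at or below 2.
#[cfg(test)]
mod bucket_scan_example {
    use super::BitSet;

    #[test]
    fn test_first_non_empty_bucket() {
        let mut bitset = BitSet::with_max_value(256);
        bitset.insert(130);
        assert_eq!(bitset.first_non_empty_bucket(0), Some(2));
        assert_eq!(bitset.first_non_empty_bucket(2), Some(2));
        assert_eq!(bitset.first_non_empty_bucket(3), None);
    }
}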
|
||||
/// Serialized BitSet.
|
||||
#[derive(Clone)]
|
||||
pub struct ReadOnlyBitSet {
|
||||
data: OwnedBytes,
|
||||
max_value: u32,
|
||||
}
|
||||
|
||||
pub fn intersect_bitsets(left: &ReadOnlyBitSet, other: &ReadOnlyBitSet) -> ReadOnlyBitSet {
|
||||
assert_eq!(left.max_value(), other.max_value());
|
||||
assert_eq!(left.data.len(), other.data.len());
|
||||
let union_tinyset_it = left
|
||||
.iter_tinysets()
|
||||
.zip(other.iter_tinysets())
|
||||
.map(|(left_tinyset, right_tinyset)| left_tinyset.intersect(right_tinyset));
|
||||
let mut output_dataset: Vec<u8> = Vec::with_capacity(left.data.len());
|
||||
for tinyset in union_tinyset_it {
|
||||
output_dataset.extend_from_slice(&tinyset.into_bytes());
|
||||
}
|
||||
ReadOnlyBitSet {
|
||||
data: OwnedBytes::new(output_dataset),
|
||||
max_value: left.max_value(),
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadOnlyBitSet {
|
||||
pub fn open(data: OwnedBytes) -> Self {
|
||||
let (max_value_data, data) = data.split(4);
|
||||
assert_eq!(data.len() % 8, 0);
|
||||
let max_value: u32 = u32::from_le_bytes(max_value_data.as_ref().try_into().unwrap());
|
||||
ReadOnlyBitSet { data, max_value }
|
||||
}
|
||||
|
||||
/// Number of elements in the bitset.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.iter_tinysets()
|
||||
.map(|tinyset| tinyset.len() as usize)
|
||||
.sum()
|
||||
}
|
||||
|
||||
/// Iterate over the tinysets on the fly from the serialized data.
|
||||
#[inline]
|
||||
fn iter_tinysets(&self) -> impl Iterator<Item = TinySet> + '_ {
|
||||
self.data.chunks_exact(8).map(move |chunk| {
|
||||
let tinyset: TinySet = TinySet::deserialize(chunk.try_into().unwrap());
|
||||
tinyset
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterate over the positions of the elements.
|
||||
#[inline]
|
||||
pub fn iter(&self) -> impl Iterator<Item = u32> + '_ {
|
||||
self.iter_tinysets()
|
||||
.enumerate()
|
||||
.flat_map(move |(chunk_num, tinyset)| {
|
||||
let chunk_base_val = chunk_num as u32 * 64;
|
||||
tinyset
|
||||
.into_iter()
|
||||
.map(move |val| val + chunk_base_val)
|
||||
.take_while(move |doc| *doc < self.max_value)
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns true iff the element is in the `BitSet`.
|
||||
#[inline]
|
||||
pub fn contains(&self, el: u32) -> bool {
|
||||
let byte_offset = el / 8u32;
|
||||
let b: u8 = self.data[byte_offset as usize];
|
||||
let shift = (el % 8) as u8;
|
||||
b & (1u8 << shift) != 0
|
||||
}
|
||||
|
||||
/// Maximum value the bitset may contain.
|
||||
/// (Note this is not the maximum value contained in the set.)
|
||||
///
|
||||
/// A bitset has an intrinsic capacity.
|
||||
/// It only stores elements within [0..max_value).
|
||||
#[inline]
|
||||
pub fn max_value(&self) -> u32 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
/// Number of bytes used in the bitset representation.
|
||||
pub fn num_bytes(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a BitSet> for ReadOnlyBitSet {
|
||||
fn from(bitset: &'a BitSet) -> ReadOnlyBitSet {
|
||||
let mut buffer = Vec::with_capacity(bitset.tinysets.len() * 8 + 4);
|
||||
bitset
|
||||
.serialize(&mut buffer)
|
||||
.expect("serializing into a buffer should never fail");
|
||||
ReadOnlyBitSet::open(OwnedBytes::new(buffer))
|
||||
}
|
||||
}
|
||||
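// A minimal illustrative sketch (hypothetical module and values): a `BitSet`
// can be frozen into a `ReadOnlyBitSet` via `From`, and two frozen sets with
// the same max value can be combined with `intersect_bitsets`.
#[cfg(test)]
mod read_only_bitset_example {
    use super::{intersect_bitsets, BitSet, ReadOnlyBitSet};

    #[test]
    fn test_intersect_two_frozen_bitsets() {
        let mut left = BitSet::with_max_value(100);
        left.insert(2);
        left.insert(50);
        let mut right = BitSet::with_max_value(100);
        right.insert(50);
        right.insert(99);
        let intersection =
            intersect_bitsets(&ReadOnlyBitSet::from(&left), &ReadOnlyBitSet::from(&right));
        assert_eq!(intersection.iter().collect::<Vec<u32>>(), vec![50]);
    }
}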
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use ownedbytes::OwnedBytes;
|
||||
use rand::distributions::Bernoulli;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
use super::{BitSet, ReadOnlyBitSet, TinySet};
|
||||
|
||||
#[test]
|
||||
fn test_read_serialized_bitset_full_multi() {
|
||||
for i in 0..1000 {
|
||||
let bitset = BitSet::with_max_value_and_full(i);
|
||||
let mut out = vec![];
|
||||
bitset.serialize(&mut out).unwrap();
|
||||
|
||||
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
|
||||
assert_eq!(bitset.len(), i as usize);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_serialized_bitset_full_block() {
|
||||
let bitset = BitSet::with_max_value_and_full(64);
|
||||
let mut out = vec![];
|
||||
bitset.serialize(&mut out).unwrap();
|
||||
|
||||
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
|
||||
assert_eq!(bitset.len(), 64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_serialized_bitset_full() {
|
||||
let mut bitset = BitSet::with_max_value_and_full(5);
|
||||
bitset.remove(3);
|
||||
let mut out = vec![];
|
||||
bitset.serialize(&mut out).unwrap();
|
||||
|
||||
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
|
||||
assert_eq!(bitset.len(), 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_intersect() {
|
||||
let bitset_serialized = {
|
||||
let mut bitset = BitSet::with_max_value_and_full(5);
|
||||
bitset.remove(1);
|
||||
bitset.remove(3);
|
||||
let mut out = vec![];
|
||||
bitset.serialize(&mut out).unwrap();
|
||||
|
||||
ReadOnlyBitSet::open(OwnedBytes::new(out))
|
||||
};
|
||||
|
||||
let mut bitset = BitSet::with_max_value_and_full(5);
|
||||
bitset.remove(1);
|
||||
bitset.intersect_update(&bitset_serialized);
|
||||
|
||||
assert!(bitset.contains(0));
|
||||
assert!(!bitset.contains(1));
|
||||
assert!(bitset.contains(2));
|
||||
assert!(!bitset.contains(3));
|
||||
assert!(bitset.contains(4));
|
||||
|
||||
bitset.intersect_update_with_iter(vec![TinySet::singleton(0)].into_iter());
|
||||
|
||||
assert!(bitset.contains(0));
|
||||
assert!(!bitset.contains(1));
|
||||
assert!(!bitset.contains(2));
|
||||
assert!(!bitset.contains(3));
|
||||
assert!(!bitset.contains(4));
|
||||
assert_eq!(bitset.len(), 1);
|
||||
|
||||
bitset.intersect_update_with_iter(vec![TinySet::singleton(1)].into_iter());
|
||||
assert!(!bitset.contains(0));
|
||||
assert!(!bitset.contains(1));
|
||||
assert!(!bitset.contains(2));
|
||||
assert!(!bitset.contains(3));
|
||||
assert!(!bitset.contains(4));
|
||||
assert_eq!(bitset.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_serialized_bitset_empty() {
|
||||
let mut bitset = BitSet::with_max_value(5);
|
||||
bitset.insert(3);
|
||||
let mut out = vec![];
|
||||
bitset.serialize(&mut out).unwrap();
|
||||
|
||||
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
|
||||
assert_eq!(bitset.len(), 1);
|
||||
|
||||
{
|
||||
let bitset = BitSet::with_max_value(5);
|
||||
let mut out = vec![];
|
||||
bitset.serialize(&mut out).unwrap();
|
||||
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
|
||||
assert_eq!(bitset.len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tiny_set_remove() {
|
||||
{
|
||||
let mut u = TinySet::empty().insert(63u32).insert(5).remove(63u32);
|
||||
assert_eq!(u.pop_lowest(), Some(5u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty()
|
||||
.insert(63u32)
|
||||
.insert(1)
|
||||
.insert(5)
|
||||
.remove(63u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert_eq!(u.pop_lowest(), Some(5u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty().insert(1).remove(63u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty().insert(1).remove(1u32);
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_tiny_set() {
|
||||
assert!(TinySet::empty().is_empty());
|
||||
{
|
||||
let mut u = TinySet::empty().insert(1u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none())
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty().insert(1u32).insert(1u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none())
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty().insert(2u32);
|
||||
assert_eq!(u.pop_lowest(), Some(2u32));
|
||||
u.insert_mut(1u32);
|
||||
assert_eq!(u.pop_lowest(), Some(1u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty().insert(63u32);
|
||||
assert_eq!(u.pop_lowest(), Some(63u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let mut u = TinySet::empty().insert(63u32).insert(5);
|
||||
assert_eq!(u.pop_lowest(), Some(5u32));
|
||||
assert_eq!(u.pop_lowest(), Some(63u32));
|
||||
assert!(u.pop_lowest().is_none());
|
||||
}
|
||||
{
|
||||
let original = TinySet::empty().insert(63u32).insert(5);
|
||||
let after_serialize_deserialize = TinySet::deserialize(original.into_bytes());
|
||||
assert_eq!(original, after_serialize_deserialize);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset() {
|
||||
let test_against_hashset = |els: &[u32], max_value: u32| {
|
||||
let mut hashset: HashSet<u32> = HashSet::new();
|
||||
let mut bitset = BitSet::with_max_value(max_value);
|
||||
for &el in els {
|
||||
assert!(el < max_value);
|
||||
hashset.insert(el);
|
||||
bitset.insert(el);
|
||||
}
|
||||
for el in 0..max_value {
|
||||
assert_eq!(hashset.contains(&el), bitset.contains(el));
|
||||
}
|
||||
assert_eq!(bitset.max_value(), max_value);
|
||||
|
||||
// test deser
|
||||
let mut data = vec![];
|
||||
bitset.serialize(&mut data).unwrap();
|
||||
let ro_bitset = ReadOnlyBitSet::open(OwnedBytes::new(data));
|
||||
for el in 0..max_value {
|
||||
assert_eq!(hashset.contains(&el), ro_bitset.contains(el));
|
||||
}
|
||||
assert_eq!(ro_bitset.max_value(), max_value);
|
||||
assert_eq!(ro_bitset.len(), els.len());
|
||||
};
|
||||
|
||||
test_against_hashset(&[], 0);
|
||||
test_against_hashset(&[], 1);
|
||||
test_against_hashset(&[0u32], 1);
|
||||
test_against_hashset(&[0u32], 100);
|
||||
test_against_hashset(&[1u32, 2u32], 4);
|
||||
test_against_hashset(&[99u32], 100);
|
||||
test_against_hashset(&[63u32], 64);
|
||||
test_against_hashset(&[62u32, 63u32], 64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_num_buckets() {
|
||||
use super::num_buckets;
|
||||
assert_eq!(num_buckets(0u32), 0);
|
||||
assert_eq!(num_buckets(1u32), 1);
|
||||
assert_eq!(num_buckets(64u32), 1);
|
||||
assert_eq!(num_buckets(65u32), 2);
|
||||
assert_eq!(num_buckets(128u32), 2);
|
||||
assert_eq!(num_buckets(129u32), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tinyset_range() {
|
||||
assert_eq!(
|
||||
TinySet::range_lower(3).into_iter().collect::<Vec<u32>>(),
|
||||
[0, 1, 2]
|
||||
);
|
||||
assert!(TinySet::range_lower(0).is_empty());
|
||||
assert_eq!(
|
||||
TinySet::range_lower(63).into_iter().collect::<Vec<u32>>(),
|
||||
(0u32..63u32).collect::<Vec<_>>()
|
||||
);
|
||||
assert_eq!(
|
||||
TinySet::range_lower(1).into_iter().collect::<Vec<u32>>(),
|
||||
[0]
|
||||
);
|
||||
assert_eq!(
|
||||
TinySet::range_lower(2).into_iter().collect::<Vec<u32>>(),
|
||||
[0, 1]
|
||||
);
|
||||
assert_eq!(
|
||||
TinySet::range_greater_or_equal(3)
|
||||
.into_iter()
|
||||
.collect::<Vec<u32>>(),
|
||||
(3u32..64u32).collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_len() {
|
||||
let mut bitset = BitSet::with_max_value(1_000);
|
||||
assert_eq!(bitset.len(), 0);
|
||||
bitset.insert(3u32);
|
||||
assert_eq!(bitset.len(), 1);
|
||||
bitset.insert(103u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.insert(3u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.insert(103u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.insert(104u32);
|
||||
assert_eq!(bitset.len(), 3);
|
||||
bitset.remove(105u32);
|
||||
assert_eq!(bitset.len(), 3);
|
||||
bitset.remove(104u32);
|
||||
assert_eq!(bitset.len(), 2);
|
||||
bitset.remove(3u32);
|
||||
assert_eq!(bitset.len(), 1);
|
||||
bitset.remove(103u32);
|
||||
assert_eq!(bitset.len(), 0);
|
||||
}
|
||||
|
||||
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
|
||||
StdRng::from_seed([seed_val; 32])
|
||||
.sample_iter(&Bernoulli::new(ratio).unwrap())
|
||||
.take(n as usize)
|
||||
.enumerate()
|
||||
.filter_map(|(val, keep)| if keep { Some(val as u32) } else { None })
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn sample(n: u32, ratio: f64) -> Vec<u32> {
|
||||
sample_with_seed(n, ratio, 4)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitset_clear() {
|
||||
let mut bitset = BitSet::with_max_value(1_000);
|
||||
let els = sample(1_000, 0.01f64);
|
||||
for &el in &els {
|
||||
bitset.insert(el);
|
||||
}
|
||||
assert!(els.iter().all(|el| bitset.contains(*el)));
|
||||
bitset.clear();
|
||||
for el in 0u32..1000u32 {
|
||||
assert!(!bitset.contains(el));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use test;
|
||||
|
||||
use super::{BitSet, TinySet};
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyset_pop(b: &mut test::Bencher) {
|
||||
b.iter(|| {
|
||||
let mut tinyset = TinySet::singleton(test::black_box(31u32));
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
tinyset.pop_lowest();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyset_sum(b: &mut test::Bencher) {
|
||||
let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
|
||||
b.iter(|| {
|
||||
assert_eq!(test::black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_tinyarr_sum(b: &mut test::Bencher) {
|
||||
let v = [10u32, 14u32, 21u32];
|
||||
b.iter(|| test::black_box(v).iter().cloned().sum::<u32>());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_bitset_initialize(b: &mut test::Bencher) {
|
||||
b.iter(|| BitSet::with_max_value(1_000_000));
|
||||
}
|
||||
}
|
||||
common/src/file_slice.rs (new file, 347 lines)
@@ -0,0 +1,347 @@
|
||||
use std::ops::{Deref, Range, RangeBounds};
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use ownedbytes::{OwnedBytes, StableDeref};
|
||||
|
||||
use crate::HasLen;
|
||||
|
||||
/// Objects that represent file sections in tantivy.
|
||||
///
|
||||
/// By contract, whatever happens to the directory file, as long as a FileHandle
|
||||
/// is alive, the data associated with it cannot be altered or destroyed.
|
||||
///
|
||||
/// The underlying behavior is therefore specific to the `Directory` that
|
||||
/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
|
||||
/// on the filesystem.
|
||||
|
||||
#[async_trait]
|
||||
pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
|
||||
/// Reads a slice of bytes.
|
||||
///
|
||||
/// This method may panic if the range requested is invalid.
|
||||
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;
|
||||
|
||||
#[doc(hidden)]
|
||||
async fn read_bytes_async(&self, _byte_range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Unsupported,
|
||||
"Async read is not supported.",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileHandle for &'static [u8] {
|
||||
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
let bytes = &self[range];
|
||||
Ok(OwnedBytes::new(bytes))
|
||||
}
|
||||
|
||||
async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
Ok(self.read_bytes(byte_range)?)
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> From<B> for FileSlice
|
||||
where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync
|
||||
{
|
||||
fn from(bytes: B) -> FileSlice {
|
||||
FileSlice::new(Arc::new(OwnedBytes::new(bytes)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Logical slice of a read-only file in tantivy.
|
||||
///
|
||||
/// It can be cloned and sliced cheaply.
|
||||
#[derive(Clone)]
|
||||
pub struct FileSlice {
|
||||
data: Arc<dyn FileHandle>,
|
||||
range: Range<usize>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for FileSlice {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "FileSlice({:?}, {:?})", &self.data, self.range)
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes a range, a `RangeBounds` object, and returns
|
||||
/// a `Range` that corresponds to the relative application of the
|
||||
/// `RangeBounds` object to the original `Range`.
|
||||
///
|
||||
/// For instance, `combine_ranges([2..11), [5..=7])` returns `[7..10)`,
/// i.e. the sub-range of `[2..11)` that starts at its element of index 5
/// and ends at its element of index 7, included.
|
||||
///
|
||||
/// This function panics, if the result would suggest something outside
|
||||
/// of the bounds of the original range.
|
||||
fn combine_ranges<R: RangeBounds<usize>>(orig_range: Range<usize>, rel_range: R) -> Range<usize> {
|
||||
let start: usize = orig_range.start
|
||||
+ match rel_range.start_bound().cloned() {
|
||||
std::ops::Bound::Included(rel_start) => rel_start,
|
||||
std::ops::Bound::Excluded(rel_start) => rel_start + 1,
|
||||
std::ops::Bound::Unbounded => 0,
|
||||
};
|
||||
assert!(start <= orig_range.end);
|
||||
let end: usize = match rel_range.end_bound().cloned() {
|
||||
std::ops::Bound::Included(rel_end) => orig_range.start + rel_end + 1,
|
||||
std::ops::Bound::Excluded(rel_end) => orig_range.start + rel_end,
|
||||
std::ops::Bound::Unbounded => orig_range.end,
|
||||
};
|
||||
assert!(end >= start);
|
||||
assert!(end <= orig_range.end);
|
||||
start..end
|
||||
}
|
||||
|
||||
impl FileSlice {
|
||||
/// Wraps a FileHandle.
|
||||
pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
|
||||
let num_bytes = file_handle.len();
|
||||
FileSlice::new_with_num_bytes(file_handle, num_bytes)
|
||||
}
|
||||
|
||||
/// Wraps a FileHandle.
|
||||
#[doc(hidden)]
|
||||
#[must_use]
|
||||
pub fn new_with_num_bytes(file_handle: Arc<dyn FileHandle>, num_bytes: usize) -> Self {
|
||||
FileSlice {
|
||||
data: file_handle,
|
||||
range: 0..num_bytes,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a fileslice that is just a view over a slice of the data.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `byte_range.end` exceeds the filesize.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn slice<R: RangeBounds<usize>>(&self, byte_range: R) -> FileSlice {
|
||||
FileSlice {
|
||||
data: self.data.clone(),
|
||||
range: combine_ranges(self.range.clone(), byte_range),
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an empty FileSlice
|
||||
pub fn empty() -> FileSlice {
|
||||
const EMPTY_SLICE: &[u8] = &[];
|
||||
FileSlice::from(EMPTY_SLICE)
|
||||
}
|
||||
|
||||
/// Returns an `OwnedBytes` with all of the data in the `FileSlice`.
|
||||
///
|
||||
/// The behavior is strongly dependent on the implementation of the underlying
|
||||
/// `Directory` and the `FileHandle` it creates.
|
||||
/// In particular, it is up to the `Directory` implementation
|
||||
/// to handle caching if needed.
|
||||
pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
|
||||
self.data.read_bytes(self.range.clone())
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub async fn read_bytes_async(&self) -> io::Result<OwnedBytes> {
|
||||
self.data.read_bytes_async(self.range.clone()).await
|
||||
}
|
||||
|
||||
/// Reads a specific slice of data.
|
||||
///
|
||||
/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
|
||||
pub fn read_bytes_slice(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
assert!(
|
||||
range.end <= self.len(),
|
||||
"end of requested range exceeds the fileslice length ({} > {})",
|
||||
range.end,
|
||||
self.len()
|
||||
);
|
||||
self.data
|
||||
.read_bytes(self.range.start + range.start..self.range.start + range.end)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub async fn read_bytes_slice_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
assert!(
|
||||
self.range.start + byte_range.end <= self.range.end,
|
||||
"`to` exceeds the fileslice length"
|
||||
);
|
||||
self.data
|
||||
.read_bytes_async(
|
||||
self.range.start + byte_range.start..self.range.start + byte_range.end,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Splits the FileSlice at the given offset and return two file slices.
|
||||
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||
///
|
||||
/// This operation is cheap and must not copy any underlying data.
|
||||
pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
|
||||
let left = self.slice_to(left_len);
|
||||
let right = self.slice_from(left_len);
|
||||
(left, right)
|
||||
}
|
||||
|
||||
/// Splits the file slice at the given offset and return two file slices.
|
||||
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||
pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
|
||||
let left_len = self.len() - right_len;
|
||||
self.split(left_len)
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `from`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(from_offset, self.len())`
|
||||
#[must_use]
|
||||
pub fn slice_from(&self, from_offset: usize) -> FileSlice {
|
||||
self.slice(from_offset..self.len())
|
||||
}
|
||||
|
||||
/// Returns a slice from the end.
|
||||
///
|
||||
/// Equivalent to `.slice(self.len() - from_offset, self.len())`
|
||||
#[must_use]
|
||||
pub fn slice_from_end(&self, from_offset: usize) -> FileSlice {
|
||||
self.slice(self.len() - from_offset..self.len())
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `to`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(0, to_offset)`
|
||||
#[must_use]
|
||||
pub fn slice_to(&self, to_offset: usize) -> FileSlice {
|
||||
self.slice(0..to_offset)
|
||||
}
|
||||
}
|
||||
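// A minimal illustrative sketch (hypothetical module and values): slicing a
// `FileSlice` composes with relative offsets, so a nested `read_bytes_slice`
// is resolved against the already-sliced range.
#[cfg(test)]
mod file_slice_offsets_example {
    use std::sync::Arc;

    use super::FileSlice;

    #[test]
    fn test_nested_slice_uses_relative_offsets() -> std::io::Result<()> {
        let slice = FileSlice::new(Arc::new(&b"abcdefgh"[..]));
        // `slice(2..6)` covers "cdef"; reading 1..3 within it yields "de".
        let sub = slice.slice(2..6);
        assert_eq!(sub.read_bytes()?.as_slice(), b"cdef");
        assert_eq!(sub.read_bytes_slice(1..3)?.as_slice(), b"de");
        Ok(())
    }
}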
|
||||
#[async_trait]
|
||||
impl FileHandle for FileSlice {
|
||||
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
self.read_bytes_slice(range)
|
||||
}
|
||||
|
||||
async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
self.read_bytes_slice_async(byte_range).await
|
||||
}
|
||||
}
|
||||
|
||||
impl HasLen for FileSlice {
|
||||
fn len(&self) -> usize {
|
||||
self.range.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl FileHandle for OwnedBytes {
|
||||
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
Ok(self.slice(range))
|
||||
}
|
||||
|
||||
async fn read_bytes_async(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||
self.read_bytes(range)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io;
|
||||
use std::ops::Bound;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::{FileHandle, FileSlice};
|
||||
use crate::file_slice::combine_ranges;
|
||||
use crate::HasLen;
|
||||
|
||||
#[test]
|
||||
fn test_file_slice() -> io::Result<()> {
|
||||
let file_slice = FileSlice::new(Arc::new(b"abcdef".as_ref()));
|
||||
assert_eq!(file_slice.len(), 6);
|
||||
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
|
||||
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
|
||||
assert_eq!(
|
||||
file_slice
|
||||
.slice_from(1)
|
||||
.slice_to(2)
|
||||
.read_bytes()?
|
||||
.as_slice(),
|
||||
b"bc"
|
||||
);
|
||||
{
|
||||
let (left, right) = file_slice.clone().split(0);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split(2);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split_from_end(0);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.split_from_end(2);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_slice_trait_slice_len() {
|
||||
let blop: &'static [u8] = b"abc";
|
||||
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
|
||||
assert_eq!(owned_bytes.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slice_simple_read() -> io::Result<()> {
|
||||
let slice = FileSlice::new(Arc::new(&b"abcdef"[..]));
|
||||
assert_eq!(slice.len(), 6);
|
||||
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
|
||||
assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slice_read_slice() -> io::Result<()> {
|
||||
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
|
||||
assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")]
|
||||
fn test_slice_read_slice_invalid_range_exceeds() {
|
||||
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
|
||||
assert_eq!(
|
||||
slice_deref.read_bytes_slice(0..10).unwrap().as_ref(),
|
||||
b"bcd"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_combine_range() {
|
||||
assert_eq!(combine_ranges(1..3, 0..1), 1..2);
|
||||
assert_eq!(combine_ranges(1..3, 1..), 2..3);
|
||||
assert_eq!(combine_ranges(1..4, ..2), 1..3);
|
||||
assert_eq!(combine_ranges(3..10, 2..5), 5..8);
|
||||
assert_eq!(combine_ranges(2..11, 5..=7), 7..10);
|
||||
assert_eq!(
|
||||
combine_ranges(2..11, (Bound::Excluded(5), Bound::Unbounded)),
|
||||
8..11
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_combine_range_panics() {
|
||||
let _ = combine_ranges(3..5, 1..4);
|
||||
}
|
||||
}
|
||||
common/src/group_by.rs (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
use std::cell::RefCell;
|
||||
use std::iter::Peekable;
|
||||
use std::rc::Rc;
|
||||
|
||||
pub trait GroupByIteratorExtended: Iterator {
|
||||
/// Return an `Iterator` that groups iterator elements. Consecutive elements that map to the
|
||||
/// same key are assigned to the same group.
|
||||
///
|
||||
/// The returned Iterator item is `(K, impl Iterator)`, where the inner Iterator yields the items of the
|
||||
/// group.
|
||||
///
|
||||
/// ```
|
||||
/// use tantivy_common::GroupByIteratorExtended;
|
||||
///
|
||||
/// // Group data into blocks by whether values are non-negative.
|
||||
/// let data: Vec<i32> = vec![1, 3, -2, -2, 1, 0, 1, 2];
|
||||
/// // groups: |---->|------>|--------->|
|
||||
///
|
||||
/// let mut data_grouped = Vec::new();
|
||||
/// // Note: group is an iterator
|
||||
/// for (key, group) in data.into_iter().group_by(|val| *val >= 0) {
|
||||
/// data_grouped.push((key, group.collect()));
|
||||
/// }
|
||||
/// assert_eq!(data_grouped, vec![(true, vec![1, 3]), (false, vec![-2, -2]), (true, vec![1, 0, 1, 2])]);
|
||||
/// ```
|
||||
fn group_by<K, F>(self, key: F) -> GroupByIterator<Self, F, K>
|
||||
where
|
||||
Self: Sized,
|
||||
F: FnMut(&Self::Item) -> K,
|
||||
K: PartialEq + Copy,
|
||||
Self::Item: Copy,
|
||||
{
|
||||
GroupByIterator::new(self, key)
|
||||
}
|
||||
}
|
||||
impl<I: Iterator> GroupByIteratorExtended for I {}
|
||||
|
||||
pub struct GroupByIterator<I, F, K: Copy>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
{
|
||||
// I really would like to avoid the Rc<RefCell>, but the Iterator is shared between
|
||||
// `GroupByIterator` and `GroupIterator`. In practice they are used consecutively and
// the `GroupIterator` is finished before calling next on `GroupByIterator`. I'm not sure there
|
||||
// is a solution with lifetimes for that, because we would need to enforce it in the usage
|
||||
// somehow.
|
||||
//
|
||||
// One potential solution would be to replace the iterator approach with something similar.
|
||||
inner: Rc<RefCell<GroupByShared<I, F, K>>>,
|
||||
}
|
||||
|
||||
struct GroupByShared<I, F, K: Copy>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
{
|
||||
iter: Peekable<I>,
|
||||
group_by_fn: F,
|
||||
}
|
||||
|
||||
impl<I, F, K> GroupByIterator<I, F, K>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
K: Copy,
|
||||
{
|
||||
fn new(inner: I, group_by_fn: F) -> Self {
|
||||
let inner = GroupByShared {
|
||||
iter: inner.peekable(),
|
||||
group_by_fn,
|
||||
};
|
||||
|
||||
Self {
|
||||
inner: Rc::new(RefCell::new(inner)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, F, K> Iterator for GroupByIterator<I, F, K>
|
||||
where
|
||||
I: Iterator,
|
||||
I::Item: Copy,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
K: Copy,
|
||||
{
|
||||
type Item = (K, GroupIterator<I, F, K>);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
let value = *inner.iter.peek()?;
|
||||
let key = (inner.group_by_fn)(&value);
|
||||
|
||||
let inner = self.inner.clone();
|
||||
|
||||
let group_iter = GroupIterator {
|
||||
inner,
|
||||
group_key: key,
|
||||
};
|
||||
Some((key, group_iter))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GroupIterator<I, F, K: Copy>
|
||||
where
|
||||
I: Iterator,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
{
|
||||
inner: Rc<RefCell<GroupByShared<I, F, K>>>,
|
||||
group_key: K,
|
||||
}
|
||||
|
||||
impl<I, F, K: PartialEq + Copy> Iterator for GroupIterator<I, F, K>
|
||||
where
|
||||
I: Iterator,
|
||||
I::Item: Copy,
|
||||
F: FnMut(&I::Item) -> K,
|
||||
{
|
||||
type Item = I::Item;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
// peek if next value is in group
|
||||
let peek_val = *inner.iter.peek()?;
|
||||
if (inner.group_by_fn)(&peek_val) == self.group_key {
|
||||
inner.iter.next()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn group_by_collect<I: Iterator<Item = u32>>(iter: I) -> Vec<(I::Item, Vec<I::Item>)> {
|
||||
iter.group_by(|val| val / 10)
|
||||
.map(|(el, iter)| (el, iter.collect::<Vec<_>>()))
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn group_by_two_groups() {
|
||||
let vals = vec![1u32, 4, 15];
|
||||
let grouped_vals = group_by_collect(vals.into_iter());
|
||||
assert_eq!(grouped_vals, vec![(0, vec![1, 4]), (1, vec![15])]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn group_by_test_empty() {
|
||||
let vals = vec![];
|
||||
let grouped_vals = group_by_collect(vals.into_iter());
|
||||
assert_eq!(grouped_vals, vec![]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn group_by_three_groups() {
|
||||
let vals = vec![1u32, 4, 15, 1];
|
||||
let grouped_vals = group_by_collect(vals.into_iter());
|
||||
assert_eq!(
|
||||
grouped_vals,
|
||||
vec![(0, vec![1, 4]), (1, vec![15]), (0, vec![1])]
|
||||
);
|
||||
}
|
||||
}
|
||||
common/src/lib.rs (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
#![allow(clippy::len_without_is_empty)]
|
||||
|
||||
use std::ops::Deref;
|
||||
|
||||
pub use byteorder::LittleEndian as Endianness;
|
||||
|
||||
mod bitset;
|
||||
pub mod file_slice;
|
||||
mod group_by;
|
||||
mod serialize;
|
||||
mod vint;
|
||||
mod writer;
|
||||
pub use bitset::*;
|
||||
pub use group_by::GroupByIteratorExtended;
|
||||
pub use ownedbytes::{OwnedBytes, StableDeref};
|
||||
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
|
||||
pub use vint::{
|
||||
deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,
|
||||
serialize_vint_u32, write_u32_vint, VInt, VIntU128,
|
||||
};
|
||||
pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};
|
||||
|
||||
/// Has length trait
|
||||
pub trait HasLen {
|
||||
/// Return length
|
||||
fn len(&self) -> usize;
|
||||
|
||||
/// Returns true iff empty.
|
||||
fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Deref<Target = [u8]>> HasLen for T {
|
||||
fn len(&self) -> usize {
|
||||
self.deref().len()
|
||||
}
|
||||
}
|
||||
|
||||
const HIGHEST_BIT: u64 = 1 << 63;
|
||||
|
||||
/// Maps a `i64` to `u64`
|
||||
///
|
||||
/// For simplicity, tantivy internally handles `i64` as `u64`.
|
||||
/// The mapping is defined by this function.
|
||||
///
|
||||
/// Maps `i64` to `u64` so that
|
||||
/// `-2^63 .. 2^63-1` is mapped
|
||||
/// to
|
||||
/// `0 .. 2^64-1`
|
||||
/// in that order.
|
||||
///
|
||||
/// This is more suited than simply casting (`val as u64`)
|
||||
/// because of bitpacking.
|
||||
///
|
||||
/// Imagine a list of `i64` ranging from -10 to 10.
|
||||
/// When casting negative values, the negative values are projected
|
||||
/// to values over 2^63, and all values end up requiring 64 bits.
|
||||
///
|
||||
/// # See also
|
||||
/// The reverse mapping is [`u64_to_i64()`].
|
||||
#[inline]
|
||||
pub fn i64_to_u64(val: i64) -> u64 {
|
||||
(val as u64) ^ HIGHEST_BIT
|
||||
}
|
||||
|
||||
/// Reverse the mapping given by [`i64_to_u64()`].
|
||||
#[inline]
|
||||
pub fn u64_to_i64(val: u64) -> i64 {
|
||||
(val ^ HIGHEST_BIT) as i64
|
||||
}
|
||||
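// A minimal illustrative sketch (hypothetical module and values) of the mapping
// above: consecutive `i64` values map to consecutive, ordered `u64` values, so a
// narrow signed range stays narrow after mapping and bitpacks in a few bits.
#[cfg(test)]
mod i64_mapping_example {
    use super::{i64_to_u64, u64_to_i64};

    #[test]
    fn test_i64_mapping_preserves_order_and_width() {
        let mapped: Vec<u64> = (-10i64..=10).map(i64_to_u64).collect();
        // Order is preserved across the sign boundary.
        assert!(mapped.windows(2).all(|w| w[0] < w[1]));
        // The 21 values span a width of 20, unlike a plain `as u64` cast.
        assert_eq!(mapped[20] - mapped[0], 20);
        // Round trip.
        assert_eq!(u64_to_i64(i64_to_u64(-10)), -10);
    }
}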
|
||||
/// Maps a `f64` to `u64`
|
||||
///
|
||||
/// For simplicity, tantivy internally handles `f64` as `u64`.
|
||||
/// The mapping is defined by this function.
|
||||
///
|
||||
/// Maps `f64` to `u64` in a monotonic manner, so that bytes lexical order is preserved.
|
||||
///
|
||||
/// This is more suited than simply casting (`val as u64`)
|
||||
/// which would truncate the result
|
||||
///
|
||||
/// # Reference
|
||||
///
|
||||
/// Daniel Lemire's [blog post](https://lemire.me/blog/2020/12/14/converting-floating-point-numbers-to-integers-while-preserving-order/)
|
||||
/// explains the mapping in a clear manner.
|
||||
///
|
||||
/// # See also
|
||||
/// The reverse mapping is [`u64_to_f64()`].
|
||||
#[inline]
|
||||
pub fn f64_to_u64(val: f64) -> u64 {
|
||||
let bits = val.to_bits();
|
||||
if val.is_sign_positive() {
|
||||
bits ^ HIGHEST_BIT
|
||||
} else {
|
||||
!bits
|
||||
}
|
||||
}
|
||||
|
||||
/// Reverse the mapping given by [`f64_to_u64()`].
|
||||
#[inline]
|
||||
pub fn u64_to_f64(val: u64) -> f64 {
|
||||
f64::from_bits(if val & HIGHEST_BIT != 0 {
|
||||
val ^ HIGHEST_BIT
|
||||
} else {
|
||||
!val
|
||||
})
|
||||
}
|
||||
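// A minimal illustrative sketch (hypothetical module and values) of the
// order-preserving `f64` mapping above: negative, signed-zero and positive
// values keep their relative order once mapped to `u64`, and the mapping
// round-trips.
#[cfg(test)]
mod f64_mapping_example {
    use super::{f64_to_u64, u64_to_f64};

    #[test]
    fn test_f64_mapping_is_monotonic_across_zero() {
        let values = [-2.5f64, -0.0, 0.0, 1.0, 3.5];
        let mapped: Vec<u64> = values.iter().copied().map(f64_to_u64).collect();
        assert!(mapped.windows(2).all(|w| w[0] <= w[1]));
        for &val in &values {
            assert_eq!(u64_to_f64(f64_to_u64(val)), val);
        }
    }
}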
|
||||
#[cfg(test)]
pub mod test {

    use proptest::prelude::*;

    use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};

    fn test_i64_converter_helper(val: i64) {
        assert_eq!(u64_to_i64(i64_to_u64(val)), val);
    }

    fn test_f64_converter_helper(val: f64) {
        assert_eq!(u64_to_f64(f64_to_u64(val)), val);
    }

    pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
        let mut buffer = Vec::new();
        O::default().serialize(&mut buffer).unwrap();
        assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
    }

    proptest! {
        #[test]
        fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
            let left_u64 = f64_to_u64(left);
            let right_u64 = f64_to_u64(right);
            assert_eq!(left_u64 < right_u64, left < right);
        }
    }

    #[test]
    fn test_i64_converter() {
        assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
        assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
        test_i64_converter_helper(0i64);
        test_i64_converter_helper(i64::MIN);
        test_i64_converter_helper(i64::MAX);
        for i in -1000i64..1000i64 {
            test_i64_converter_helper(i);
        }
    }

    #[test]
    fn test_f64_converter() {
        test_f64_converter_helper(f64::INFINITY);
        test_f64_converter_helper(f64::NEG_INFINITY);
        test_f64_converter_helper(0.0);
        test_f64_converter_helper(-0.0);
        test_f64_converter_helper(1.0);
        test_f64_converter_helper(-1.0);
    }

    #[test]
    fn test_f64_order() {
        assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY))
            .contains(&f64_to_u64(f64::NAN))); // nan is not a number
        assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); // same exponent, different mantissa
        assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); // same mantissa, different exponent
        assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); // different exponent and mantissa
        assert!(f64_to_u64(1.0) > f64_to_u64(-1.0)); // pos > neg
        assert!(f64_to_u64(-1.5) < f64_to_u64(-1.0));
        assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));
        assert!(f64_to_u64(-2.0) < f64_to_u64(-1.5));
    }
}
@@ -1,17 +1,55 @@
use crate::common::Endianness;
use crate::common::VInt;
use std::io::{Read, Write};
use std::{fmt, io};

use byteorder::{ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io;
use std::io::Read;
use std::io::Write;

use crate::{Endianness, VInt};

#[derive(Default)]
struct Counter(u64);

impl io::Write for Counter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0 += buf.len() as u64;
        Ok(buf.len())
    }

    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.0 += buf.len() as u64;
        Ok(())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

/// Trait for a simple binary serialization.
pub trait BinarySerializable: fmt::Debug + Sized {
    /// Serialize
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()>;
    /// Deserialize
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;

    /// Returns the number of bytes `serialize` would write, counted with the
    /// no-op `Counter` writer instead of allocating a buffer.
    fn num_bytes(&self) -> u64 {
        let mut counter = Counter::default();
        self.serialize(&mut counter).unwrap();
        counter.0
    }
}
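
// Editor's sketch (not part of the diff): implementing the trait for a
// hypothetical two-field struct, to show how `serialize`/`deserialize`
// compose and how `num_bytes` falls out of the default method above. Assumes
// the new `W: Write + ?Sized` signature and that `VInt` is re-exported at the
// crate root, as the `use crate::{Endianness, VInt}` import suggests.
#[cfg(test)]
mod binary_serializable_sketch {
    use std::io::{self, Read, Write};

    use super::BinarySerializable;
    use crate::VInt;

    #[derive(Debug, PartialEq)]
    struct DocRange {
        start: u64,
        len: u64,
    }

    impl BinarySerializable for DocRange {
        fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
            VInt(self.start).serialize(writer)?;
            VInt(self.len).serialize(writer)
        }

        fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
            let start = VInt::deserialize(reader)?.0;
            let len = VInt::deserialize(reader)?.0;
            Ok(DocRange { start, len })
        }
    }

    #[test]
    fn round_trip() {
        let range = DocRange { start: 5, len: 300 };
        let mut buffer = Vec::new();
        range.serialize(&mut buffer).unwrap();
        // `num_bytes` counts without materializing the payload.
        assert_eq!(range.num_bytes(), buffer.len() as u64);
        assert_eq!(DocRange::deserialize(&mut &buffer[..]).unwrap(), range);
    }
}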

pub trait DeserializeFrom<T: BinarySerializable> {
    fn deserialize(&mut self) -> io::Result<T>;
}

/// Implement deserialize from &[u8] for all types which implement BinarySerializable.
///
/// TryFrom would actually be preferable, but that is not possible because of the
/// orphan rules (not completely sure whether this could be resolved).
impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] {
    fn deserialize(&mut self) -> io::Result<T> {
        T::deserialize(self)
    }
}
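
// Editor's sketch (not part of the diff): with the blanket impl above, any
// `BinarySerializable` type can be read straight off a byte slice, and the
// slice is advanced past the consumed bytes, which is what `TryFrom` would
// otherwise provide.
#[cfg(test)]
mod deserialize_from_sketch {
    use super::{BinarySerializable, DeserializeFrom};

    #[test]
    fn read_two_values_off_a_slice() {
        let mut buffer = Vec::new();
        1u32.serialize(&mut buffer).unwrap();
        2u32.serialize(&mut buffer).unwrap();
        let mut bytes: &[u8] = &buffer[..];
        let first: u32 = bytes.deserialize().unwrap();
        let second: u32 = bytes.deserialize().unwrap();
        assert_eq!((first, second), (1, 2));
        // Both values (4 bytes each) have been consumed.
        assert!(bytes.is_empty());
    }
}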

/// `FixedSize` marks a `BinarySerializable` as
@@ -21,7 +59,7 @@ pub trait FixedSize: BinarySerializable {
}

impl BinarySerializable for () {
    fn serialize<W: Write>(&self, _: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, _: &mut W) -> io::Result<()> {
        Ok(())
    }
    fn deserialize<R: Read>(_: &mut R) -> io::Result<Self> {
@@ -34,7 +72,7 @@ impl FixedSize for () {
}

impl<T: BinarySerializable> BinarySerializable for Vec<T> {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.len() as u64).serialize(writer)?;
        for it in self {
            it.serialize(writer)?;
@@ -53,7 +91,7 @@ impl<T: BinarySerializable> BinarySerializable for Vec<T> {
}

impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for (Left, Right) {
    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, write: &mut W) -> io::Result<()> {
        self.0.serialize(write)?;
        self.1.serialize(write)
    }
@@ -61,9 +99,14 @@ impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for
        Ok((Left::deserialize(reader)?, Right::deserialize(reader)?))
    }
}
impl<Left: BinarySerializable + FixedSize, Right: BinarySerializable + FixedSize> FixedSize
    for (Left, Right)
{
    const SIZE_IN_BYTES: usize = Left::SIZE_IN_BYTES + Right::SIZE_IN_BYTES;
}

impl BinarySerializable for u32 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u32::<Endianness>(*self)
    }

@@ -76,8 +119,22 @@ impl FixedSize for u32 {
    const SIZE_IN_BYTES: usize = 4;
}

impl BinarySerializable for u16 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u16::<Endianness>(*self)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<u16> {
        reader.read_u16::<Endianness>()
    }
}

impl FixedSize for u16 {
    const SIZE_IN_BYTES: usize = 2;
}

impl BinarySerializable for u64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u64::<Endianness>(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
@@ -89,8 +146,34 @@ impl FixedSize for u64 {
    const SIZE_IN_BYTES: usize = 8;
}

impl BinarySerializable for u128 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u128::<Endianness>(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        reader.read_u128::<Endianness>()
    }
}

impl FixedSize for u128 {
    const SIZE_IN_BYTES: usize = 16;
}

impl BinarySerializable for f32 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_f32::<Endianness>(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        reader.read_f32::<Endianness>()
    }
}

impl FixedSize for f32 {
    const SIZE_IN_BYTES: usize = 4;
}

impl BinarySerializable for i64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_i64::<Endianness>(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
@@ -103,7 +186,7 @@ impl FixedSize for i64 {
}

impl BinarySerializable for f64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_f64::<Endianness>(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
@@ -116,7 +199,7 @@ impl FixedSize for f64 {
}

impl BinarySerializable for u8 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u8(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<u8> {
@@ -128,8 +211,29 @@ impl FixedSize for u8 {
    const SIZE_IN_BYTES: usize = 1;
}

impl BinarySerializable for bool {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u8(u8::from(*self))
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
        let val = reader.read_u8()?;
        match val {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid bool value on deserialization, data corrupted",
            )),
        }
    }
}

impl FixedSize for bool {
    const SIZE_IN_BYTES: usize = 1;
}

impl BinarySerializable for String {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        let data: &[u8] = self.as_bytes();
        VInt(data.len() as u64).serialize(writer)?;
        writer.write_all(data)
@@ -148,9 +252,8 @@ impl BinarySerializable for String {
#[cfg(test)]
pub mod test {

    use super::*;
    use crate::common::VInt;

    use super::{VInt, *};
    use crate::serialize::BinarySerializable;
    pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
        let mut buffer = Vec::new();
        O::default().serialize(&mut buffer).unwrap();
@@ -177,7 +280,7 @@ pub mod test {
        fixed_size_test::<u32>();
        assert_eq!(4, serialize_test(3u32));
        assert_eq!(4, serialize_test(5u32));
        assert_eq!(4, serialize_test(u32::max_value()));
        assert_eq!(4, serialize_test(u32::MAX));
    }

    #[test]
@@ -195,14 +298,16 @@ pub mod test {
        fixed_size_test::<u64>();
    }

    #[test]
    fn test_serialize_bool() {
        fixed_size_test::<bool>();
    }

    #[test]
    fn test_serialize_string() {
        assert_eq!(serialize_test(String::from("")), 1);
        assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
        assert_eq!(
            serialize_test(String::from("富士さん見える。")),
            1 + 3 * 8
        );
        assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
    }

    #[test]
@@ -223,6 +328,6 @@ pub mod test {
        assert_eq!(serialize_test(VInt(1234u64)), 2);
        assert_eq!(serialize_test(VInt(16_383u64)), 2);
        assert_eq!(serialize_test(VInt(16_384u64)), 3);
        assert_eq!(serialize_test(VInt(u64::max_value())), 10);
        assert_eq!(serialize_test(VInt(u64::MAX)), 10);
    }
}
@@ -1,16 +1,86 @@
use super::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
use std::io;
use std::io::Read;
use std::io::Write;
use std::io::{Read, Write};

use byteorder::{ByteOrder, LittleEndian};

use super::BinarySerializable;

/// Variable int serializes a u128 number
pub fn serialize_vint_u128(mut val: u128, output: &mut Vec<u8>) {
    loop {
        let next_byte: u8 = (val % 128u128) as u8;
        val /= 128u128;
        if val == 0 {
            output.push(next_byte | STOP_BIT);
            return;
        } else {
            output.push(next_byte);
        }
    }
}

/// Deserializes a u128 number
///
/// Returns the number and the slice after the vint
pub fn deserialize_vint_u128(data: &[u8]) -> io::Result<(u128, &[u8])> {
    let mut result = 0u128;
    let mut shift = 0u64;
    for i in 0..19 {
        let b = data[i];
        result |= u128::from(b % 128u8) << shift;
        if b >= STOP_BIT {
            return Ok((result, &data[i + 1..]));
        }
        shift += 7;
    }
    Err(io::Error::new(
        io::ErrorKind::InvalidData,
        "Failed to deserialize u128 vint",
    ))
}
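
// Editor's sketch (not part of the diff): the u128 vint stores 7 payload bits
// per byte, with the stop bit marking the last byte, so small values stay
// small. Assumes the two functions above are in scope.
#[cfg(test)]
mod vint_u128_sketch {
    use super::{deserialize_vint_u128, serialize_vint_u128};

    #[test]
    fn length_grows_with_magnitude() {
        for (val, expected_len) in [(0u128, 1usize), (127, 1), (128, 2), (1 << 14, 3)] {
            let mut buf = Vec::new();
            serialize_vint_u128(val, &mut buf);
            // One byte per 7 bits of payload.
            assert_eq!(buf.len(), expected_len);
            let (decoded, rest) = deserialize_vint_u128(&buf).unwrap();
            assert_eq!(decoded, val);
            assert!(rest.is_empty());
        }
    }
}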

/// Wrapper over a `u128` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VIntU128(pub u128);

impl BinarySerializable for VIntU128 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        let mut buffer = vec![];
        serialize_vint_u128(self.0, &mut buffer);
        writer.write_all(&buffer)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        let mut bytes = reader.bytes();
        let mut result = 0u128;
        let mut shift = 0u64;
        loop {
            match bytes.next() {
                Some(Ok(b)) => {
                    result |= u128::from(b % 128u8) << shift;
                    if b >= STOP_BIT {
                        return Ok(VIntU128(result));
                    }
                    shift += 7;
                }
                _ => {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "Reached end of buffer while reading VInt",
                    ));
                }
            }
        }
    }
}

/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Debug, Eq, PartialEq)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VInt(pub u64);

const STOP_BIT: u8 = 128;

pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
    const START_2: u64 = 1 << 7;
    const START_3: u64 = 1 << 14;
    const START_4: u64 = 1 << 21;
@@ -29,7 +99,7 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {

    let val = u64::from(val);
    const STOP_BIT: u64 = 128u64;
    match val {
    let (res, num_bytes) = match val {
        0..=STOP_1 => (val | STOP_BIT, 1),
        START_2..=STOP_2 => (
            (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,7 +126,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
            | (STOP_BIT << (8 * 4)),
            5,
        ),
    }
    };
    LittleEndian::write_u64(&mut buf[..], res);
    &buf[0..num_bytes]
}

/// Returns the number of bytes covered by a
@@ -85,23 +157,26 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start by a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
    let vlen = vint_len(*data);
    let (result, vlen) = read_u32_vint_no_advance(data);
    *data = &data[vlen..];
    result
}

pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
    let vlen = vint_len(data);
    let mut result = 0u32;
    let mut shift = 0u64;
    for &b in &data[..vlen] {
        result |= u32::from(b & 127u8) << shift;
        shift += 7;
    }
    *data = &data[vlen..];
    result
    (result, vlen)
}

/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
    let (val, num_bytes) = serialize_vint_u32(val);
    let mut buffer = [0u8; 8];
    LittleEndian::write_u64(&mut buffer, val);
    writer.write_all(&buffer[..num_bytes])
    let mut buf = [0u8; 8];
    let data = serialize_vint_u32(val, &mut buf);
    writer.write_all(data)
}
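
// Editor's sketch (not part of the diff): `write_u32_vint` and `read_u32_vint`
// round-trip through any `io::Write` sink, and the reader advances the input
// slice past each decoded value. Assumes the functions above are in scope.
#[cfg(test)]
mod u32_vint_sketch {
    use super::{read_u32_vint, read_u32_vint_no_advance, write_u32_vint};

    #[test]
    fn round_trip_through_a_buffer() {
        let mut buffer: Vec<u8> = Vec::new();
        for val in [0u32, 127, 128, 1_000_000, u32::MAX] {
            write_u32_vint(val, &mut buffer).unwrap();
        }
        // Peeking does not consume anything: 0 encodes as a single stop byte.
        let (first, first_len) = read_u32_vint_no_advance(&buffer);
        assert_eq!((first, first_len), (0u32, 1));
        // Reading advances the slice until it is exhausted.
        let mut cursor: &[u8] = &buffer[..];
        for expected in [0u32, 127, 128, 1_000_000, u32::MAX] {
            assert_eq!(read_u32_vint(&mut cursor), expected);
        }
        assert!(cursor.is_empty());
    }
}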

impl VInt {
@@ -136,7 +211,7 @@ impl VInt {
}

impl BinarySerializable for VInt {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        let mut buffer = [0u8; 10];
        let num_bytes = self.serialize_into(&mut buffer);
        writer.write_all(&buffer[0..num_bytes])
@@ -169,16 +244,14 @@ impl BinarySerializable for VInt {
#[cfg(test)]
mod tests {

    use super::serialize_vint_u32;
    use super::VInt;
    use crate::common::BinarySerializable;
    use byteorder::{ByteOrder, LittleEndian};
    use super::{serialize_vint_u32, BinarySerializable, VInt};
    use crate::vint::{deserialize_vint_u128, serialize_vint_u128, VIntU128};

    fn aux_test_vint(val: u64) {
        let mut v = [14u8; 10];
        let num_bytes = VInt(val).serialize_into(&mut v);
        for i in num_bytes..10 {
            assert_eq!(v[i], 14u8);
        for el in &v[num_bytes..10] {
            assert_eq!(el, &14u8);
        }
        assert!(num_bytes > 0);
        if num_bytes < 10 {
@@ -196,7 +269,7 @@ mod tests {
        aux_test_vint(0);
        aux_test_vint(1);
        aux_test_vint(5);
        aux_test_vint(u64::max_value());
        aux_test_vint(u64::MAX);
        for i in 1..9 {
            let power_of_128 = 1u64 << (7 * i);
            aux_test_vint(power_of_128 - 1u64);
@@ -208,12 +281,30 @@ mod tests {

    fn aux_test_serialize_vint_u32(val: u32) {
        let mut buffer = [0u8; 10];
        let mut buffer2 = [0u8; 10];
        let mut buffer2 = [0u8; 8];
        let len_vint = VInt(val as u64).serialize_into(&mut buffer);
        let (vint, len) = serialize_vint_u32(val);
        assert_eq!(len, len_vint, "len wrong for val {}", val);
        LittleEndian::write_u64(&mut buffer2, vint);
        assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
        let res2 = serialize_vint_u32(val, &mut buffer2);
        assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
    }

    fn aux_test_vint_u128(val: u128) {
        let mut data = vec![];
        serialize_vint_u128(val, &mut data);
        let (deser_val, _data) = deserialize_vint_u128(&data).unwrap();
        assert_eq!(val, deser_val);

        let mut out = vec![];
        VIntU128(val).serialize(&mut out).unwrap();
        let deser_val = VIntU128::deserialize(&mut &out[..]).unwrap();
        assert_eq!(val, deser_val.0);
    }

    #[test]
    fn test_vint_u128() {
        aux_test_vint_u128(0);
        aux_test_vint_u128(1);
        aux_test_vint_u128(u128::MAX / 3);
        aux_test_vint_u128(u128::MAX);
    }

    #[test]
@@ -227,6 +318,6 @@ mod tests {
        aux_test_serialize_vint_u32(power_of_128);
        aux_test_serialize_vint_u32(power_of_128 + 1u32);
    }
        aux_test_serialize_vint_u32(u32::max_value());
        aux_test_serialize_vint_u32(u32::MAX);
    }
}
common/src/writer.rs (new file, 114 lines)
@@ -0,0 +1,114 @@
use std::io::{self, BufWriter, Write};

pub struct CountingWriter<W> {
    underlying: W,
    written_bytes: u64,
}

impl<W: Write> CountingWriter<W> {
    pub fn wrap(underlying: W) -> CountingWriter<W> {
        CountingWriter {
            underlying,
            written_bytes: 0,
        }
    }

    #[inline]
    pub fn written_bytes(&self) -> u64 {
        self.written_bytes
    }

    /// Returns the underlying write object.
    /// Note that this method does not trigger any flushing.
    #[inline]
    pub fn finish(self) -> W {
        self.underlying
    }
}

impl<W: Write> Write for CountingWriter<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written_size = self.underlying.write(buf)?;
        self.written_bytes += written_size as u64;
        Ok(written_size)
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.underlying.write_all(buf)?;
        self.written_bytes += buf.len() as u64;
        Ok(())
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        self.underlying.flush()
    }
}

impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
    #[inline]
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        self.underlying.terminate_ref(token)
    }
}

/// Struct used to prevent
/// [`terminate_ref`](TerminatingWrite::terminate_ref) from being called directly.
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());

/// Trait used to indicate that no more writes need to be done on a writer.
pub trait TerminatingWrite: Write + Send + Sync {
    /// Indicate that the writer will no longer be used. Internally calls `terminate_ref`.
    fn terminate(mut self) -> io::Result<()>
    where Self: Sized {
        self.terminate_ref(AntiCallToken(()))
    }

    /// You should implement this function to define custom behavior.
    /// This function should flush any buffer it may hold.
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
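
// Editor's sketch (not part of the diff): a hypothetical writer that must
// persist a footer once writing is done. `terminate_ref` is the hook, and
// `AntiCallToken` forces callers to go through `terminate()`. Assumes the
// trait definition above; the shared `Arc<Mutex<..>>` is only there so the
// test can inspect the bytes after `terminate()` consumes the writer.
#[cfg(test)]
mod terminating_write_sketch {
    use std::io::{self, Write};
    use std::sync::{Arc, Mutex};

    use super::{AntiCallToken, TerminatingWrite};

    struct FooterWriter {
        inner: Arc<Mutex<Vec<u8>>>,
    }

    impl Write for FooterWriter {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.inner.lock().unwrap().extend_from_slice(buf);
            Ok(buf.len())
        }
        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }

    impl TerminatingWrite for FooterWriter {
        fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
            // Append a footer byte once no more writes will happen.
            self.inner.lock().unwrap().push(0xFF);
            Ok(())
        }
    }

    #[test]
    fn footer_written_on_terminate() {
        let storage = Arc::new(Mutex::new(Vec::new()));
        let mut writer = FooterWriter { inner: Arc::clone(&storage) };
        writer.write_all(b"data").unwrap();
        writer.terminate().unwrap();
        assert_eq!(storage.lock().unwrap().as_slice(), b"data\xff");
    }
}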

impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        self.as_mut().terminate_ref(token)
    }
}

impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
    fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
        self.flush()?;
        self.get_mut().terminate_ref(a)
    }
}

impl<'a> TerminatingWrite for &'a mut Vec<u8> {
    fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}

#[cfg(test)]
mod test {

    use std::io::Write;

    use super::CountingWriter;

    #[test]
    fn test_counting_writer() {
        let buffer: Vec<u8> = vec![];
        let mut counting_writer = CountingWriter::wrap(buffer);
        let bytes = (0u8..10u8).collect::<Vec<u8>>();
        counting_writer.write_all(&bytes).unwrap();
        let len = counting_writer.written_bytes();
        let buffer_restituted: Vec<u8> = counting_writer.finish();
        assert_eq!(len, 10u64);
        assert_eq!(buffer_restituted.len(), 10);
    }
}
New image assets added under doc/assets/images/ (binary and SVG content not shown here):
  doc/assets/images/Nuclia.png (3.1 KiB)
  doc/assets/images/element-dark-theme.png (56 KiB)
  doc/assets/images/element.io.svg (8 lines, 5.2 KiB)
  doc/assets/images/etsy.png (85 KiB)
  doc/assets/images/humanfirst.ai-dark-theme.png (23 KiB)
Some files were not shown because too many files have changed in this diff.