Mirror of https://github.com/quickwit-oss/tantivy.git
Synced 2025-12-28 04:52:55 +00:00

Compare commits (929 commits)
| Author | SHA1 | Date |
|---|---|---|

(929 commit rows followed, from 7720d21265 through 8a28d1643d; only the SHA1 column survived.)
.github/FUNDING.yml (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
# These are supported funding model platforms

github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
---
name: Bug report
about: Create a report to help us improve

---

**Describe the bug**
- What did you do?
- What happened?
- What was expected?

**Which version of tantivy are you using?**
If "master", ideally give the specific sha1 revision.

**To Reproduce**

If your bug is deterministic, can you give a minimal reproducing code?
Some bugs are not deterministic. Can you describe with precision in which context it happened?
If this is possible, can you share your code?
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**[Optional] describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
.github/ISSUE_TEMPLATE/question.md (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
---
name: Question
about: Ask any question about tantivy's usage...

---

Try to be specific about your use case...
.gitignore (vendored, 8 lines changed)
@@ -1,3 +1,6 @@
tantivy.iml
proptest-regressions
*.swp
target
target/debug
.vscode
@@ -5,4 +8,7 @@ target/release
Cargo.lock
benchmark
.DS_Store
cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat
.travis.yml (91 lines changed)
@@ -1,16 +1,22 @@
# Based on the "trust" template v0.1.2
# https://github.com/japaric/trust/tree/v0.1.2

dist: trusty
language: rust
rust:
  - nightly
services: docker
sudo: required

env:
  global:
    - CC=gcc-4.8
    - CXX=g++-4.8
    - CRATE_NAME=tantivy
    - TRAVIS_CARGO_NIGHTLY_FEATURE=""
    - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=
    # - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=

addons:
  apt:
    sources:
      - ubuntu-toolchain-r-test
      - kalakris-cmake
    packages:
      - gcc-4.8
      - g++-4.8
@@ -18,18 +24,69 @@ addons:
      - libelf-dev
      - libdw-dev
      - binutils-dev
      - cmake

matrix:
  include:
    # Android
    - env: TARGET=aarch64-linux-android DISABLE_TESTS=1
    #- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1
    #- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1
    #- env: TARGET=i686-linux-android DISABLE_TESTS=1
    #- env: TARGET=x86_64-linux-android DISABLE_TESTS=1

    # Linux
    #- env: TARGET=aarch64-unknown-linux-gnu
    #- env: TARGET=i686-unknown-linux-gnu
    - env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 #UPLOAD_DOCS=1
    # - env: TARGET=x86_64-unknown-linux-musl CODECOV=1
    # OSX
    #- env: TARGET=x86_64-apple-darwin
    #  os: osx

before_install:
  - set -e
  - rustup self update
  - rustup component add rustfmt

install:
  - sh ci/install.sh
  - source ~/.cargo/env || true
  - env | grep "TRAVIS"

before_script:
  - |
    pip install 'travis-cargo<0.2' --user &&
    export PATH=$HOME/.local/bin:$PATH
  - export PATH=$HOME/.cargo/bin:$PATH
  - cargo install cargo-update || echo "cargo-update already installed"
  - cargo install cargo-travis || echo "cargo-travis already installed"

script:
  - |
    travis-cargo build &&
    travis-cargo test &&
    travis-cargo bench &&
    travis-cargo doc
  - bash ci/script.sh
  - cargo fmt --all -- --check

before_deploy:
  - sh ci/before_deploy.sh

after_success:
  - bash ./script/build-doc.sh
  - travis-cargo doc-upload
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then travis-cargo coveralls --no-sudo --verify; fi
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ./kcov/build/src/kcov --verify --coveralls-id=$TRAVIS_JOB_ID --include-path=`pwd`/src --exclude-path=`pwd`/cpp --exclude-pattern=/.cargo target/kcov target/debug/tantivy-*; fi
  # Needs GH_TOKEN env var to be set in travis settings
  - if [[ -v GH_TOKEN ]]; then echo "GH TOKEN IS SET"; else echo "GH TOKEN NOT SET"; fi
  - if [[ -v UPLOAD_DOCS ]]; then cargo doc; cargo doc-upload; else echo "doc upload disabled."; fi

#cache: cargo
#before_cache:
#  # Travis can't cache files that are not readable by "others"
#  - chmod -R a+r $HOME/.cargo
#  - find ./target/debug -type f -maxdepth 1 -delete
#  - rm -f ./target/.rustc_info.json
#  - rm -fr ./target/debug/{deps,.fingerprint}/tantivy*
#  - rm -r target/debug/examples/
#  - ls -1 examples/ | sed -e 's/\.rs$//' | xargs -I "{}" find target/* -name "*{}*" -type f -delete

#branches:
#  only:
#    # release tags
#    - /^v\d+\.\d+\.\d+.*$/
#    - master

notifications:
  email:
    on_success: never
AUTHORS (new file, 11 lines)
@@ -0,0 +1,11 @@
# This is the list of authors of tantivy for copyright purposes.
Paul Masurel
Laurentiu Nicola
Dru Sellers
Ashley Mannix
Michael J. Curry
Jason Wolfe
# As an employee of Google I am required to add Google LLC
# in the list of authors, but this project is not affiliated to Google
# in any other way.
Google LLC
CHANGELOG.md (354 lines changed)
@@ -1,3 +1,348 @@
Tantivy 0.13.2
===================
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`. (#896)
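A hedged sketch of the calling pattern after this fix (`facet_field` is a hypothetical `Field` from the schema; exact signatures may differ):

```rust
// A segment containing no doc with this facet now yields None
// instead of failing.
if let Some(facet_reader) = segment_reader.facet_reader(facet_field) {
    // ... read facet ordinals for docs in this segment ...
}
```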
Tantivy 0.13.1
======================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.


Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (Block-WAND information is added in the skip list).
The index size increase is minor, as this information is only added for
full blocks.
If you have a massive index for which reindexing is not an option, please contact me
so that we can discuss possible solutions.

- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be `Sync + Send`.
- `MMapDirectory::open` does not return a `Result` anymore.
- Change in the DocSet and Scorer API. (@fulmicoton)
  A freshly created DocSet points directly to its first doc. A sentinel value called `TERMINATED` marks the end of a DocSet.
  `.advance()` returns the new `DocId`. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting `DocId`.
  As a result, iterating through a DocSet now looks as follows:

```rust
let mut doc = docset.doc();
while doc != TERMINATED {
    // ...
    doc = docset.advance();
}
```
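`seek` follows the same convention. A minimal sketch (my illustration, not from the changelog; `docset` and `target` as above):

```rust
// Position the docset on the first doc >= target; `seek` returns that DocId,
// or TERMINATED if no such doc exists.
let doc = docset.seek(target);
if doc != TERMINATED {
    debug_assert_eq!(docset.doc(), doc);
}
```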
The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimizations and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
- Added Block-WAND. Performance of top-K queries on term unions should be greatly increased. (@fulmicoton, and special thanks
  to the PISA team for answering all my questions!)


Tantivy 0.12.0
======================
- Removed static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
- Important bugfix #777, which caused tantivy to retain memory mappings. (diagnosed by @poljar)
- Added support for field boosting. (#547, @fulmicoton)

## How to update?

Crates relying on a custom tokenizer, or registering a tokenizer in the manager, will require some
minor changes. See https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
for a code sample, and the sketch below.
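A minimal sketch of registering a tokenizer after this change, in the spirit of the linked example (the `"custom_en"` name and the pipeline are illustrative; an `index: Index` is assumed):

```rust
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};

// Tokenizer pipelines are now assembled as dynamically dispatched
// `TextAnalyzer`s rather than statically typed pipelines.
let analyzer = TextAnalyzer::from(SimpleTokenizer).filter(LowerCaser);
index.tokenizers().register("custom_en", analyzer);
```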
Tantivy 0.11.3
=======================
- Fixed DateTime as a fast field (#735)


Tantivy 0.11.2
=======================
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
- Exposing a constructor for `WatchHandle` (#731)


Tantivy 0.11.1
=====================
- Bug fix #729


Tantivy 0.11.0
=====================

- Added f64 field. Internally reuses the u64 code the same way i64 does (@fdb-hiroshima)
- Various bugfixes in the query parser.
  - Better handling of hyphens in the query parser. (#609)
  - Better handling of whitespace.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types, e.g. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See details in #629
- Avoid rebuilding the Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add a footer with some metadata to index files. #605 (@fdb-hiroshima)
- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
- Added handling of pre-tokenized text fields (#642), which will enable users to
  load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)

## How to update?

- The index format is changed. You are required to reindex your data to use tantivy 0.11.
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regexes are now compiled when the `RegexQuery` instance is built. As a result, construction can now return
  an error, and handling the `Result` is required (see the sketch below).
- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`.
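A hedged sketch of the new construction path (assuming a `title: Field` obtained from the schema; `RegexQuery::from_pattern` is the constructor in recent versions and may differ in 0.11):

```rust
use tantivy::query::RegexQuery;

// The regex is compiled once at construction, so a malformed pattern
// surfaces here as an Err rather than at search time.
let query = RegexQuery::from_pattern("tant.*", title)?;
```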
Tantivy 0.10.2
=====================

- Closes #656. Solved a memory leak.


Tantivy 0.10.1
=====================

- Closes #544. A few users experienced problems with the directory watching system.
  Avoid watching the mmap directory until someone effectively creates a reader that uses
  this functionality.


Tantivy 0.10.0
=====================

*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*

- Added an API to easily tweak or entirely replace the
  default score. See `TopDocs::tweak_score` and `TopScore::custom_score` (@pmasurel)
- Added an ASCII folding filter (@drusellers)
- Bugfix in `query.count` in presence of deletes (@pmasurel)
- Added `.explain(...)` in `Query` and `Weight`. (@pmasurel)
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
  All segments are simply removed.

Minor
---------
- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
  Calling `.freq()` or `.doc()` when `.advance()` has never been called
  on segment postings should panic from now on.
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
- Fast fields are now preloaded when the `SegmentReader` is created.
- `IndexMeta` is now public. (@hntd187)
- `IndexWriter` is now `Sync`, making it possible to use it behind an
  `Arc<RwLock<IndexWriter>>`: `add_document` and `delete_term` only
  require a read lock (see the sketch below). (@pmasurel)
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
- Bugfix - files get deleted slightly earlier
- Compilation resources improved (@fdb-hiroshima)
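A minimal sketch of that pattern (assuming an `index: Index` and a `title` text field; the heap size is illustrative):

```rust
use std::sync::{Arc, RwLock};

let writer = Arc::new(RwLock::new(index.writer(50_000_000)?));

// Several threads can add documents concurrently through read locks.
let w = Arc::clone(&writer);
w.read().unwrap().add_document(doc!(title => "Of Mice and Men"));

// Committing still needs exclusive access, hence the write lock.
writer.write().unwrap().commit()?;
```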
## How to update?

Your program should be usable as is.

### Fast fields

Fast fields used to be accessed directly from the `SegmentReader`.
The API changed: you are now required to acquire your fast field reader via
`segment_reader.fast_fields()`, and use one of the typed methods (see the sketch below):
- `.u64()`, `.i64()` if your field is single-valued;
- `.u64s()`, `.i64s()` if your field is multi-valued;
- `.bytes()` if your field is a bytes fast field.
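A hedged sketch, assuming `popularity_field` is a single-valued u64 fast field declared in the schema:

```rust
let fast_fields = segment_reader.fast_fields();
// `u64` returns None if the field is not a single-valued u64 fast field.
let popularity = fast_fields.u64(popularity_field).expect("not a u64 fast field");
let value: u64 = popularity.get(doc_id);
```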
Tantivy 0.9.0
=====================
*The 0.9.0 index format is not compatible with the
previous index format.*
- MAJOR BUGFIX:
  Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
- Removed most unsafe code (@fulmicoton)
- Indexer memory footprint improved (VInt compression, inlining of the first block). (@fulmicoton)
- Stemming in other languages possible (@pentlander)
- Segments with no docs are deleted earlier (@barrotsteindev)
- Added grouped add and delete operations.
  They are guaranteed to happen together (i.e. they cannot be split by a commit).
  In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
- Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
  for int fields. (@fulmicoton)
- Added DateTime field (@barrotsteindev)
- Added IndexReader. By default, the index is reloaded automatically upon new commits (@fulmicoton)
- SIMD linear search within blocks (@fulmicoton)

## How to update?

tantivy 0.9 brought some API breaking changes.
To update from tantivy 0.8, you will need to go through the following steps.

- `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::STORED`.
- The index does not hold the pool of searchers anymore. You are required to create an intermediary object called
  an `IndexReader` for this.

```rust
// Create the reader. You typically need to create one reader for the entire
// lifetime of your program.
let reader = index.reader()?;

// Acquiring a searcher (previously `index.searcher()`) is now written:
let searcher = reader.searcher();

// With the default settings of the reader, you are not required to
// call `index.load_searchers()` anymore.
//
// The IndexReader will pick up the change automatically, regardless
// of whether the update was done in a different process or not.
// If this behavior is not wanted, you can create your reader with
// `ReloadPolicy::Manual`, and manually decide when to reload the index
// by calling `reader.reload()?`.
```
Tantivy 0.8.2
=====================
Fixed the build for x86_64 platforms. (#496)
No need to update from 0.8.1 if tantivy
is building on your platform.


Tantivy 0.8.1
=====================
Hotfix of #476.

Merge was reflecting deletes before the commit was passed.
Thanks @barrotsteindev for reporting the bug.


Tantivy 0.8.0
=====================
*No change in the index format*
- API breaking change in the collector API. (@jwolfe, @fulmicoton)
- Multithreaded search (@jwolfe, @fulmicoton)


Tantivy 0.7.1
=====================
*No change in the index format*
- Bugfix: NGramTokenizer panicked on non-ASCII chars
- Added a space usage API


Tantivy 0.7
=====================
- Skip data for doc ids and positions (@fulmicoton),
  greatly improving performance
- Tantivy errors now rely on the failure crate (@drusellers)
- Added support for `AND`, `OR`, `NOT` syntax in addition to the `+`,`-` syntax
- Added a snippet generator with highlighting (@vigneshsarma, @fulmicoton)
- Added a `TopFieldCollector` (@pentlander)


Tantivy 0.6.1
=========================
- Bugfix #324: GC was removing files that were still useful
- Added support for parsing AllQuery and RangeQuery via QueryParser
  - AllQuery: `*`
  - RangeQuery:
    - Inclusive `field:[startIncl to endIncl]`
    - Exclusive `field:{startExcl to endExcl}`
    - Mixed `field:[startIncl to endExcl}` and vice versa
    - Unbounded `field:[start to *]`, `field:[* to end]`
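A hedged sketch of parsing one of these expressions (`weight_field` is hypothetical, and `QueryParser::for_index` is the later form of the parser constructor, shown here for illustration):

```rust
use tantivy::query::QueryParser;

let query_parser = QueryParser::for_index(&index, vec![weight_field]);
// Inclusive range over `weight`, per the syntax listed above.
let query = query_parser.parse_query("weight:[70 to 80]")?;
```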
Tantivy 0.6
==========================

Special thanks to @drusellers and @jason-wolfe for their contributions
to this release!

- Removed C code. Tantivy is now pure Rust. (@pmasurel)
- BM25 (@pmasurel)
- Approximate field norms encoded over 1 byte. (@pmasurel)
- Compiles on stable Rust (@pmasurel)
- Add `&[u8]` fast field for associating arbitrary bytes to each document (@jason-wolfe) (#270)
  - Completely uncompressed
  - Internally: one u64 fast field for indexes, one fast field for the bytes themselves.
- Add NGram token support (@drusellers)
- Add stopword filter support (@drusellers)
- Add a FuzzyTermQuery (@drusellers)
- Add a RegexQuery (@drusellers)
- Various performance improvements (@pmasurel)


Tantivy 0.5.2
===========================
- Bugfix #274
- Bugfix #280
- Bugfix #289


Tantivy 0.5.1
==========================
- Bugfix #254: tantivy failed if no documents in a segment contained a specific field.


Tantivy 0.5
==========================
- Faceting
- RangeQuery
- Configurable tokenization pipeline
- Bugfix in PhraseQuery
- Various query optimisations
- Allowing very large indexes
  - 64-bit file addresses
  - Smarter encoding of the `TermInfo` objects



Tantivy 0.4.3
==========================

- Bugfix: race condition when deleting files. (#198)


Tantivy 0.4.2
==========================

- Prevent usage of AVX2 instructions (#201)


Tantivy 0.4.1
==========================

- Bugfix for non-indexed fields. (#199)


Tantivy 0.4.0
==========================

- Raised the limit on the number of fields (previously 256 fields) (@fulmicoton)
- Removed u32 fields. They are replaced by u64 and i64 fields (#65) (@fulmicoton)
- Optimized skip in SegmentPostings (#130) (@lnicola)
- Replaced rustc_serialize by serde. Kudos to @KodrAus and @lnicola
- Using error-chain (@KodrAus)
- QueryParser: (@fulmicoton)
  - Explicit error returned when searching for a term that is not indexed
  - Searching for an int term via the query parser was broken `(age:1)`
  - Searching for a non-indexed field returns an explicit error
  - Phrase queries for non-tokenized fields are not tokenized by the query parser.
- Faster/better indexing (@fulmicoton)
  - using murmurhash2
  - faster merging
  - more memory-efficient fast field writer (@lnicola)
  - better handling of collisions
  - lower memory usage
- Added API, most notably to iterate over ranges of terms (@fulmicoton)
- Bugfix that was preventing segment files from being unmapped on index drop (@fulmicoton)
- Made the `doc!` macro public (@fulmicoton)
- Added an alternative implementation of the streaming dictionary (@fulmicoton)



Tantivy 0.3.1
==========================

- Exposed a method to trigger file garbage collection



Tantivy 0.3
==========================
@@ -5,7 +350,7 @@ Tantivy 0.3
Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
for their contribution to this release.

Thanks also to everyone in tantivy gitter chat
for their advice and company :)

https://gitter.im/tantivy-search/tantivy
@@ -13,12 +358,13 @@ https://gitter.im/tantivy-search/tantivy

Warning:

Tantivy 0.3 is NOT backward compatible with tantivy 0.2
code and index format.
You should not expect backward compatibility before
tantivy 1.0.


New Features
------------

@@ -40,7 +386,7 @@ Thanks to @KodrAus ! (#108)
  the natural ordering.
- Building binary targets for tantivy-cli (Thanks to @KodrAus)
- Misc invisible bug fixes, and code cleanup.
- Use
Cargo.toml (109 lines changed)
@@ -1,64 +1,101 @@
[package]
name = "tantivy"
version = "0.3.1"
version = "0.13.2"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
build = "build.rs"
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Tantivy is a search engine library."""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
description = """Search engine library"""
documentation = "https://docs.rs/tantivy/"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
byteorder = "1.0"
memmap = "0.4"
lazy_static = "0.2.1"
regex = "0.2"
fst = "0.1.37"
atomicwrites = "0.1.3"
tempfile = "2.1"
rustc-serialize = "0.3"
log = "0.3.6"
combine = "2.2"
tempdir = "0.3"
bincode = "0.5"
libc = {version = "0.2.20", optional=true}
num_cpus = "1.2"
itertools = "0.5.9"
lz4 = "1.20"
bit-set = "0.4.0"
time = "0.1"
uuid = { version = "0.4", features = ["v4", "rustc-serialize"] }
chan = "0.1"
version = "2"
crossbeam = "0.2"
futures = "0.1.9"
futures-cpupool = "0.1.2"
base64 = "0.12"
byteorder = "1"
crc32fast = "1"
once_cell = "1"
regex = {version = "1", default-features = false, features = ["std"]}
tantivy-fst = "0.3"
memmap = {version = "0.7", optional=true}
lz4 = {version="1", optional=true}
snap = "1"
atomicwrites = {version="0.2", optional=true}
tempfile = "3"
log = "0.4"
serde = {version="1", features=["derive"]}
serde_json = "1"
num_cpus = "1"
fs2 = {version="0.4", optional=true}
levenshtein_automata = "0.2"
notify = {version="4", optional=true}
uuid = { version = "0.8", features = ["v4", "serde"] }
crossbeam = "0.7"
futures = {version = "0.3", features=["thread-pool"] }
owning_ref = "0.4"
stable_deref_trait = "1"
rust-stemmers = "1"
downcast-rs = "1"
tantivy-query-grammar = { version="0.13", path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
fnv = "1"
owned-read = "0.4"
failure = "0.1"
htmlescape = "0.3"
fail = "0.4"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "1"
rayon = "1"

[target.'cfg(windows)'.dependencies]
winapi = "0.2"
winapi = "0.3"

[dev-dependencies]
rand = "0.3"
env_logger = "0.4"
rand = "0.7"
maplit = "1"
matches = "0.1.8"
proptest = "0.10"

[build-dependencies]
gcc = {version = "0.3", optional=true}

[dev-dependencies.fail]
version = "0.4"
features = ["failpoints"]

[profile.release]
opt-level = 3
debug = false
lto = true
debug-assertions = false

[profile.test]
debug-assertions = true
overflow-checks = true

[features]
default = ["simdcompression"]
simdcompression = ["libc", "gcc"]
default = ["mmap"]
mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
scoref64 = [] # scores are f64 instead of f32. Was introduced to debug Block-WAND.

[workspace]
members = ["query-grammar"]

[badges]
travis-ci = { repository = "tantivy-search/tantivy" }

# Following the "fail" crate best practices, we isolate
# tests that define specific behavior in fail check points
# in a different binary.
#
# We do that because fail relies on a global definition of
# failpoint behavior and hence is incompatible with
# multithreading.
[[test]]
name = "failpoints"
path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"]
LICENSE (2 lines changed)
@@ -1,4 +1,4 @@
Copyright (c) 2016 Paul Masurel
Copyright (c) 2018 by the project authors, as listed in the AUTHORS file.

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
Makefile (new file, 3 lines)
@@ -0,0 +1,3 @@
test:
	echo "Run test only... No examples."
	cargo test --tests --lib
README.md (144 lines changed)
@@ -1,62 +1,140 @@


[](https://travis-ci.org/tantivy-search/tantivy)
[](https://coveralls.io/github/tantivy-search/tantivy?branch=master)
[](https://codecov.io/gh/tantivy-search/tantivy)
[](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://opensource.org/licenses/MIT)
[](https://ci.appveyor.com/project/fulmicoton/tantivy)

[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
[](https://crates.io/crates/tantivy)
[](https://saythanks.io/to/fulmicoton)

**Tantivy** is a **full text search engine library** written in rust.


It is strongly inspired by Lucene's design.
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/0)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/1)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/2)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/3)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/4)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/5)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
[](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)

[](https://www.patreon.com/fulmicoton)


**Tantivy** is a **full text search engine library** written in Rust.

It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense that it is not
an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine.

Tantivy is, in fact, strongly inspired by Lucene's design.

# Benchmark

The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
performance for different types of queries / collections.

In general, Tantivy tends to be
- slower than Lucene on union with a Top-K due to Block-WAND optimization.
- faster than Lucene on intersection and phrase queries.

Your mileage WILL vary depending on the nature of queries and their load.

# Features

- configurable indexing (optional term frequency and position indexing)
- tf-idf scoring
- Basic query language
- Phrase queries
- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages, with third-party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy) and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder)))
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as Lucene)
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
- Phrase queries search (e.g. `"michael jackson"`)
- Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes 4 minutes on my desktop)
- mmap based
- optional SIMD integer compression
- u32 fast fields (equivalent of doc values in Lucene)
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- Mmap directory
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
- Single-valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, date, and hierarchical facet fields
- LZ4-compressed document store
- Range queries
- Faceted search
- Configurable indexing (optional term frequency and position indexing)
- Cheesy logo with a horse

Tantivy supports Linux, MacOS and Windows.

## Non-features

- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
  library upon which one could build a distributed search engine. Serializable/mergeable collector state, for instance,
  is within the scope of Tantivy.


# Getting started

- [tantivy's usage example](http://fulmicoton.com/tantivy-examples/simple_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
  It will walk you through getting a wikipedia search engine up and running in a few minutes.
- [reference doc]
  - [For the last released version](https://docs.rs/tantivy/)
  - [For the last master branch](https://tantivy-search.github.io/tantivy/tantivy/index.html)

Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.

# Compiling
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html) (see the sketch below)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
  index documents, and search via the CLI or a small server with a REST API.
  It walks you through getting a wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/)
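For orientation, a minimal end-to-end sketch in the spirit of the linked `basic_search` example (API as of tantivy 0.13; the field name, heap size, and documents are illustrative):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Define a schema with a single indexed, stored text field.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    // Index a document in an in-memory index.
    let index = Index::create_in_ram(schema.clone());
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "The Old Man and the Sea"));
    writer.commit()?;

    // Search it back.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("old man")?;
    for (_score, addr) in searcher.search(&query, &TopDocs::with_limit(10))? {
        println!("{}", schema.to_json(&searcher.doc(addr)?));
    }
    Ok(())
}
```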
Tantivy requires Rust Nightly because it requires the features [`box_syntax`](https://doc.rust-lang.org/stable/book/box-syntax-and-patterns.html), [`optin_builtin_traits`](https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md), and [`conservative_impl_trait`](https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md).
By default, `tantivy` uses a git submodule called `simdcomp`.
After cloning the repository, you will need to initialize and update
the submodules. The project can then be built using `cargo`.
# How can I support this project?

    git clone git@github.com:tantivy-search/tantivy.git

There are many ways to support this project.

- Use Tantivy and tell us about your experience on [Gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
- Report bugs
- Write a blog post
- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
- Talk about Tantivy around you
- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)

# Contributing code

We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.

## Clone and build locally

Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run:

```bash
git clone https://github.com/tantivy-search/tantivy.git
cd tantivy
cargo build
```

## Run tests

Alternatively, if you are trying to compile `tantivy` without simd compression,
you can disable this functionality. In this case, this submodule is not required
and you can compile tantivy by using the `--no-default-features` flag.
Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`.

    cargo build --no-default-features

## Debug

You might find it useful to step through the program with a debugger.

# Contribute
### A failing test

Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:

```bash
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
```

Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:

```bash
(gdb) run --test-threads 1 --test $NAME_OF_TEST
```

### An example

By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:

```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME
(gdb) run
```
@@ -4,11 +4,8 @@
 os: Visual Studio 2015
 environment:
   matrix:
-    - channel: nightly
+    - channel: stable
       target: x86_64-pc-windows-msvc
-    - channel: nightly
-      target: x86_64-pc-windows-gnu
-      msys_bits: 64

 install:
   - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
@@ -21,4 +18,5 @@ install:
 build: false

 test_script:
-  - REM SET RUST_LOG=tantivy,test & cargo test --verbose
+  - REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features mmap
+  - REM SET RUST_BACKTRACE=1 & cargo build --examples
build.rs (deleted)
@@ -1,50 +0,0 @@
#[cfg(feature = "simdcompression")]
mod build {
    extern crate gcc;

    pub fn build() {
        let mut config = gcc::Config::new();
        config.include("./cpp/simdcomp/include")
            .file("cpp/simdcomp/src/avxbitpacking.c")
            .file("cpp/simdcomp/src/simdintegratedbitpacking.c")
            .file("cpp/simdcomp/src/simdbitpacking.c")
            .file("cpp/simdcomp/src/simdpackedsearch.c")
            .file("cpp/simdcomp/src/simdcomputil.c")
            .file("cpp/simdcomp/src/simdpackedselect.c")
            .file("cpp/simdcomp/src/simdfor.c")
            .file("cpp/simdcomp_wrapper.c");

        if !cfg!(debug_assertions) {
            config.opt_level(3);

            if cfg!(target_env = "msvc") {
                config.define("NDEBUG", None)
                    .flag("/Gm-")
                    .flag("/GS-")
                    .flag("/Gy")
                    .flag("/Oi")
                    .flag("/GL");
            } else {
                config.flag("-msse4.1")
                    .flag("-march=native");
            }
        }

        config.compile("libsimdcomp.a");

        // Workaround for linking static libraries built with /GL
        // https://github.com/rust-lang/rust/issues/26003
        if !cfg!(debug_assertions) && cfg!(target_env = "msvc") {
            println!("cargo:rustc-link-lib=dylib=simdcomp");
        }
    }
}

#[cfg(not(feature = "simdcompression"))]
mod build {
    pub fn build() {}
}

fn main() {
    build::build();
}
ci/before_deploy.ps1 (new file)
@@ -0,0 +1,23 @@
# This script takes care of packaging the build artifacts that will go in the
# release zipfile

$SRC_DIR = $PWD.Path
$STAGE = [System.Guid]::NewGuid().ToString()

Set-Location $ENV:Temp
New-Item -Type Directory -Name $STAGE
Set-Location $STAGE

$ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip"

# TODO Update this to package the right artifacts
Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\hello.exe" '.\'

7z a "$ZIP" *

Push-AppveyorArtifact "$ZIP"

Remove-Item *.* -Force
Set-Location ..
Remove-Item $STAGE
Set-Location $SRC_DIR
ci/before_deploy.sh (new file)
@@ -0,0 +1,33 @@
# This script takes care of building your crate and packaging it for release

set -ex

main() {
    local src=$(pwd) \
          stage=

    case $TRAVIS_OS_NAME in
        linux)
            stage=$(mktemp -d)
            ;;
        osx)
            stage=$(mktemp -d -t tmp)
            ;;
    esac

    test -f Cargo.lock || cargo generate-lockfile

    # TODO Update this to build the artifacts that matter to you
    cross rustc --bin hello --target $TARGET --release -- -C lto

    # TODO Update this to package the right artifacts
    cp target/$TARGET/release/hello $stage/

    cd $stage
    tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz *
    cd $src

    rm -rf $stage
}

main
ci/install.sh (new file)
@@ -0,0 +1,47 @@
set -ex

main() {
    local target=
    if [ $TRAVIS_OS_NAME = linux ]; then
        target=x86_64-unknown-linux-musl
        sort=sort
    else
        target=x86_64-apple-darwin
        sort=gsort  # for `sort --version-sort`, from brew's coreutils.
    fi

    # Builds for iOS are done on OSX, but require the specific target to be
    # installed.
    case $TARGET in
        aarch64-apple-ios)
            rustup target install aarch64-apple-ios
            ;;
        armv7-apple-ios)
            rustup target install armv7-apple-ios
            ;;
        armv7s-apple-ios)
            rustup target install armv7s-apple-ios
            ;;
        i386-apple-ios)
            rustup target install i386-apple-ios
            ;;
        x86_64-apple-ios)
            rustup target install x86_64-apple-ios
            ;;
    esac

    # This fetches the latest stable release of cross
    local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \
                       | cut -d/ -f3 \
                       | grep -E '^v[0.1.0-9.]+$' \
                       | $sort --version-sort \
                       | tail -n1)
    curl -LSfs https://japaric.github.io/trust/install.sh | \
        sh -s -- \
           --force \
           --git japaric/cross \
           --tag $tag \
           --target $target
}

main
ci/script.sh (new file)
@@ -0,0 +1,30 @@
#!/usr/bin/env bash

# This script takes care of testing your crate

set -ex

main() {
    if [ ! -z $CODECOV ]; then
        echo "Codecov"
        cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
    else
        echo "Build"
        cross build --target $TARGET
        if [ ! -z $DISABLE_TESTS ]; then
            return
        fi
        echo "Test"
        cross test --target $TARGET --no-default-features --features mmap
        # quote the feature list so both features go to a single --features argument
        cross test --target $TARGET --no-default-features --features "mmap query-grammar"
    fi
    for example in $(ls examples/*.rs)
    do
        cargo run --example $(basename $example .rs)
    done
}

# we don't run the "test phase" when doing deploys
if [ -z $TRAVIS_TAG ]; then
    main
fi
cpp/simdcomp/.gitignore (vendored, deleted)
@@ -1,9 +0,0 @@
Makefile.in
lib*
unit*
*.o
src/*.lo
src/*.o
src/.deps
src/.dirstamp
src/.libs
@@ -1,11 +0,0 @@
language: c
sudo: false
compiler:
  - gcc
  - clang

branches:
  only:
    - master

script: make && ./unit
@@ -1,9 +0,0 @@
Upcoming
- added missing include
- improved portability (MSVC)
- implemented C89 compatibility
Version 0.0.3 (19 May 2014)
- improved documentation
Version 0.0.2 (6 February 2014)
- added go demo
Version 0.0.1 (5 February 2014)
@@ -1,27 +0,0 @@
Copyright (c) 2014--, The authors
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

* Neither the name of the {organization} nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,137 +0,0 @@
The SIMDComp library
====================
[Build Status](https://travis-ci.org/lemire/simdcomp)

A simple C library for compressing lists of integers using binary packing and SIMD instructions.
The assumption is either that you have a list of 32-bit integers where most of them are small, or a list of 32-bit integers where differences between successive integers are small. No software is able to reliably compress an array of 32-bit random numbers.

This library can decode at least 4 billion compressed integers per second on most
desktop or laptop processors. That is, it can decompress data at a rate of 15 GB/s.
This is significantly faster than generic codecs like gzip, LZO, Snappy or LZ4.

On a Skylake Intel processor, it can decode integers at a rate of 0.3 cycles per integer,
which easily translates into more than 8 billion decoded integers per second.

Contributors: Daniel Lemire, Nathan Kurz, Christoph Rupp, Anatol Belski, Nick White and others

What is it for?
-------------

This is a low-level library for fast integer compression. By design it does not define a compressed
format. It is up to the (sophisticated) user to create a compressed format.
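As a minimal sketch of what such a user-defined format can look like (assuming the `maxbits` and `simdpackwithoutmask` routines described in the Usage section below; `write_block` is a hypothetical helper name, and the one-byte-header layout mirrors the `compress` routine in example.c reproduced later on this page), each block can store its bit width in one byte followed by the packed payload:

```C
#include "simdcomp.h"

/* Hypothetical sketch of a user-defined format: one block of 128 integers is
 * stored as [1 byte: bit width b][b * 16 bytes: packed payload].
 * Returns a pointer just past the bytes written. */
uint8_t *write_block(const uint32_t *in, uint8_t *out) {
    uint32_t b = maxbits(in);          /* bit width needed for this block */
    *out++ = (uint8_t) b;              /* self-describing one-byte header */
    simdpackwithoutmask(in, (__m128i *) out, b);
    return out + b * sizeof(__m128i);  /* b 128-bit vectors were written */
}
```

A decoder would read the leading byte back and call `simdunpack` with the same bit width.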
Requirements
-------------

- Your processor should support SSE4.1 (it is supported by most Intel and AMD processors released since 2008).
- It is possible to build the core part of the code if your processor supports SSE2 (Pentium 4 or better).
- C99 compliant compiler (GCC is assumed)
- A Linux-like distribution is assumed by the makefile

For a plain C version that does not use SIMD instructions, see https://github.com/lemire/LittleIntPacker

Usage
-------

Compression works over blocks of 128 integers.

For a complete working example, see example.c (you can build it and
run it with "make example; ./example").

1) Lists of integers in random order.

```C
const uint32_t b = maxbits(datain); // computes bit width
simdpackwithoutmask(datain, buffer, b); // compressed to buffer, compressing 128 32-bit integers down to b*16 bytes
simdunpack(buffer, backbuffer, b); // uncompressed to backbuffer
```

While 128 32-bit integers are read, only b 128-bit words are written. Thus, the compression ratio is 32/b.
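As a concrete check of this arithmetic (a sketch reusing the calls above; the input values are chosen so that b = 8): 128 integers occupy 128 × 4 = 512 bytes uncompressed and pack into 8 × 16 = 128 bytes, a ratio of 32/8 = 4.

```C
#include <assert.h>
#include "simdcomp.h"

/* Sketch: verify the 32/b compression ratio on one block of 128 integers. */
void ratio_check(void) {
    uint32_t datain[128];
    __m128i buffer[32];           /* worst case: 32 vectors when b == 32 */
    uint32_t k, b;
    for (k = 0; k < 128; k++)
        datain[k] = k | 0x80;     /* values in [128, 255] need 8 bits */
    b = maxbits(datain);          /* b == 8 for this data */
    simdpackwithoutmask(datain, buffer, b);
    /* 512 bytes in, b * 16 = 128 bytes out: ratio 32/b = 4 */
    assert(b * sizeof(__m128i) == 128);
}
```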
2) Sorted lists of integers.

We use differential coding: we store the difference between successive integers. For this purpose, we need an initial value (called offset).

```C
uint32_t offset = 0;
uint32_t b1 = simdmaxbitsd1(offset, datain); // bit width
simdpackwithoutmaskd1(offset, datain, buffer, b1); // compressing 128 32-bit integers down to b1*16 bytes
simdunpackd1(offset, buffer, backbuffer, b1); // uncompressed
```

General example for arrays of arbitrary length:

```C
int compress_decompress_demo() {
  size_t k, N = 9999;
  __m128i * endofbuf;
  uint32_t * datain = malloc(N * sizeof(uint32_t));
  uint8_t * buffer;
  uint32_t * backbuffer = malloc(N * sizeof(uint32_t));
  uint32_t b;

  for (k = 0; k < N; ++k) { /* start with k=0, not k=1! */
    datain[k] = k;
  }

  b = maxbits_length(datain, N);
  buffer = malloc(simdpack_compressedbytes(N, b)); // allocate just enough memory
  endofbuf = simdpack_length(datain, N, (__m128i *)buffer, b);
  /* compressed data is stored between buffer and endofbuf using (endofbuf-buffer)*sizeof(__m128i) bytes */
  /* would be safe to do : buffer = realloc(buffer,(endofbuf-(__m128i *)buffer)*sizeof(__m128i)); */
  simdunpack_length((const __m128i *)buffer, N, backbuffer, b);

  for (k = 0; k < N; ++k) {
    if (datain[k] != backbuffer[k]) {
      printf("bug\n");
      return -1;
    }
  }
  return 0;
}
```

3) Frame-of-Reference

We also have frame-of-reference (FOR) functions (see the simdfor.h header). They work like the bit packing
routines, but do not use differential coding, so they allow faster search in some cases, at the expense
of compression.
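A minimal round-trip sketch (assuming the `simdpackFOR`/`simdunpackFOR` and `bits` declarations from the simdfor.h and simdcomputil.h headers reproduced later on this page; the frame of reference is the block minimum):

```C
#include "simdcomp.h"

/* Sketch: frame-of-reference round trip over one block of 128 integers.
 * Values are packed as offsets from the block minimum; no differential coding. */
void for_roundtrip(const uint32_t *datain /* 128 values */) {
    uint32_t backbuffer[128];
    __m128i out[32];                     /* worst case: 32 128-bit vectors */
    uint32_t minval = datain[0], maxval = datain[0];
    uint32_t i, b;
    for (i = 1; i < 128; i++) {          /* find the frame of reference */
        if (datain[i] < minval) minval = datain[i];
        if (datain[i] > maxval) maxval = datain[i];
    }
    b = bits(maxval - minval);           /* bit width of the largest offset */
    simdpackFOR(minval, datain, out, b);
    simdunpackFOR(minval, out, backbuffer, b);
}
```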
Setup
---------

    make
    make test

and if you are daring:

    make install

Go
--------

If you are a Go user, there is a "go" folder where you will find a simple demo.

Other libraries
----------------

* Fast decoder for VByte-compressed integers: https://github.com/lemire/MaskedVByte
* Fast integer compression in C using StreamVByte: https://github.com/lemire/streamvbyte
* FastPFOR is a C++ research library well suited to compressing unsorted arrays: https://github.com/lemire/FastPFor
* SIMDCompressionAndIntersection is a C++ research library well suited for sorted arrays (differential coding) and computing intersections: https://github.com/lemire/SIMDCompressionAndIntersection
* TurboPFor is a C library that offers lots of interesting optimizations. Well worth checking! (GPL license) https://github.com/powturbo/TurboPFor
* Oroch is a C++ library that offers a usable API (MIT license): https://github.com/ademakov/Oroch

References
------------

* Daniel Lemire, Leonid Boytsov, Nathan Kurz, SIMD Compression and the Intersection of Sorted Integers, Software: Practice & Experience 46 (6), 2016. http://arxiv.org/abs/1401.6399
* Daniel Lemire and Leonid Boytsov, Decoding billions of integers per second through vectorization, Software: Practice & Experience 45 (1), 2015. http://arxiv.org/abs/1209.2137 http://onlinelibrary.wiley.com/doi/10.1002/spe.2203/abstract
* Jeff Plaisance, Nathan Kurz, Daniel Lemire, Vectorized VByte Decoding, International Symposium on Web Algorithms, 2015. http://arxiv.org/abs/1503.07387
* Wayne Xin Zhao, Xudong Zhang, Daniel Lemire, Dongdong Shan, Jian-Yun Nie, Hongfei Yan, Ji-Rong Wen, A General SIMD-based Approach to Accelerating Compression Algorithms, ACM Transactions on Information Systems 33 (3), 2015. http://arxiv.org/abs/1502.01916
* T. D. Wu, Bitpacking techniques for indexing genomes: I. Hash tables, Algorithms for Molecular Biology 11 (5), 2016. http://almob.biomedcentral.com/articles/10.1186/s13015-016-0069-5
@@ -1,235 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "simdcomp.h"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# include <windows.h>
|
||||
|
||||
__int64 freq;
|
||||
|
||||
typedef __int64 time_snap_t;
|
||||
|
||||
static time_snap_t time_snap(void)
|
||||
{
|
||||
__int64 now;
|
||||
|
||||
QueryPerformanceCounter((LARGE_INTEGER *)&now);
|
||||
|
||||
return (__int64)((now*1000000)/freq);
|
||||
}
|
||||
# define TIME_SNAP_FMT "%I64d"
|
||||
#else
|
||||
# define time_snap clock
|
||||
# define TIME_SNAP_FMT "%lu"
|
||||
typedef clock_t time_snap_t;
|
||||
#endif
|
||||
|
||||
|
||||
void benchmarkSelect() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t backbuffer[128];
|
||||
uint32_t initial = 33;
|
||||
uint32_t b;
|
||||
time_snap_t S1, S2, S3;
|
||||
int i;
|
||||
printf("benchmarking select \n");
|
||||
|
||||
/* this test creates delta-encoded buffers with different bit widths, then
 * benchmarks the select (single-value retrieval) function at each position */
|
||||
for (b = 0; b <= 32; b++) {
|
||||
uint32_t prev = initial;
|
||||
uint32_t out[128];
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = ((uint32_t)(1655765 * i )) ;
|
||||
if(b < 32) buffer[i] %= (1<<b);
|
||||
}
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = buffer[i] + prev;
|
||||
prev = buffer[i];
|
||||
}
|
||||
|
||||
for (i = 1; i < 128; i++) {
|
||||
if(buffer[i] < buffer[i-1] )
|
||||
buffer[i] = buffer[i-1];
|
||||
}
|
||||
assert(simdmaxbitsd1(initial, buffer)<=b);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
out[i] = 0; /* memset would do too */
|
||||
}
|
||||
|
||||
/* delta-encode to 'b' bits */
|
||||
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
|
||||
|
||||
S1 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i % 128);
|
||||
assert(valretrieved == buffer[i%128]);
|
||||
}
|
||||
S2 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
|
||||
assert(backbuffer[i % 128] == buffer[i % 128]);
|
||||
}
|
||||
S3 = time_snap();
|
||||
printf("bit width = %d, fast select function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2));
|
||||
}
|
||||
}
|
||||
|
||||
int uint32_cmp(const void *a, const void *b)
|
||||
{
|
||||
const uint32_t *ia = (const uint32_t *)a;
|
||||
const uint32_t *ib = (const uint32_t *)b;
|
||||
if(*ia < *ib)
|
||||
return -1;
|
||||
else if (*ia > *ib)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* adapted from wikipedia */
|
||||
int binary_search(uint32_t * A, uint32_t key, int imin, int imax)
|
||||
{
|
||||
int imid;
|
||||
imax --;
|
||||
while(imin + 1 < imax) {
|
||||
imid = imin + ((imax - imin) / 2);
|
||||
|
||||
if (A[imid] > key) {
|
||||
imax = imid;
|
||||
} else if (A[imid] < key) {
|
||||
imin = imid;
|
||||
} else {
|
||||
return imid;
|
||||
}
|
||||
}
|
||||
return imax;
|
||||
}
|
||||
|
||||
|
||||
/* adapted from wikipedia */
|
||||
int lower_bound(uint32_t * A, uint32_t key, int imin, int imax)
|
||||
{
|
||||
int imid;
|
||||
imax --;
|
||||
while(imin + 1 < imax) {
|
||||
imid = imin + ((imax - imin) / 2);
|
||||
|
||||
if (A[imid] >= key) {
|
||||
imax = imid;
|
||||
} else if (A[imid] < key) {
|
||||
imin = imid;
|
||||
}
|
||||
}
|
||||
if(A[imin] >= key) return imin;
|
||||
return imax;
|
||||
}
|
||||
|
||||
void benchmarkSearch() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t backbuffer[128];
|
||||
uint32_t out[128];
|
||||
uint32_t result, initial = 0;
|
||||
uint32_t b, i;
|
||||
time_snap_t S1, S2, S3, S4;
|
||||
|
||||
printf("benchmarking search \n");
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 0; b <= 32; b++) {
|
||||
uint32_t prev = initial;
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = ((uint32_t)rand()) ;
|
||||
if(b < 32) buffer[i] %= (1<<b);
|
||||
}
|
||||
|
||||
qsort(buffer,128, sizeof(uint32_t), uint32_cmp);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = buffer[i] + prev;
|
||||
prev = buffer[i];
|
||||
}
|
||||
for (i = 1; i < 128; i++) {
|
||||
if(buffer[i] < buffer[i-1] )
|
||||
buffer[i] = buffer[i-1];
|
||||
}
|
||||
assert(simdmaxbitsd1(initial, buffer)<=b);
|
||||
for (i = 0; i < 128; i++) {
|
||||
out[i] = 0; /* memset would do too */
|
||||
}
|
||||
|
||||
/* delta-encode to 'b' bits */
|
||||
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
|
||||
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
assert(buffer[i] == backbuffer[i]);
|
||||
}
|
||||
S1 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
|
||||
int pos;
|
||||
uint32_t pseudorandomkey = buffer[i%128];
|
||||
__m128i vecinitial = _mm_set1_epi32(initial);
|
||||
pos = simdsearchd1(&vecinitial, (__m128i *)out, b,
|
||||
pseudorandomkey, &result);
|
||||
if((result < pseudorandomkey) || (buffer[pos] != result)) {
|
||||
printf("bug A.\n");
|
||||
} else if (pos > 0) {
|
||||
if(buffer[pos-1] >= pseudorandomkey)
|
||||
printf("bug B.\n");
|
||||
}
|
||||
}
|
||||
S2 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
int pos;
|
||||
uint32_t pseudorandomkey = buffer[i%128];
|
||||
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
|
||||
pos = lower_bound(backbuffer, pseudorandomkey, 0, 128);
|
||||
result = backbuffer[pos];
|
||||
|
||||
if((result < pseudorandomkey) || (buffer[pos] != result)) {
|
||||
printf("bug C.\n");
|
||||
} else if (pos > 0) {
|
||||
if(buffer[pos-1] >= pseudorandomkey)
|
||||
printf("bug D.\n");
|
||||
}
|
||||
}
|
||||
S3 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
|
||||
int pos;
|
||||
uint32_t pseudorandomkey = buffer[i%128];
|
||||
pos = simdsearchwithlengthd1(initial, (__m128i *)out, b, 128,
|
||||
pseudorandomkey, &result);
|
||||
if((result < pseudorandomkey) || (buffer[pos] != result)) {
|
||||
printf("bug A.\n");
|
||||
} else if (pos > 0) {
|
||||
if(buffer[pos-1] >= pseudorandomkey)
|
||||
printf("bug B.\n");
|
||||
}
|
||||
}
|
||||
S4 = time_snap();
|
||||
|
||||
printf("bit width = %d, fast search function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " , fast with length time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2), (S4-S3) );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int main() {
|
||||
#ifdef _MSC_VER
|
||||
QueryPerformanceFrequency((LARGE_INTEGER *)&freq);
|
||||
#endif
|
||||
benchmarkSearch();
|
||||
benchmarkSelect();
|
||||
return 0;
|
||||
}
|
||||
@@ -1,205 +0,0 @@
|
||||
#include <stdio.h>
|
||||
|
||||
#include "simdcomp.h"
|
||||
|
||||
|
||||
#define RDTSC_START(cycles) \
|
||||
do { \
|
||||
register unsigned cyc_high, cyc_low; \
|
||||
__asm volatile( \
|
||||
"cpuid\n\t" \
|
||||
"rdtsc\n\t" \
|
||||
"mov %%edx, %0\n\t" \
|
||||
"mov %%eax, %1\n\t" \
|
||||
: "=r"(cyc_high), "=r"(cyc_low)::"%rax", "%rbx", "%rcx", "%rdx"); \
|
||||
(cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \
|
||||
} while (0)
|
||||
|
||||
#define RDTSC_FINAL(cycles) \
|
||||
do { \
|
||||
register unsigned cyc_high, cyc_low; \
|
||||
__asm volatile( \
|
||||
"rdtscp\n\t" \
|
||||
"mov %%edx, %0\n\t" \
|
||||
"mov %%eax, %1\n\t" \
|
||||
"cpuid\n\t" \
|
||||
: "=r"(cyc_high), "=r"(cyc_low)::"%rax", "%rbx", "%rcx", "%rdx"); \
|
||||
(cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \
|
||||
} while (0)
|
||||
|
||||
|
||||
|
||||
|
||||
uint32_t * get_random_array_from_bit_width(uint32_t length, uint32_t bit) {
|
||||
uint32_t * answer = malloc(sizeof(uint32_t) * length);
|
||||
uint32_t mask = (uint32_t) ((UINT64_C(1) << bit) - 1);
|
||||
uint32_t i;
|
||||
for(i = 0; i < length; ++i) {
|
||||
answer[i] = rand() & mask;
|
||||
}
|
||||
return answer;
|
||||
}
|
||||
|
||||
uint32_t * get_random_array_from_bit_width_d1(uint32_t length, uint32_t bit) {
|
||||
uint32_t * answer = malloc(sizeof(uint32_t) * length);
|
||||
uint32_t mask = (uint32_t) ((UINT64_C(1) << bit) - 1);
|
||||
uint32_t i;
|
||||
answer[0] = rand() & mask;
|
||||
for(i = 1; i < length; ++i) {
|
||||
answer[i] = answer[i-1] + (rand() & mask);
|
||||
}
|
||||
return answer;
|
||||
}
|
||||
|
||||
|
||||
void demo128() {
|
||||
const uint32_t length = 128;
|
||||
uint32_t bit;
|
||||
printf("# --- %s\n", __func__);
|
||||
printf("# compressing %d integers\n",length);
|
||||
printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
|
||||
for(bit = 1; bit <= 32; ++bit) {
|
||||
uint32_t i;
|
||||
|
||||
uint32_t * data = get_random_array_from_bit_width(length, bit);
|
||||
__m128i * buffer = malloc(length * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(length * sizeof(uint32_t));
|
||||
uint32_t repeat = 500;
|
||||
uint64_t min_diff;
|
||||
printf("%d\t",bit);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
simdpackwithoutmask(data,buffer, bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
simdunpack(buffer, backdata,bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
|
||||
free(data);
|
||||
free(buffer);
|
||||
free(backdata);
|
||||
printf("\n");
|
||||
}
|
||||
printf("\n\n"); /* two blank lines are required by gnuplot */
|
||||
}
|
||||
|
||||
void demo128_d1() {
|
||||
const uint32_t length = 128;
|
||||
uint32_t bit;
|
||||
printf("# --- %s\n", __func__);
|
||||
printf("# compressing %d integers\n",length);
|
||||
printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
|
||||
for(bit = 1; bit <= 32; ++bit) {
|
||||
uint32_t i;
|
||||
|
||||
uint32_t * data = get_random_array_from_bit_width_d1(length, bit);
|
||||
__m128i * buffer = malloc(length * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(length * sizeof(uint32_t));
|
||||
uint32_t repeat = 500;
|
||||
uint64_t min_diff;
|
||||
printf("%d\t",bit);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
simdpackwithoutmaskd1(0,data,buffer, bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
simdunpackd1(0,buffer, backdata,bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
|
||||
free(data);
|
||||
free(buffer);
|
||||
free(backdata);
|
||||
printf("\n");
|
||||
}
|
||||
printf("\n\n"); /* two blank lines are required by gnuplot */
|
||||
}
|
||||
|
||||
#ifdef __AVX2__
|
||||
void demo256() {
|
||||
const uint32_t length = 256;
|
||||
uint32_t bit;
|
||||
printf("# --- %s\n", __func__);
|
||||
printf("# compressing %d integers\n",length);
|
||||
printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
|
||||
for(bit = 1; bit <= 32; ++bit) {
|
||||
uint32_t i;
|
||||
|
||||
uint32_t * data = get_random_array_from_bit_width(length, bit);
|
||||
__m256i * buffer = malloc(length * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(length * sizeof(uint32_t));
|
||||
uint32_t repeat = 500;
|
||||
uint64_t min_diff;
|
||||
printf("%d\t",bit);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
avxpackwithoutmask(data,buffer, bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
avxunpack(buffer, backdata,bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
|
||||
free(data);
|
||||
free(buffer);
|
||||
free(backdata);
|
||||
printf("\n");
|
||||
}
|
||||
printf("\n\n"); /* two blank lines are required by gnuplot */
|
||||
}
|
||||
#endif /* avx 2 */
|
||||
|
||||
|
||||
int main() {
|
||||
demo128();
|
||||
demo128_d1();
|
||||
#ifdef __AVX2__
|
||||
demo256();
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
|
||||
}
|
||||
@@ -1,195 +0,0 @@
|
||||
/* Type "make example" to build this example program. */
|
||||
#include <stdio.h>
|
||||
#include <time.h>
|
||||
#include <stdlib.h>
|
||||
#include "simdcomp.h"
|
||||
|
||||
/**
|
||||
We provide several different code examples.
|
||||
**/
|
||||
|
||||
|
||||
/* very simple test to illustrate a simple application */
|
||||
int compress_decompress_demo() {
|
||||
size_t k, N = 9999;
|
||||
__m128i * endofbuf;
|
||||
int howmanybytes;
|
||||
float compratio;
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint8_t * buffer;
|
||||
uint32_t * backbuffer = malloc(N * sizeof(uint32_t));
|
||||
uint32_t b;
|
||||
printf("== simple test\n");
|
||||
|
||||
for (k = 0; k < N; ++k) { /* start with k=0, not k=1! */
|
||||
datain[k] = k;
|
||||
}
|
||||
|
||||
b = maxbits_length(datain, N);
|
||||
buffer = malloc(simdpack_compressedbytes(N,b));
|
||||
endofbuf = simdpack_length(datain, N, (__m128i *)buffer, b);
|
||||
howmanybytes = (endofbuf-(__m128i *)buffer)*sizeof(__m128i); /* number of compressed bytes */
|
||||
compratio = N*sizeof(uint32_t) * 1.0 / howmanybytes;
|
||||
/* endofbuf points to the end of the compressed data */
|
||||
buffer = realloc(buffer,(endofbuf-(__m128i *)buffer)*sizeof(__m128i)); /* optional but safe. */
|
||||
printf("Compressed %d integers down to %d bytes (comp. ratio = %f).\n",(int)N,howmanybytes,compratio);
|
||||
/* in actual applications b must be stored and retrieved: caller is responsible for that. */
|
||||
simdunpack_length((const __m128i *)buffer, N, backbuffer, b); /* will return a pointer to endofbuf */
|
||||
|
||||
for (k = 0; k < N; ++k) {
|
||||
if(datain[k] != backbuffer[k]) {
|
||||
printf("bug at %lu \n",(unsigned long)k);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
printf("Code works!\n");
|
||||
free(datain);
|
||||
free(buffer);
|
||||
free(backbuffer);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* compresses data from datain to buffer, returns how many bytes written
|
||||
used below in simple_demo */
|
||||
size_t compress(uint32_t * datain, size_t length, uint8_t * buffer) {
|
||||
uint32_t offset;
|
||||
uint8_t * initout;
|
||||
size_t k;
|
||||
if(length/SIMDBlockSize*SIMDBlockSize != length) {
|
||||
printf("Data length should be a multiple of %i \n",SIMDBlockSize);
|
||||
}
|
||||
offset = 0;
|
||||
initout = buffer;
|
||||
for(k = 0; k < length / SIMDBlockSize; ++k) {
|
||||
uint32_t b = simdmaxbitsd1(offset,
|
||||
datain + k * SIMDBlockSize);
|
||||
*buffer++ = b;
|
||||
simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, (__m128i *) buffer,
|
||||
b);
|
||||
offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
|
||||
buffer += b * sizeof(__m128i);
|
||||
}
|
||||
return buffer - initout;
|
||||
}
|
||||
|
||||
/* Another illustration ... */
|
||||
void simple_demo() {
|
||||
size_t REPEAT = 10, gap;
|
||||
size_t N = 1000 * SIMDBlockSize;/* SIMDBlockSize is 128 */
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
size_t compsize;
|
||||
clock_t start, end;
|
||||
uint8_t * buffer = malloc(N * sizeof(uint32_t) + N / SIMDBlockSize); /* output buffer */
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
printf("== simple demo\n");
|
||||
for (gap = 1; gap <= 243; gap *= 3) {
|
||||
size_t k, repeat;
|
||||
uint32_t offset = 0;
|
||||
uint32_t bogus = 0;
|
||||
double numberofseconds;
|
||||
|
||||
printf("\n");
|
||||
printf(" gap = %lu \n", (unsigned long) gap);
|
||||
datain[0] = 0;
|
||||
for (k = 1; k < N; ++k)
|
||||
datain[k] = datain[k-1] + ( rand() % (gap + 1) );
|
||||
compsize = compress(datain,N,buffer);
|
||||
printf("compression ratio = %f \n", (N * sizeof(uint32_t))/ (compsize * 1.0 ));
|
||||
start = clock();
|
||||
for(repeat = 0; repeat < REPEAT; ++repeat) {
|
||||
uint8_t * decbuffer = buffer;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
uint8_t b = *decbuffer++;
|
||||
simdunpackd1(offset, (__m128i *) decbuffer, backbuffer, b);
|
||||
/* do something here with backbuffer */
|
||||
bogus += backbuffer[3];
|
||||
decbuffer += b * sizeof(__m128i);
|
||||
offset = backbuffer[SIMDBlockSize - 1];
|
||||
}
|
||||
}
|
||||
end = clock();
|
||||
numberofseconds = (end-start)/(double)CLOCKS_PER_SEC;
|
||||
printf("decoding speed in million of integers per second %f \n",N*REPEAT/(numberofseconds*1000.0*1000.0));
|
||||
start = clock();
|
||||
for(repeat = 0; repeat < REPEAT; ++repeat) {
|
||||
uint8_t * decbuffer = buffer;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
memcpy(backbuffer,decbuffer+k*SIMDBlockSize,SIMDBlockSize*sizeof(uint32_t));
|
||||
bogus += backbuffer[3] - backbuffer[100];
|
||||
}
|
||||
}
|
||||
end = clock();
|
||||
numberofseconds = (end-start)/(double)CLOCKS_PER_SEC;
|
||||
printf("memcpy speed in million of integers per second %f \n",N*REPEAT/(numberofseconds*1000.0*1000.0));
|
||||
printf("ignore me %i \n",bogus);
|
||||
printf("All tests are in CPU cache. Avoid out-of-cache decoding in applications.\n");
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
}
|
||||
|
||||
/* Used below in more_sophisticated_demo ... */
|
||||
size_t varying_bit_width_compress(uint32_t * datain, size_t length, uint8_t * buffer) {
|
||||
uint8_t * initout;
|
||||
size_t k;
|
||||
if(length/SIMDBlockSize*SIMDBlockSize != length) {
|
||||
printf("Data length should be a multiple of %i \n",SIMDBlockSize);
|
||||
}
|
||||
initout = buffer;
|
||||
for(k = 0; k < length / SIMDBlockSize; ++k) {
|
||||
uint32_t b = maxbits(datain);
|
||||
*buffer++ = b;
|
||||
simdpackwithoutmask(datain, (__m128i *)buffer, b);
|
||||
datain += SIMDBlockSize;
|
||||
buffer += b * sizeof(__m128i);
|
||||
}
|
||||
return buffer - initout;
|
||||
}
|
||||
|
||||
/* Here we compress the data in blocks of 128 integers with varying bit width */
|
||||
int varying_bit_width_demo() {
|
||||
size_t nn = 128 * 2;
|
||||
uint32_t * datainn = malloc(nn * sizeof(uint32_t));
|
||||
uint8_t * buffern = malloc(nn * sizeof(uint32_t) + nn / SIMDBlockSize);
|
||||
uint8_t * initbuffern = buffern;
|
||||
uint32_t * backbuffern = malloc(nn * sizeof(uint32_t));
|
||||
size_t k, compsize;
|
||||
printf("== varying bit-width demo\n");
|
||||
|
||||
for(k=0; k<nn; ++k) {
|
||||
datainn[k] = rand() % (k + 1);
|
||||
}
|
||||
|
||||
compsize = varying_bit_width_compress(datainn,nn,buffern);
|
||||
printf("encoded size: %u (original size: %u)\n", (unsigned)compsize,
|
||||
(unsigned)(nn * sizeof(uint32_t)));
|
||||
|
||||
for (k = 0; k * SIMDBlockSize < nn; ++k) {
|
||||
uint32_t b = *buffern;
|
||||
buffern++;
|
||||
simdunpack((const __m128i *)buffern, backbuffern + k * SIMDBlockSize, b);
|
||||
buffern += b * sizeof(__m128i);
|
||||
}
|
||||
|
||||
for (k = 0; k < nn; ++k) {
|
||||
if(backbuffern[k] != datainn[k]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
printf("Code works!\n");
|
||||
free(datainn);
|
||||
free(initbuffern);
|
||||
free(backbuffern);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main() {
|
||||
if(compress_decompress_demo() != 0) return -1;
|
||||
if(varying_bit_width_demo() != 0) return -1;
|
||||
simple_demo();
|
||||
return 0;
|
||||
}
|
||||
@@ -1,13 +0,0 @@
Simple Go demo
==============

Setup
======

Start by installing the simdcomp library (make && make install).

Then type:

    go run test.go
@@ -1,71 +0,0 @@
|
||||
/////////
|
||||
// This particular file is in the public domain.
|
||||
// Author: Daniel Lemire
|
||||
////////
|
||||
|
||||
package main
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lsimdcomp
|
||||
#include <simdcomp.h>
|
||||
*/
|
||||
import "C"
|
||||
import "fmt"
|
||||
|
||||
//////////
|
||||
// For this demo, we pack and unpack blocks of 128 integers
|
||||
/////////
|
||||
func main() {
|
||||
// I am going to use C types. Alternative might be to use unsafe.Pointer calls, see http://bit.ly/1ndw3W3
|
||||
// this is our original data
|
||||
var data [128]C.uint32_t
|
||||
for i := C.uint32_t(0); i < C.uint32_t(128); i++ {
|
||||
data[i] = i
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
////////////
|
||||
// We first pack without differential coding
|
||||
///////////
|
||||
// computing how many bits per int. is needed
|
||||
b := C.maxbits(&data[0])
|
||||
ratio := 32.0/float64(b)
|
||||
fmt.Println("Bit width ", b)
|
||||
fmt.Println(fmt.Sprintf("Compression ratio %f ", ratio))
|
||||
// we are now going to create a buffer to receive the packed data (each __m128i uses 128 bits)
|
||||
out := make([] C.__m128i,b)
|
||||
C.simdpackwithoutmask( &data[0],&out[0],b);
|
||||
var recovereddata [128]C.uint32_t
|
||||
C.simdunpack(&out[0],&recovereddata[0],b)
|
||||
for i := 0; i < 128; i++ {
|
||||
if data[i] != recovereddata[i] {
|
||||
fmt.Println("Bug ")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
///////////
|
||||
// Next, we use differential coding
|
||||
//////////
|
||||
offset := C.uint32_t(0) // if you pack data from K to K + 128, offset should be the value at K-1. When K = 0, choose a default
|
||||
b1 := C.simdmaxbitsd1(offset,&data[0])
|
||||
ratio1 := 32.0/float64(b1)
|
||||
fmt.Println("Bit width ", b1)
|
||||
fmt.Println(fmt.Sprintf("Compression ratio %f ", ratio1))
|
||||
// we are now going to create a buffer to receive the packed data (each __m128i uses 128 bits)
|
||||
out = make([] C.__m128i,b1)
|
||||
C.simdpackwithoutmaskd1(offset, &data[0],&out[0],b1);
|
||||
C.simdunpackd1(offset,&out[0],&recovereddata[0],b1)
|
||||
for i := 0; i < 128; i++ {
|
||||
if data[i] != recovereddata[i] {
|
||||
fmt.Println("Bug ")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("test succesful.")
|
||||
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef INCLUDE_AVXBITPACKING_H_
|
||||
#define INCLUDE_AVXBITPACKING_H_
|
||||
|
||||
|
||||
#ifdef __AVX2__
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
|
||||
/* AVX2 is required */
|
||||
#include <immintrin.h>
|
||||
/* for memset */
|
||||
#include <string.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
|
||||
enum{ AVXBlockSize = 256};
|
||||
|
||||
/* max integer logarithm over a range of AVXBlockSize integers (256 integer) */
|
||||
uint32_t avxmaxbits(const uint32_t * begin);
|
||||
|
||||
/* reads 256 values from "in", writes "bit" 256-bit vectors to "out" */
|
||||
void avxpack(const uint32_t * in,__m256i * out, const uint32_t bit);
|
||||
|
||||
/* reads 256 values from "in", writes "bit" 256-bit vectors to "out" */
|
||||
void avxpackwithoutmask(const uint32_t * in,__m256i * out, const uint32_t bit);
|
||||
|
||||
/* reads "bit" 256-bit vectors from "in", writes 256 values to "out" */
|
||||
void avxunpack(const __m256i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* __AVX2__ */
|
||||
|
||||
#endif /* INCLUDE_AVXBITPACKING_H_ */
|
||||
@@ -1,81 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#ifndef SIMDBITCOMPAT_H_
|
||||
#define SIMDBITCOMPAT_H_
|
||||
|
||||
#include <iso646.h> /* mostly for Microsoft compilers */
|
||||
#include <string.h>
|
||||
|
||||
#if SIMDCOMP_DEBUG
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# define SIMDCOMP_PURE
|
||||
#else
|
||||
# if defined(__GNUC__)
|
||||
# if __GNUC__ >= 3
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline __attribute__((always_inline))
|
||||
# define SIMDCOMP_NEVER_INLINE __attribute__((noinline))
|
||||
# define SIMDCOMP_PURE __attribute__((pure))
|
||||
# else
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# define SIMDCOMP_PURE
|
||||
# endif
|
||||
# elif defined(_MSC_VER)
|
||||
# define SIMDCOMP_ALWAYS_INLINE __forceinline
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# define SIMDCOMP_PURE
|
||||
# else
|
||||
# if __has_attribute(always_inline)
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline __attribute__((always_inline))
|
||||
# else
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline
|
||||
# endif
|
||||
# if __has_attribute(noinline)
|
||||
# define SIMDCOMP_NEVER_INLINE __attribute__((noinline))
|
||||
# else
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# endif
|
||||
# if __has_attribute(pure)
|
||||
# define SIMDCOMP_PURE __attribute__((pure))
|
||||
# else
|
||||
# define SIMDCOMP_PURE
|
||||
# endif
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) && _MSC_VER < 1600
|
||||
typedef unsigned int uint32_t;
|
||||
typedef unsigned char uint8_t;
|
||||
typedef signed char int8_t;
|
||||
#else
|
||||
#include <stdint.h> /* part of Visual Studio 2010 and better, others likely anyway */
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#define SIMDCOMP_ALIGNED(x) __declspec(align(x))
|
||||
#else
|
||||
#if defined(__GNUC__)
|
||||
#define SIMDCOMP_ALIGNED(x) __attribute__ ((aligned(x)))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# include <intrin.h>
|
||||
/* 64-bit needs extending */
|
||||
# define SIMDCOMP_CTZ(result, mask) do { \
|
||||
unsigned long index; \
|
||||
if (!_BitScanForward(&(index), (mask))) { \
|
||||
(result) = 32U; \
|
||||
} else { \
|
||||
(result) = (uint32_t)(index); \
|
||||
} \
|
||||
} while (0)
|
||||
#else
|
||||
# define SIMDCOMP_CTZ(result, mask) \
|
||||
result = __builtin_ctz(mask)
|
||||
#endif
|
||||
|
||||
#endif /* SIMDBITCOMPAT_H_ */
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#ifndef SIMDBITPACKING_H_
|
||||
#define SIMDBITPACKING_H_
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
/* for memset */
|
||||
#include <string.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
|
||||
/***
|
||||
* Please see example.c for various examples on how to make good use
|
||||
* of these functions.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
|
||||
* The input values are masked so that only the least significant "bit" bits are used. */
|
||||
void simdpack(const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
|
||||
* The input values are assumed to be less than 1<<bit. */
|
||||
void simdpackwithoutmask(const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
|
||||
void simdunpack(const __m128i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
|
||||
/* how many compressed bytes are needed to compress length integers using a bit width of bit with
   the simdpack_length function. */
|
||||
int simdpack_compressedbytes(int length, const uint32_t bit);
|
||||
|
||||
/* like simdpack, but supports an undetermined number of inputs.
|
||||
* This is useful if you need to unpack an array of integers that is not divisible by 128 integers.
|
||||
* Returns a pointer to the (advanced) compressed array. Compressed data is stored in the memory location between
|
||||
the provided (out) pointer and the returned pointer. */
|
||||
__m128i * simdpack_length(const uint32_t * in, size_t length, __m128i * out, const uint32_t bit);
|
||||
|
||||
/* like simdunpack, but supports an undetermined number of inputs.
|
||||
* This is useful if you need to unpack an array of integers that is not divisible by 128 integers.
|
||||
* Returns a pointer to the (advanced) compressed array. The read compressed data is between the provided
|
||||
(in) pointer and the returned pointer. */
|
||||
const __m128i * simdunpack_length(const __m128i * in, size_t length, uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
|
||||
|
||||
/* like simdpack, but supports an undetermined small number of inputs. This is useful if you need to pack less
|
||||
than 128 integers.
|
||||
* Note that this function is much slower.
|
||||
* Returns a pointer to the (advanced) compressed array. Compressed data is stored in the memory location
|
||||
between the provided (out) pointer and the returned pointer. */
|
||||
__m128i * simdpack_shortlength(const uint32_t * in, int length, __m128i * out, const uint32_t bit);
|
||||
|
||||
/* like simdunpack, but supports an undetermined small number of inputs. This is useful if you need to unpack less
|
||||
than 128 integers.
|
||||
* Note that this function is much slower.
|
||||
* Returns a pointer to the (advanced) compressed array. The read compressed data is between the provided (in)
|
||||
pointer and the returned pointer. */
|
||||
const __m128i * simdunpack_shortlength(const __m128i * in, int length, uint32_t * out, const uint32_t bit);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value" */
|
||||
void simdfastset(__m128i * in128, uint32_t b, uint32_t value, size_t index);
|
||||
|
||||
#endif /* SIMDBITPACKING_H_ */
|
||||
@@ -1,22 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef SIMDCOMP_H_
|
||||
#define SIMDCOMP_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "simdbitpacking.h"
|
||||
#include "simdcomputil.h"
|
||||
#include "simdfor.h"
|
||||
#include "simdintegratedbitpacking.h"
|
||||
#include "avxbitpacking.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -1,54 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef SIMDCOMPUTIL_H_
|
||||
#define SIMDCOMPUTIL_H_
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
|
||||
|
||||
|
||||
|
||||
/* returns the integer logarithm of v (bit width) */
|
||||
uint32_t bits(const uint32_t v);
|
||||
|
||||
/* max integer logarithm over a range of SIMDBlockSize integers (128 integer) */
|
||||
uint32_t maxbits(const uint32_t * begin);
|
||||
|
||||
/* same as maxbits, but we specify the number of integers */
|
||||
uint32_t maxbits_length(const uint32_t * in,uint32_t length);
|
||||
|
||||
enum{ SIMDBlockSize = 128};
|
||||
|
||||
|
||||
/* computes (quickly) the minimal value of 128 values */
|
||||
uint32_t simdmin(const uint32_t * in);
|
||||
|
||||
/* computes (quickly) the minimal value of the specified number of values */
|
||||
uint32_t simdmin_length(const uint32_t * in, uint32_t length);
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
/* computes (quickly) the minimal and maximal value of the specified number of values */
|
||||
void simdmaxmin_length(const uint32_t * in, uint32_t length, uint32_t * getmin, uint32_t * getmax);
|
||||
|
||||
/* computes (quickly) the minimal and maximal value of the 128 values */
|
||||
void simdmaxmin(const uint32_t * in, uint32_t * getmin, uint32_t * getmax);
|
||||
|
||||
#endif
|
||||
|
||||
/* like maxbit over 128 integers (SIMDBlockSize) with provided initial value
|
||||
and using differential coding */
|
||||
uint32_t simdmaxbitsd1(uint32_t initvalue, const uint32_t * in);
|
||||
|
||||
/* like simdmaxbitsd1, but calculates maxbits over |length| integers
|
||||
with provided initial value. |length| can be any arbitrary value. */
|
||||
uint32_t simdmaxbitsd1_length(uint32_t initvalue, const uint32_t * in,
|
||||
uint32_t length);
|
||||
|
||||
|
||||
|
||||
#endif /* SIMDCOMPUTIL_H_ */
|
||||
@@ -1,72 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#ifndef INCLUDE_SIMDFOR_H_
|
||||
#define INCLUDE_SIMDFOR_H_
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
#include "simdbitpacking.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out" */
|
||||
void simdpackFOR(uint32_t initvalue, const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
|
||||
void simdunpackFOR(uint32_t initvalue, const __m128i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* how many compressed bytes are needed to compress length integers using a bit width of bit with
   the simdpackFOR_length function. */
|
||||
int simdpackFOR_compressedbytes(int length, const uint32_t bit);
|
||||
|
||||
/* like simdpackFOR, but supports an undetermined number of inputs.
|
||||
This is useful if you need to pack less than 128 integers. Note that this function is much slower.
|
||||
Compressed data is stored in the memory location between
|
||||
the provided (out) pointer and the returned pointer. */
|
||||
__m128i * simdpackFOR_length(uint32_t initvalue, const uint32_t * in, int length, __m128i * out, const uint32_t bit);
|
||||
|
||||
/* like simdunpackFOR, but supports an undetermined number of inputs.
|
||||
This is useful if you need to unpack less than 128 integers. Note that this function is much slower.
|
||||
The read compressed data is between the provided
|
||||
(in) pointer and the returned pointer. */
|
||||
const __m128i * simdunpackFOR_length(uint32_t initvalue, const __m128i * in, int length, uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* returns the value stored at the specified "slot".
|
||||
* */
|
||||
uint32_t simdselectFOR(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int slot);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value" */
|
||||
void simdfastsetFOR(uint32_t initvalue, __m128i * in, uint32_t bit, uint32_t value, size_t index);
|
||||
|
||||
|
||||
/* searches "bit" 128-bit vectors from "in" (= length<=128 encoded integers) for the first encoded uint32 value
|
||||
* which is >= |key|, and returns its position. It is assumed that the values
|
||||
* stored are in sorted order.
|
||||
* The encoded key is stored in "*presult".
|
||||
 * Only the first length decoded integers are considered; the others are ignored.
 * If no value is larger than or equal to the key, length is returned.
 * Length should be no larger than 128. */
|
||||
int simdsearchwithlengthFOR(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int length, uint32_t key, uint32_t *presult);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* INCLUDE_SIMDFOR_H_ */
|
||||
@@ -1,98 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef SIMD_INTEGRATED_BITPACKING_H
|
||||
#define SIMD_INTEGRATED_BITPACKING_H
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
#include "simdbitpacking.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out"
|
||||
integer values should be in sorted order (for best results).
|
||||
The differences are masked so that only the least significant "bit" bits are used. */
|
||||
void simdpackd1(uint32_t initvalue, const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out"
|
||||
integer values should be in sorted order (for best results).
|
||||
The difference values are assumed to be less than 1<<bit. */
|
||||
void simdpackwithoutmaskd1(uint32_t initvalue, const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
|
||||
void simdunpackd1(uint32_t initvalue, const __m128i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* searches "bit" 128-bit vectors from "in" (= 128 encoded integers) for the first encoded uint32 value
|
||||
* which is >= |key|, and returns its position. It is assumed that the values
|
||||
* stored are in sorted order.
|
||||
* The encoded key is stored in "*presult". If no value is larger or equal to the key,
|
||||
* 128 is returned. The pointer initOffset is a pointer to the last four value decoded
|
||||
* (when starting out, this can be a zero vector or initialized with _mm_set1_epi32(init)),
|
||||
* and the vector gets updated.
|
||||
**/
|
||||
int
|
||||
simdsearchd1(__m128i * initOffset, const __m128i *in, uint32_t bit,
|
||||
uint32_t key, uint32_t *presult);
|
||||
|
||||
|
||||
/* searches "bit" 128-bit vectors from "in" (= length<=128 encoded integers) for the first encoded uint32 value
|
||||
* which is >= |key|, and returns its position. It is assumed that the values
|
||||
* stored are in sorted order.
|
||||
* The encoded key is stored in "*presult".
|
||||
 * Only the first length decoded integers are considered; the others are ignored.
 * If no value is larger than or equal to the key, length is returned.
 * Length should be no larger than 128. */
|
||||
int simdsearchwithlengthd1(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int length, uint32_t key, uint32_t *presult);
|
||||
|
||||
|
||||
|
||||
/* returns the value stored at the specified "slot".
|
||||
* */
|
||||
uint32_t simdselectd1(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int slot);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value",
|
||||
* you must somehow know the previous value.
|
||||
* Because of differential coding, all following values are incremented by the offset between this new
|
||||
* value and the old value...
|
||||
* This function is useful if you want to modify the last value.
|
||||
*/
|
||||
void simdfastsetd1fromprevious( __m128i * in, uint32_t bit, uint32_t previousvalue, uint32_t value, size_t index);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value",
|
||||
* This function computes the previous value if needed.
|
||||
* Because of differential coding, all following values are incremented by the offset between this new
|
||||
* value and the old value...
|
||||
* This function is useful if you want to modify the last value.
|
||||
*/
|
||||
void simdfastsetd1(uint32_t initvalue, __m128i * in, uint32_t bit, uint32_t value, size_t index);
|
||||
|
||||
|
||||
/*Simply scan the data
|
||||
* The pointer initOffset is a pointer to the last four value decoded
|
||||
* (when starting out, this can be a zero vector or initialized with _mm_set1_epi32(init);),
|
||||
* and the vector gets updated.
|
||||
* */
|
||||
|
||||
void
|
||||
simdscand1(__m128i * initOffset, const __m128i *in, uint32_t bit);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif
|
||||
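For orientation, the d1 entry points above implement differential coding: a block of 128 sorted integers is stored as bit-packed deltas against a running offset. A minimal sketch of a round trip, using only the declarations above (the function name roundtrip_d1 and the zero initial offset are illustrative, not part of the library):

/* hedged sketch: pack 128 sorted values as deltas, then decode them again */
#include "simdcomp.h"

void roundtrip_d1(const uint32_t sorted[128], uint32_t decoded[128]) {
    __m128i buffer[32];                          /* worst case: bit width 32 */
    const uint32_t b = simdmaxbitsd1(0, sorted); /* bit width of the deltas  */
    simdpackwithoutmaskd1(0, sorted, buffer, b); /* writes b 128-bit vectors */
    simdunpackd1(0, buffer, decoded, b);         /* reads them back          */
}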
@@ -1,79 +0,0 @@

# minimalist makefile
.SUFFIXES:
#
.SUFFIXES: .cpp .o .c .h
ifeq ($(DEBUG),1)
CFLAGS = -fPIC -std=c89 -ggdb -msse4.1 -march=native -Wall -Wextra -Wshadow -fsanitize=undefined -fno-omit-frame-pointer -fsanitize=address
else
CFLAGS = -fPIC -std=c89 -O3 -msse4.1 -march=native -Wall -Wextra -Wshadow
endif # debug
LDFLAGS = -shared
LIBNAME=libsimdcomp.so.0.0.3
all: unit unit_chars bitpackingbenchmark $(LIBNAME)
test:
	./unit
	./unit_chars
install: $(OBJECTS)
	cp $(LIBNAME) /usr/local/lib
	ln -s /usr/local/lib/$(LIBNAME) /usr/local/lib/libsimdcomp.so
	ldconfig
	cp $(HEADERS) /usr/local/include



HEADERS=./include/simdbitpacking.h ./include/simdcomputil.h ./include/simdintegratedbitpacking.h ./include/simdcomp.h ./include/simdfor.h ./include/avxbitpacking.h

uninstall:
	for h in $(HEADERS) ; do rm /usr/local/$$h; done
	rm /usr/local/lib/$(LIBNAME)
	rm /usr/local/lib/libsimdcomp.so
	ldconfig


OBJECTS= simdbitpacking.o simdintegratedbitpacking.o simdcomputil.o \
	simdpackedsearch.o simdpackedselect.o simdfor.o avxbitpacking.o

$(LIBNAME): $(OBJECTS)
	$(CC) $(CFLAGS) -o $(LIBNAME) $(OBJECTS) $(LDFLAGS)


avxbitpacking.o: ./src/avxbitpacking.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/avxbitpacking.c -Iinclude


simdfor.o: ./src/simdfor.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdfor.c -Iinclude


simdcomputil.o: ./src/simdcomputil.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdcomputil.c -Iinclude

simdbitpacking.o: ./src/simdbitpacking.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdbitpacking.c -Iinclude

simdintegratedbitpacking.o: ./src/simdintegratedbitpacking.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdintegratedbitpacking.c -Iinclude

simdpackedsearch.o: ./src/simdpackedsearch.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdpackedsearch.c -Iinclude

simdpackedselect.o: ./src/simdpackedselect.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdpackedselect.c -Iinclude

example: ./example.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o example ./example.c -Iinclude $(OBJECTS)

unit: ./tests/unit.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o unit ./tests/unit.c -Iinclude $(OBJECTS)

bitpackingbenchmark: ./benchmarks/bitpackingbenchmark.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o bitpackingbenchmark ./benchmarks/bitpackingbenchmark.c -Iinclude $(OBJECTS)
benchmark: ./benchmarks/benchmark.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o benchmark ./benchmarks/benchmark.c -Iinclude $(OBJECTS)
dynunit: ./tests/unit.c $(HEADERS) $(LIBNAME)
	$(CC) $(CFLAGS) -o dynunit ./tests/unit.c -Iinclude -lsimdcomp

unit_chars: ./tests/unit_chars.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o unit_chars ./tests/unit_chars.c -Iinclude $(OBJECTS)
clean:
	rm -f unit *.o $(LIBNAME) example benchmark bitpackingbenchmark dynunit unit_chars
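As a usage note for the Makefile above: plain make builds the unit tests, the bit-packing benchmark and the shared library; make test runs ./unit and ./unit_chars; make install copies the library and the headers listed in HEADERS under /usr/local; building with DEBUG=1 selects the sanitizer flags defined at the top.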
@@ -1,104 +0,0 @@

!IFNDEF MACHINE
!IF "$(PROCESSOR_ARCHITECTURE)"=="AMD64"
MACHINE=x64
!ELSE
MACHINE=x86
!ENDIF
!ENDIF

!IFNDEF DEBUG
DEBUG=no
!ENDIF

!IFNDEF CC
CC=cl.exe
!ENDIF

!IFNDEF AR
AR=lib.exe
!ENDIF

!IFNDEF LINK
LINK=link.exe
!ENDIF

!IFNDEF PGO
PGO=no
!ENDIF

!IFNDEF PGI
PGI=no
!ENDIF

INC = /Iinclude

!IF "$(DEBUG)"=="yes"
CFLAGS = /nologo /MDd /LDd /Od /Zi /D_DEBUG /RTC1 /W3 /GS /Gm
ARFLAGS = /nologo
LDFLAGS = /nologo /debug /nodefaultlib:msvcrt
!ELSE
CFLAGS = /nologo /MD /O2 /Zi /DNDEBUG /W3 /Gm- /GS /Gy /Oi /GL /MP
ARFLAGS = /nologo /LTCG
LDFLAGS = /nologo /LTCG /DYNAMICBASE /incremental:no /debug /opt:ref,icf
!ENDIF

!IF "$(PGI)"=="yes"
LDFLAGS = $(LDFLAGS) /ltcg:pgi
!ENDIF

!IF "$(PGO)"=="yes"
LDFLAGS = $(LDFLAGS) /ltcg:pgo
!ENDIF

LIB_OBJS = simdbitpacking.obj simdintegratedbitpacking.obj simdcomputil.obj \
	simdpackedsearch.obj simdpackedselect.obj simdfor.obj


all: lib dll dynunit unit_chars example benchmark
# need some good use case scenario to train the instrumented build
	@if "$(PGI)"=="yes" echo Running PGO training
	@if "$(PGI)"=="yes" benchmark.exe >nul 2>&1
	@if "$(PGI)"=="yes" example.exe >nul 2>&1


$(LIB_OBJS):
	$(CC) $(INC) $(CFLAGS) /c src/simdbitpacking.c src/simdintegratedbitpacking.c src/simdcomputil.c \
	src/simdpackedsearch.c src/simdpackedselect.c src/simdfor.c

lib: $(LIB_OBJS)
	$(AR) $(ARFLAGS) /OUT:simdcomp_a.lib $(LIB_OBJS)

dll: $(LIB_OBJS)
	$(LINK) /DLL $(LDFLAGS) /OUT:simdcomp.dll /IMPLIB:simdcomp.lib /DEF:simdcomp.def $(LIB_OBJS)

unit: lib
	$(CC) $(INC) $(CFLAGS) /c src/unit.c
	$(LINK) $(LDFLAGS) /OUT:unit.exe unit.obj simdcomp_a.lib

dynunit: dll
	$(CC) $(INC) $(CFLAGS) /c src/unit.c
	$(LINK) $(LDFLAGS) /OUT:unit.exe unit.obj simdcomp.lib

unit_chars: lib
	$(CC) $(INC) $(CFLAGS) /c src/unit_chars.c
	$(LINK) $(LDFLAGS) /OUT:unit_chars.exe unit_chars.obj simdcomp.lib


example: lib
	$(CC) $(INC) $(CFLAGS) /c example.c
	$(LINK) $(LDFLAGS) /OUT:example.exe example.obj simdcomp.lib

benchmark: lib
	$(CC) $(INC) $(CFLAGS) /c src/benchmark.c
	$(LINK) $(LDFLAGS) /OUT:benchmark.exe benchmark.obj simdcomp.lib

clean:
	del /Q *.obj
	del /Q *.lib
	del /Q *.exe
	del /Q *.dll
	del /Q *.pgc
	del /Q *.pgd
	del /Q *.pdb
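A note on the NMAKE variant above: DEBUG=yes switches to the /MDd debug flags, and profile-guided optimization is a two-pass affair, first a build with PGI=yes (the all target then runs benchmark.exe and example.exe as training workloads), followed by a relink with PGO=yes.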
@@ -1,16 +0,0 @@
{
  "name": "simdcomp",
  "version": "0.0.3",
  "repo": "lemire/simdcomp",
  "description": "A simple C library for compressing lists of integers",
  "license": "BSD-3-Clause",
  "src": [
    "src/simdbitpacking.c",
    "src/simdcomputil.c",
    "src/simdintegratedbitpacking.c",
    "include/simdbitpacking.h",
    "include/simdcomp.h",
    "include/simdcomputil.h",
    "include/simdintegratedbitpacking.h"
  ]
}
@@ -1,182 +0,0 @@
#!/usr/bin/env python
import sys

def howmany(bit):
    """ how many values are we going to pack? """
    return 256

def howmanywords(bit):
    return (howmany(bit) * bit + 255)/256

def howmanybytes(bit):
    return howmanywords(bit) * 16

print("""
/** code generated by avxpacking.py starts here **/
""")

print("""typedef void (*avxpackblockfnc)(const uint32_t * pin, __m256i * compressed);""")
print("""typedef void (*avxunpackblockfnc)(const __m256i * compressed, uint32_t * pout);""")


def plurial(number):
    if(number <> 1):
        return "s"
    else :
        return ""

print("")
print("static void avxpackblock0(const uint32_t * pin, __m256i * compressed) {");
print(" (void)compressed;");
print(" (void) pin; /* we consumed {0} 32-bit integer{1} */ ".format(howmany(0),plurial(howmany(0))));
print("}");
print("")

for bit in range(1,33):
    print("")
    print("/* we are going to pack {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit),bit,howmanywords(bit),howmanybytes(bit)))
    print("static void avxpackblock{0}(const uint32_t * pin, __m256i * compressed) {{".format(bit));
    print(" const __m256i * in = (const __m256i *) pin;");
    print(" /* we are going to touch {0} 256-bit word{1} */ ".format(howmanywords(bit),plurial(howmanywords(bit))));
    if(howmanywords(bit) == 1):
        print(" __m256i w0;")
    else:
        print(" __m256i w0, w1;")
    if( (bit & (bit-1)) <> 0) : print(" __m256i tmp; /* used to store inputs at word boundary */")
    oldword = 0
    for j in range(howmany(bit)/8):
        firstword = j * bit / 32
        if(firstword > oldword):
            print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(oldword,oldword%2))
            oldword = firstword
        secondword = (j * bit + bit - 1)/32
        firstshift = (j*bit) % 32
        if( firstword == secondword):
            if(firstshift == 0):
                print(" w{0} = _mm256_lddqu_si256 (in + {1});".format(firstword%2,j))
            else:
                print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(_mm256_lddqu_si256 (in + {1}) , {2}));".format(firstword%2,j,firstshift))
        else:
            print(" tmp = _mm256_lddqu_si256 (in + {0});".format(j))
            print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(tmp , {2}));".format(firstword%2,j,firstshift))
            secondshift = 32-firstshift
            print(" w{0} = _mm256_srli_epi32(tmp,{2});".format(secondword%2,j,secondshift))
    print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(secondword,secondword%2))
    print("}");
    print("")


print("")
print("static void avxpackblockmask0(const uint32_t * pin, __m256i * compressed) {");
print(" (void)compressed;");
print(" (void) pin; /* we consumed {0} 32-bit integer{1} */ ".format(howmany(0),plurial(howmany(0))));
print("}");
print("")

for bit in range(1,33):
    print("")
    print("/* we are going to pack {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit),bit,howmanywords(bit),howmanybytes(bit)))
    print("static void avxpackblockmask{0}(const uint32_t * pin, __m256i * compressed) {{".format(bit));
    print(" /* we are going to touch {0} 256-bit word{1} */ ".format(howmanywords(bit),plurial(howmanywords(bit))));
    if(howmanywords(bit) == 1):
        print(" __m256i w0;")
    else:
        print(" __m256i w0, w1;")
    print(" const __m256i * in = (const __m256i *) pin;");
    if(bit < 32): print(" const __m256i mask = _mm256_set1_epi32({0});".format((1<<bit)-1));
    def maskfnc(x):
        if(bit == 32): return x
        return " _mm256_and_si256 ( mask, {0}) ".format(x)
    if( (bit & (bit-1)) <> 0) : print(" __m256i tmp; /* used to store inputs at word boundary */")
    oldword = 0
    for j in range(howmany(bit)/8):
        firstword = j * bit / 32
        if(firstword > oldword):
            print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(oldword,oldword%2))
            oldword = firstword
        secondword = (j * bit + bit - 1)/32
        firstshift = (j*bit) % 32
        loadstr = maskfnc(" _mm256_lddqu_si256 (in + {0}) ".format(j))
        if( firstword == secondword):
            if(firstshift == 0):
                print(" w{0} = {1};".format(firstword%2,loadstr))
            else:
                print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32({1} , {2}));".format(firstword%2,loadstr,firstshift))
        else:
            print(" tmp = {0};".format(loadstr))
            print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(tmp , {2}));".format(firstword%2,j,firstshift))
            secondshift = 32-firstshift
            print(" w{0} = _mm256_srli_epi32(tmp,{2});".format(secondword%2,j,secondshift))
    print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(secondword,secondword%2))
    print("}");
    print("")


print("static void avxunpackblock0(const __m256i * compressed, uint32_t * pout) {");
print(" (void) compressed;");
print(" memset(pout,0,{0});".format(howmany(0)));
print("}");
print("")

for bit in range(1,33):
    print("")
    print("/* we packed {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit),bit,howmanywords(bit),howmanybytes(bit)))
    print("static void avxunpackblock{0}(const __m256i * compressed, uint32_t * pout) {{".format(bit));
    print(" /* we are going to access {0} 256-bit word{1} */ ".format(howmanywords(bit),plurial(howmanywords(bit))));
    if(howmanywords(bit) == 1):
        print(" __m256i w0;")
    else:
        print(" __m256i w0, w1;")
    print(" __m256i * out = (__m256i *) pout;");
    if(bit < 32): print(" const __m256i mask = _mm256_set1_epi32({0});".format((1<<bit)-1));
    maskstr = " _mm256_and_si256 ( mask, {0}) "
    if (bit == 32) : maskstr = " {0} " # no need
    oldword = 0
    print(" w0 = _mm256_lddqu_si256 (compressed);")
    for j in range(howmany(bit)/8):
        firstword = j * bit / 32
        secondword = (j * bit + bit - 1)/32
        if(secondword > oldword):
            print(" w{0} = _mm256_lddqu_si256 (compressed + {1});".format(secondword%2,secondword))
            oldword = secondword
        firstshift = (j*bit) % 32
        firstshiftstr = "_mm256_srli_epi32( w{0} , "+str(firstshift)+") "
        if(firstshift == 0):
            firstshiftstr =" w{0} " # no need
        wfirst = firstshiftstr.format(firstword%2)
        if( firstword == secondword):
            if(firstshift + bit <> 32):
                wfirst = maskstr.format(wfirst)
            print(" _mm256_storeu_si256(out + {0}, {1});".format(j,wfirst))
        else:
            secondshift = (32-firstshift)
            wsecond = "_mm256_slli_epi32( w{0} , {1} ) ".format((firstword+1)%2,secondshift)
            wfirstorsecond = " _mm256_or_si256 ({0},{1}) ".format(wfirst,wsecond)
            wfirstorsecond = maskstr.format(wfirstorsecond)
            print(" _mm256_storeu_si256(out + {0},\n {1});".format(j,wfirstorsecond))
    print("}");
    print("")


print("static avxpackblockfnc avxfuncPackArr[] = {")
for bit in range(0,32):
    print("&avxpackblock{0},".format(bit))
print("&avxpackblock32")
print("};")

print("static avxpackblockfnc avxfuncPackMaskArr[] = {")
for bit in range(0,32):
    print("&avxpackblockmask{0},".format(bit))
print("&avxpackblockmask32")
print("};")


print("static avxunpackblockfnc avxfuncUnpackArr[] = {")
for bit in range(0,32):
    print("&avxunpackblock{0},".format(bit))
print("&avxunpackblock32")
print("};")
print("/** code generated by avxpacking.py ends here **/")
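A hedged sketch of how the dispatch tables emitted at the end of avxpacking.py are meant to be consumed inside the generated translation unit (the array names come from the generator output above; avx_roundtrip and the 32 words of scratch space are illustrative assumptions):

/* hedged sketch: pack and unpack one 256-integer block through the tables */
static void avx_roundtrip(uint32_t data[256], uint32_t bit) {
    __m256i compressed[32];                    /* howmanywords(bit) <= 32    */
    avxfuncPackMaskArr[bit](data, compressed); /* mask, then pack 256 values */
    avxfuncUnpackArr[bit](compressed, data);   /* unpack them again          */
}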
@@ -1,152 +0,0 @@
#!/usr/bin/env python3


from math import ceil

print("""
/**
 * Blablabla
 *
 */

""");

def mask(bit):
    return str((1 << bit) - 1)

for length in [32]:
    print("""
static __m128i iunpackFOR0(__m128i initOffset, const __m128i * _in , uint32_t * _out) {
    __m128i *out = (__m128i*)(_out);
    int i;
    (void) _in;
    for (i = 0; i < 8; ++i) {
        _mm_store_si128(out++, initOffset);
        _mm_store_si128(out++, initOffset);
        _mm_store_si128(out++, initOffset);
        _mm_store_si128(out++, initOffset);
    }

    return initOffset;
}

""")
    print("""

static void ipackFOR0(__m128i initOffset , const uint32_t * _in , __m128i * out ) {
    (void) initOffset;
    (void) _in;
    (void) out;
}
""")
    for bit in range(1,33):
        offsetVar = " initOffset";
        print("""
static void ipackFOR"""+str(bit)+"""(__m128i """+offsetVar+""", const uint32_t * _in, __m128i * out) {
    const __m128i *in = (const __m128i*)(_in);
    __m128i OutReg;

""");

        if (bit != 32):
            print(" __m128i CurrIn = _mm_load_si128(in);");
            print(" __m128i InReg = _mm_sub_epi32(CurrIn, initOffset);");
        else:
            print(" __m128i InReg = _mm_load_si128(in);");
            print(" (void) initOffset;");

        inwordpointer = 0
        valuecounter = 0
        for k in range(ceil((length * bit) / 32)):
            if(valuecounter == length): break
            for x in range(inwordpointer,32,bit):
                if(x!=0) :
                    print(" OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, " + str(x) + "));");
                else:
                    print(" OutReg = InReg; ");
                if((x+bit>=32) ):
                    while(inwordpointer<32):
                        inwordpointer += bit
                    print(" _mm_store_si128(out, OutReg);");
                    print("");

                    if(valuecounter + 1 < length):
                        print(" ++out;")
                    inwordpointer -= 32;
                    if(inwordpointer>0):
                        print(" OutReg = _mm_srli_epi32(InReg, " + str(bit) + " - " + str(inwordpointer) + ");");
                if(valuecounter + 1 < length):
                    print(" ++in;")

                    if (bit != 32):
                        print(" CurrIn = _mm_load_si128(in);");
                        print(" InReg = _mm_sub_epi32(CurrIn, initOffset);");
                    else:
                        print(" InReg = _mm_load_si128(in);");
                    print("");
                valuecounter = valuecounter + 1
                if(valuecounter == length): break
        assert(valuecounter == length)
        print("\n}\n\n""")

    for bit in range(1,32):
        offsetVar = " initOffset";
        print("""\n
static __m128i iunpackFOR"""+str(bit)+"""(__m128i """+offsetVar+""", const __m128i* in, uint32_t * _out) {
""");
        print(""" __m128i* out = (__m128i*)(_out);
 __m128i InReg = _mm_load_si128(in);
 __m128i OutReg;
 __m128i tmp;
 const __m128i mask = _mm_set1_epi32((1U<<"""+str(bit)+""")-1);

""");

        MainText = "";

        MainText += "\n";
        inwordpointer = 0
        valuecounter = 0
        for k in range(ceil((length * bit) / 32)):
            for x in range(inwordpointer,32,bit):
                if(valuecounter == length): break
                if (x > 0):
                    MainText += " tmp = _mm_srli_epi32(InReg," + str(x) +");\n";
                else:
                    MainText += " tmp = InReg;\n";
                if(x+bit<32):
                    MainText += " OutReg = _mm_and_si128(tmp, mask);\n";
                else:
                    MainText += " OutReg = tmp;\n";
                if((x+bit>=32) ):
                    while(inwordpointer<32):
                        inwordpointer += bit
                    if(valuecounter + 1 < length):
                        MainText += " ++in;"
                        MainText += " InReg = _mm_load_si128(in);\n";
                    inwordpointer -= 32;
                    if(inwordpointer>0):
                        MainText += " OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, " + str(bit) + "-" + str(inwordpointer) + "), mask));\n\n";
                if (bit != 32):
                    MainText += " OutReg = _mm_add_epi32(OutReg, initOffset);\n";
                MainText += " _mm_store_si128(out++, OutReg);\n\n";
                MainText += "";
                valuecounter = valuecounter + 1
                if(valuecounter == length): break
        assert(valuecounter == length)
        print(MainText)
        print(" return initOffset;");
        print("\n}\n\n")

print("""
static __m128i iunpackFOR32(__m128i initvalue , const __m128i* in, uint32_t * _out) {
    __m128i * mout = (__m128i *)_out;
    __m128i invec;
    size_t k;
    for(k = 0; k < 128/4; ++k) {
        invec = _mm_load_si128(in++);
        _mm_store_si128(mout++, invec);
    }
    return invec;
}
""")
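For contrast with the d1 scheme, the FOR ("frame of reference") code generated above subtracts a fixed base from every value instead of the previous element, so no running offset is needed when decoding. A hedged sketch against the public simdcomp entry points exercised by the tests further down (roundtrip_for is an illustrative name; the calls match the signatures used in those tests):

/* hedged sketch: FOR-pack one 128-integer block against its minimum */
#include "simdcomp.h"

void roundtrip_for(const uint32_t in[128], uint32_t out[128]) {
    uint32_t tmin, tmax, b;
    __m128i buffer[32];
    simdmaxmin_length(in, 128, &tmin, &tmax); /* frame = tmin, range to tmax   */
    b = bits(tmax - tmin);                    /* width needed for value - tmin */
    simdpackFOR(tmin, in, buffer, b);         /* pack offsets from the frame   */
    simdunpackFOR(tmin, buffer, out, b);      /* decode: add tmin back         */
}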
@@ -1,40 +0,0 @@
EXPORTS
  simdpack
  simdpackwithoutmask
  simdunpack
  bits
  maxbits
  maxbits_length
  simdmin
  simdmin_length
  simdmaxmin
  simdmaxmin_length
  simdmaxbitsd1
  simdmaxbitsd1_length
  simdpackd1
  simdpackwithoutmaskd1
  simdunpackd1
  simdsearchd1
  simdsearchwithlengthd1
  simdselectd1
  simdpackFOR
  simdselectFOR
  simdsearchwithlengthFOR
  simdunpackFOR
  simdmin_length
  simdmaxmin
  simdmaxmin_length
  simdpack_length
  simdpackFOR_length
  simdunpackFOR_length
  simdpack_shortlength
  simdfastsetFOR
  simdfastset
  simdfastsetd1
  simdunpack_length
  simdunpack_shortlength
  simdsearchwithlengthFOR
  simdscand1
  simdfastsetd1fromprevious
  simdfastsetd1
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,234 +0,0 @@
/**
 * This code is released under a BSD License.
 */

#include "simdcomputil.h"
#ifdef __SSE4_1__
#include <smmintrin.h>
#endif
#include <assert.h>

#define Delta(curr, prev) \
    _mm_sub_epi32(curr, \
        _mm_or_si128(_mm_slli_si128(curr, 4), _mm_srli_si128(prev, 12)))

/* returns the integer logarithm of v (bit width) */
uint32_t bits(const uint32_t v) {
#ifdef _MSC_VER
    unsigned long answer;
    if (v == 0) {
        return 0;
    }
    _BitScanReverse(&answer, v);
    return answer + 1;
#else
    return v == 0 ? 0 : 32 - __builtin_clz(v); /* assume GCC-like compiler if not microsoft */
#endif
}



static uint32_t maxbitas32int(const __m128i accumulator) {
    const __m128i _tmp1 = _mm_or_si128(_mm_srli_si128(accumulator, 8), accumulator); /* (A,B,C,D) or (0,0,A,B) = (A,B,C|A,D|B) */
    const __m128i _tmp2 = _mm_or_si128(_mm_srli_si128(_tmp1, 4), _tmp1); /* fold once more: the low lane now ors all four lanes */
    uint32_t ans = _mm_cvtsi128_si32(_tmp2);
    return bits(ans);
}

SIMDCOMP_PURE uint32_t maxbits(const uint32_t * begin) {
    const __m128i* pin = (const __m128i*)(begin);
    __m128i accumulator = _mm_loadu_si128(pin);
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        __m128i newvec = _mm_loadu_si128(pin+k);
        accumulator = _mm_or_si128(accumulator,newvec);
    }
    return maxbitas32int(accumulator);
}
static uint32_t orasint(const __m128i accumulator) {
    const __m128i _tmp1 = _mm_or_si128(_mm_srli_si128(accumulator, 8), accumulator); /* (A,B,C,D) or (0,0,A,B) = (A,B,C|A,D|B) */
    const __m128i _tmp2 = _mm_or_si128(_mm_srli_si128(_tmp1, 4), _tmp1); /* fold once more: the low lane now ors all four lanes */
    return _mm_cvtsi128_si32(_tmp2);
}

#ifdef __SSE4_1__

static uint32_t minasint(const __m128i accumulator) {
    const __m128i _tmp1 = _mm_min_epu32(_mm_srli_si128(accumulator, 8), accumulator); /* same folding, but with an unsigned minimum */
    const __m128i _tmp2 = _mm_min_epu32(_mm_srli_si128(_tmp1, 4), _tmp1); /* the low lane now holds the minimum of all four lanes */
    return _mm_cvtsi128_si32(_tmp2);
}

static uint32_t maxasint(const __m128i accumulator) {
    const __m128i _tmp1 = _mm_max_epu32(_mm_srli_si128(accumulator, 8), accumulator); /* same folding, but with an unsigned maximum */
    const __m128i _tmp2 = _mm_max_epu32(_mm_srli_si128(_tmp1, 4), _tmp1); /* the low lane now holds the maximum of all four lanes */
    return _mm_cvtsi128_si32(_tmp2);
}

uint32_t simdmin(const uint32_t * in) {
    const __m128i* pin = (const __m128i*)(in);
    __m128i accumulator = _mm_loadu_si128(pin);
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        __m128i newvec = _mm_loadu_si128(pin+k);
        accumulator = _mm_min_epu32(accumulator,newvec);
    }
    return minasint(accumulator);
}

void simdmaxmin(const uint32_t * in, uint32_t * getmin, uint32_t * getmax) {
    const __m128i* pin = (const __m128i*)(in);
    __m128i minaccumulator = _mm_loadu_si128(pin);
    __m128i maxaccumulator = minaccumulator;
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        __m128i newvec = _mm_loadu_si128(pin+k);
        minaccumulator = _mm_min_epu32(minaccumulator,newvec);
        maxaccumulator = _mm_max_epu32(maxaccumulator,newvec);
    }
    *getmin = minasint(minaccumulator);
    *getmax = maxasint(maxaccumulator);
}


uint32_t simdmin_length(const uint32_t * in, uint32_t length) {
    uint32_t currentmin = 0xFFFFFFFF;
    uint32_t lengthdividedby4 = length / 4;
    uint32_t offset = lengthdividedby4 * 4;
    uint32_t k;
    if (lengthdividedby4 > 0) {
        const __m128i* pin = (const __m128i*)(in);
        __m128i accumulator = _mm_loadu_si128(pin);
        k = 1;
        for(; 4*k < lengthdividedby4 * 4; ++k) {
            __m128i newvec = _mm_loadu_si128(pin+k);
            accumulator = _mm_min_epu32(accumulator,newvec);
        }
        currentmin = minasint(accumulator);
    }
    for (k = offset; k < length; ++k)
        if (in[k] < currentmin)
            currentmin = in[k];
    return currentmin;
}

void simdmaxmin_length(const uint32_t * in, uint32_t length, uint32_t * getmin, uint32_t * getmax) {
    uint32_t lengthdividedby4 = length / 4;
    uint32_t offset = lengthdividedby4 * 4;
    uint32_t k;
    *getmin = 0xFFFFFFFF;
    *getmax = 0;
    if (lengthdividedby4 > 0) {
        const __m128i* pin = (const __m128i*)(in);
        __m128i minaccumulator = _mm_loadu_si128(pin);
        __m128i maxaccumulator = minaccumulator;
        k = 1;
        for(; 4*k < lengthdividedby4 * 4; ++k) {
            __m128i newvec = _mm_loadu_si128(pin+k);
            minaccumulator = _mm_min_epu32(minaccumulator,newvec);
            maxaccumulator = _mm_max_epu32(maxaccumulator,newvec);
        }
        *getmin = minasint(minaccumulator);
        *getmax = maxasint(maxaccumulator);
    }
    for (k = offset; k < length; ++k) {
        if (in[k] < *getmin)
            *getmin = in[k];
        if (in[k] > *getmax)
            *getmax = in[k];
    }
}

#endif

SIMDCOMP_PURE uint32_t maxbits_length(const uint32_t * in,uint32_t length) {
    uint32_t k;
    uint32_t lengthdividedby4 = length / 4;
    uint32_t offset = lengthdividedby4 * 4;
    uint32_t bigxor = 0;
    if(lengthdividedby4 > 0) {
        const __m128i* pin = (const __m128i*)(in);
        __m128i accumulator = _mm_loadu_si128(pin);
        k = 1;
        for(; 4*k < 4*lengthdividedby4; ++k) {
            __m128i newvec = _mm_loadu_si128(pin+k);
            accumulator = _mm_or_si128(accumulator,newvec);
        }
        bigxor = orasint(accumulator);
    }
    for(k = offset; k < length; ++k)
        bigxor |= in[k];
    return bits(bigxor);
}


/* maxbit over 128 integers (SIMDBlockSize) with provided initial value */
uint32_t simdmaxbitsd1(uint32_t initvalue, const uint32_t * in) {
    __m128i initoffset = _mm_set1_epi32 (initvalue);
    const __m128i* pin = (const __m128i*)(in);
    __m128i newvec = _mm_loadu_si128(pin);
    __m128i accumulator = Delta(newvec , initoffset);
    __m128i oldvec = newvec;
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        newvec = _mm_loadu_si128(pin+k);
        accumulator = _mm_or_si128(accumulator,Delta(newvec , oldvec));
        oldvec = newvec;
    }
    initoffset = oldvec;
    return maxbitas32int(accumulator);
}


/* maxbit over |length| integers with provided initial value */
uint32_t simdmaxbitsd1_length(uint32_t initvalue, const uint32_t * in,
                uint32_t length) {
    __m128i newvec;
    __m128i oldvec;
    __m128i initoffset;
    __m128i accumulator;
    const __m128i *pin;
    uint32_t tmparray[4];
    uint32_t k = 1;
    uint32_t acc;

    assert(length > 0);

    pin = (const __m128i *)(in);
    initoffset = _mm_set1_epi32(initvalue);
    switch (length) {
      case 1:
        newvec = _mm_set1_epi32(in[0]);
        break;
      case 2:
        newvec = _mm_setr_epi32(in[0], in[1], in[1], in[1]);
        break;
      case 3:
        newvec = _mm_setr_epi32(in[0], in[1], in[2], in[2]);
        break;
      default:
        newvec = _mm_loadu_si128(pin);
        break;
    }
    accumulator = Delta(newvec, initoffset);
    oldvec = newvec;

    /* process 4 integers and build an accumulator */
    while (k * 4 + 4 <= length) {
        newvec = _mm_loadu_si128(pin + k);
        accumulator = _mm_or_si128(accumulator, Delta(newvec, oldvec));
        oldvec = newvec;
        k++;
    }

    /* extract the accumulator as an integer */
    _mm_storeu_si128((__m128i *)(tmparray), accumulator);
    acc = tmparray[0] | tmparray[1] | tmparray[2] | tmparray[3];

    /* now process the remaining integers */
    for (k *= 4; k < length; k++)
        acc |= in[k] - (k == 0 ? initvalue : in[k - 1]);

    /* return the number of bits */
    return bits(acc);
}
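To make the bit-width convention above concrete, a few values that follow directly from the definition of bits():

/* hedged sketch: sanity checks for the bit-width convention */
#include <assert.h>
#include "simdcomputil.h"

static void bits_examples(void) {
    assert(bits(0) == 0);
    assert(bits(1) == 1);
    assert(bits(255) == 8);          /* 8 bits suffice for 255 */
    assert(bits(256) == 9);          /* 256 needs a ninth bit  */
    assert(bits(0xFFFFFFFFu) == 32);
}

maxbits() is then just bits() applied to the bitwise or of a whole 128-integer block, which is why an or-reduction is enough to size the packed output.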
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,900 +0,0 @@
/**
 * This code is released under a BSD License.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "simdcomp.h"



int testshortpack() {
    int bit;
    size_t i;
    size_t length;
    __m128i * bb;
    srand(0);
    printf("testshortpack\n");
    for (bit = 0; bit < 32; ++bit) {
        const size_t N = 128;
        uint32_t * data = malloc(N * sizeof(uint32_t));
        uint32_t * backdata = malloc(N * sizeof(uint32_t));
        uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));

        for (i = 0; i < N; ++i) {
            data[i] = rand() & ((1 << bit) - 1);
        }
        for (length = 0; length <= N; ++length) {
            for (i = 0; i < N; ++i) {
                backdata[i] = 0;
            }
            bb = simdpack_shortlength(data, length, (__m128i *) buffer,
                            bit);
            if((bb - (__m128i *) buffer) * sizeof(__m128i) != (unsigned) simdpack_compressedbytes(length,bit)) {
                printf("bug\n");
                return -1;
            }
            simdunpack_shortlength((__m128i *) buffer, length,
                            backdata, bit);
            for (i = 0; i < length; ++i) {
                if (data[i] != backdata[i]) {
                    printf("bug\n");
                    return -1;
                }
            }
        }
        free(data);
        free(backdata);
        free(buffer);
    }
    return 0;
}

int testlongpack() {
    int bit;
    size_t i;
    size_t length;
    __m128i * bb;
    srand(0);
    printf("testlongpack\n");
    for (bit = 0; bit < 32; ++bit) {
        const size_t N = 2048;
        uint32_t * data = malloc(N * sizeof(uint32_t));
        uint32_t * backdata = malloc(N * sizeof(uint32_t));
        uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));

        for (i = 0; i < N; ++i) {
            data[i] = rand() & ((1 << bit) - 1);
        }
        for (length = 0; length <= N; ++length) {
            for (i = 0; i < N; ++i) {
                backdata[i] = 0;
            }
            bb = simdpack_length(data, length, (__m128i *) buffer,
                            bit);
            if((bb - (__m128i *) buffer) * sizeof(__m128i) != (unsigned) simdpack_compressedbytes(length,bit)) {
                printf("bug\n");
                return -1;
            }
            simdunpack_length((__m128i *) buffer, length,
                            backdata, bit);
            for (i = 0; i < length; ++i) {
                if (data[i] != backdata[i]) {
                    printf("bug\n");
                    return -1;
                }
            }
        }
        free(data);
        free(backdata);
        free(buffer);
    }
    return 0;
}



int testset() {
    int bit;
    size_t i;
    const size_t N = 128;
    uint32_t * data = malloc(N * sizeof(uint32_t));
    uint32_t * backdata = malloc(N * sizeof(uint32_t));
    uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));

    srand(0);

    for (bit = 0; bit < 32; ++bit) {
        printf("simple set %d \n",bit);

        for (i = 0; i < N; ++i) {
            data[i] = rand() & ((1 << bit) - 1);
        }
        for (i = 0; i < N; ++i) {
            backdata[i] = 0;
        }
        simdpack(data, (__m128i *) buffer, bit);
        simdunpack((__m128i *) buffer, backdata, bit);
        for (i = 0; i < N; ++i) {
            if (data[i] != backdata[i]) {
                printf("bug\n");
                return -1;
            }
        }

        for(i = N ; i > 0; i--) {
            simdfastset((__m128i *) buffer, bit, data[N - i], i - 1);
        }
        simdunpack((__m128i *) buffer, backdata, bit);
        for (i = 0; i < N; ++i) {
            if (data[i] != backdata[N - i - 1]) {
                printf("bug\n");
                return -1;
            }
        }
        simdpack(data, (__m128i *) buffer, bit);
        for(i = 1 ; i <= N; i++) {
            simdfastset((__m128i *) buffer, bit, data[i - 1], i - 1);
        }
        simdunpack((__m128i *) buffer, backdata, bit);
        for (i = 0; i < N; ++i) {
            if (data[i] != backdata[i]) {
                printf("bug\n");
                return -1;
            }
        }

    }
    free(data);
    free(backdata);
    free(buffer);

    return 0;
}

#ifdef __SSE4_1__

int testsetd1() {
    int bit;
    size_t i;
    uint32_t newvalue;
    const size_t N = 128;
    uint32_t * data = malloc(N * sizeof(uint32_t));
    uint32_t * datazeroes = malloc(N * sizeof(uint32_t));

    uint32_t * backdata = malloc(N * sizeof(uint32_t));
    uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));

    srand(0);
    for (bit = 0; bit < 32; ++bit) {
        printf("simple set d1 %d \n",bit);
        data[0] = rand() & ((1 << bit) - 1);
        datazeroes[0] = 0;

        for (i = 1; i < N; ++i) {
            data[i] = data[i - 1] + (rand() & ((1 << bit) - 1));
            datazeroes[i] = 0;
        }
        for (i = 0; i < N; ++i) {
            backdata[i] = 0;
        }
        simdpackd1(0,datazeroes, (__m128i *) buffer, bit);
        for(i = 1 ; i <= N; i++) {
            simdfastsetd1(0,(__m128i *) buffer, bit, data[i - 1], i - 1);
            newvalue = simdselectd1(0, (const __m128i *) buffer, bit,i - 1);
            if( newvalue != data[i-1] ) {
                printf("bad set-select\n");
                return -1;
            }
        }
        simdunpackd1(0,(__m128i *) buffer, backdata, bit);
        for (i = 0; i < N; ++i) {
            if (data[i] != backdata[i])
                return -1;
        }
    }
    free(data);
    free(backdata);
    free(buffer);
    free(datazeroes);
    return 0;
}
#endif

int testsetFOR() {
    int bit;
    size_t i;
    uint32_t newvalue;
    const size_t N = 128;
    uint32_t * data = malloc(N * sizeof(uint32_t));
    uint32_t * datazeroes = malloc(N * sizeof(uint32_t));

    uint32_t * backdata = malloc(N * sizeof(uint32_t));
    uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));

    srand(0);
    for (bit = 0; bit < 32; ++bit) {
        printf("simple set FOR %d \n",bit);
        for (i = 0; i < N; ++i) {
            data[i] = (rand() & ((1 << bit) - 1));
            datazeroes[i] = 0;
        }
        for (i = 0; i < N; ++i) {
            backdata[i] = 0;
        }
        simdpackFOR(0,datazeroes, (__m128i *) buffer, bit);
        for(i = 1 ; i <= N; i++) {
            simdfastsetFOR(0,(__m128i *) buffer, bit, data[i - 1], i - 1);
            newvalue = simdselectFOR(0, (const __m128i *) buffer, bit,i - 1);
            if( newvalue != data[i-1] ) {
                printf("bad set-select\n");
                return -1;
            }
        }
        simdunpackFOR(0,(__m128i *) buffer, backdata, bit);
        for (i = 0; i < N; ++i) {
            if (data[i] != backdata[i])
                return -1;
        }
    }
    free(data);
    free(backdata);
    free(buffer);
    free(datazeroes);
    return 0;
}

int testshortFORpack() {
    int bit;
    size_t i;
    __m128i * rb;
    size_t length;
    uint32_t offset = 7;
    srand(0);
    for (bit = 0; bit < 32; ++bit) {
        const size_t N = 128;
        uint32_t * data = malloc(N * sizeof(uint32_t));
        uint32_t * backdata = malloc(N * sizeof(uint32_t));
        uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));

        for (i = 0; i < N; ++i) {
            data[i] = (rand() & ((1 << bit) - 1)) + offset;
        }
        for (length = 0; length <= N; ++length) {
            for (i = 0; i < N; ++i) {
                backdata[i] = 0;
            }
            rb = simdpackFOR_length(offset,data, length, (__m128i *) buffer,
                            bit);
            if(((rb - (__m128i *) buffer)*sizeof(__m128i)) != (unsigned) simdpackFOR_compressedbytes(length,bit)) {
                return -1;
            }
            simdunpackFOR_length(offset,(__m128i *) buffer, length,
                            backdata, bit);
            for (i = 0; i < length; ++i) {
                if (data[i] != backdata[i])
                    return -1;
            }
        }
        free(data);
        free(backdata);
        free(buffer);
    }
    return 0;
}


#ifdef __AVX2__

int testbabyavx() {
    int bit;
    int trial;
    unsigned int i,j;
    const size_t N = AVXBlockSize;
    srand(0);
    printf("testbabyavx\n");
    printf("bit = ");
    for (bit = 0; bit < 32; ++bit) {
        printf(" %d ",bit);
        fflush(stdout);
        for(trial = 0; trial < 100; ++trial) {
            uint32_t * data = malloc(N * sizeof(uint32_t)+ 64 * sizeof(uint32_t));
            uint32_t * backdata = malloc(N * sizeof(uint32_t) + 64 * sizeof(uint32_t) );
            __m256i * buffer = malloc((2 * N + 1024) * sizeof(uint32_t) + 32);

            for (i = 0; i < N; ++i) {
                data[i] = rand() & ((uint32_t)(1 << bit) - 1);
            }
            for (i = 0; i < N; ++i) {
                backdata[i] = 0;
            }
            if(avxmaxbits(data) != maxbits_length(data,N)) {
                printf("avxmaxbits is buggy\n");
                return -1;
            }

            avxpackwithoutmask(data, buffer, bit);
            avxunpack(buffer, backdata, bit);
            for (i = 0; i < AVXBlockSize; ++i) {
                if (data[i] != backdata[i]) {
                    printf("bug\n");
                    for (j = 0; j < N; ++j) {
                        if (data[j] != backdata[j]) {
                            printf("data[%d]=%d v.s. backdata[%d]=%d\n",j,data[j],j,backdata[j]);
                        } else {
                            printf("data[%d]=%d\n",j,data[j]);
                        }
                    }
                    return -1;
                }
            }
            free(data);
            free(backdata);
            free(buffer);
        }
    }
    printf("\n");
    return 0;
}

int testavx2() {
    int N = 5000 * AVXBlockSize, gap;
    __m256i * buffer = malloc(AVXBlockSize * sizeof(uint32_t));
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint32_t * backbuffer = malloc(AVXBlockSize * sizeof(uint32_t));
    for (gap = 1; gap <= 387420489; gap *= 3) {
        int k;
        printf(" gap = %u \n", gap);
        for (k = 0; k < N; ++k)
            datain[k] = k * gap;
        for (k = 0; k * AVXBlockSize < N; ++k) {
            /*
               First part works for general arrays (sorted or unsorted)
            */
            int j;
            /* we compute the bit width */
            const uint32_t b = avxmaxbits(datain + k * AVXBlockSize);
            if(avxmaxbits(datain + k * AVXBlockSize) != maxbits_length(datain + k * AVXBlockSize,AVXBlockSize)) {
                printf("avxmaxbits is buggy %d %d \n",
                            avxmaxbits(datain + k * AVXBlockSize),
                            maxbits_length(datain + k * AVXBlockSize,AVXBlockSize));
                return -1;
            }
            printf("bit width = %d\n",b);

            /* we read 256 integers at "datain + k * AVXBlockSize" and
               write b 256-bit vectors at "buffer" */
            avxpackwithoutmask(datain + k * AVXBlockSize, buffer, b);
            /* we read back b 256-bit vectors at "buffer" and write 256 integers at backbuffer */
            avxunpack(buffer, backbuffer, b);/* uncompressed */
            for (j = 0; j < AVXBlockSize; ++j) {
                if (backbuffer[j] != datain[k * AVXBlockSize + j]) {
                    int i;
                    printf("bug in avxpack\n");
                    for(i = 0; i < AVXBlockSize; ++i) {
                        printf("data[%d]=%d got back %d %s\n",i,
                                    datain[k * AVXBlockSize + i],backbuffer[i],
                                    datain[k * AVXBlockSize + i]!=backbuffer[i]?"bug":"");
                    }
                    return -2;
                }
            }
        }
    }
    free(buffer);
    free(datain);
    free(backbuffer);
    printf("Code looks good.\n");
    return 0;
}
#endif /* avx2 */

int test() {
    int N = 5000 * SIMDBlockSize, gap;
    __m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    for (gap = 1; gap <= 387420489; gap *= 3) {
        int k;
        printf(" gap = %u \n", gap);
        for (k = 0; k < N; ++k)
            datain[k] = k * gap;
        for (k = 0; k * SIMDBlockSize < N; ++k) {
            /*
               First part works for general arrays (sorted or unsorted)
            */
            int j;
            /* we compute the bit width */
            const uint32_t b = maxbits(datain + k * SIMDBlockSize);
            /* we read 128 integers at "datain + k * SIMDBlockSize" and
               write b 128-bit vectors at "buffer" */
            simdpackwithoutmask(datain + k * SIMDBlockSize, buffer, b);
            /* we read back b 128-bit vectors at "buffer" and write 128 integers at backbuffer */
            simdunpack(buffer, backbuffer, b);/* uncompressed */
            for (j = 0; j < SIMDBlockSize; ++j) {
                if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
                    printf("bug in simdpack\n");
                    return -2;
                }
            }

            {
                /*
                   next part assumes that the data is sorted (uses differential coding)
                */
                uint32_t offset = 0;
                /* we compute the bit width */
                const uint32_t b1 = simdmaxbitsd1(offset,
                                datain + k * SIMDBlockSize);
                /* we read 128 integers at "datain + k * SIMDBlockSize" and
                   write b1 128-bit vectors at "buffer" */
                simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, buffer,
                                b1);
                /* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
                simdunpackd1(offset, buffer, backbuffer, b1);
                for (j = 0; j < SIMDBlockSize; ++j) {
                    if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
                        printf("bug in simdpack d1\n");
                        return -3;
                    }
                }
                offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
            }
        }
    }
    free(buffer);
    free(datain);
    free(backbuffer);
    printf("Code looks good.\n");
    return 0;
}

#ifdef __SSE4_1__
int testFOR() {
    int N = 5000 * SIMDBlockSize, gap;
    __m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    uint32_t tmax, tmin, tb;
    for (gap = 1; gap <= 387420489; gap *= 2) {
        int k;
        printf(" gap = %u \n", gap);
        for (k = 0; k < N; ++k)
            datain[k] = k * gap;
        for (k = 0; k * SIMDBlockSize < N; ++k) {
            int j;
            simdmaxmin_length(datain + k * SIMDBlockSize,SIMDBlockSize,&tmin,&tmax);
            /* we compute the bit width */
            tb = bits(tmax - tmin);

            /* we read 128 integers at "datain + k * SIMDBlockSize" and
               write tb 128-bit vectors at "buffer" */
            simdpackFOR(tmin,datain + k * SIMDBlockSize, buffer, tb);

            for (j = 0; j < SIMDBlockSize; ++j) {
                uint32_t selectedvalue = simdselectFOR(tmin,buffer,tb,j);
                if (selectedvalue != datain[k * SIMDBlockSize + j]) {
                    printf("bug in simdselectFOR\n");
                    return -3;
                }
            }
            /* we read back tb 128-bit vectors at "buffer" and write 128 integers at backbuffer */
            simdunpackFOR(tmin,buffer, backbuffer, tb);/* uncompressed */
            for (j = 0; j < SIMDBlockSize; ++j) {
                if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
                    printf("bug in simdpackFOR\n");
                    return -2;
                }
            }
        }
    }
    free(buffer);
    free(datain);
    free(backbuffer);
    printf("Code looks good.\n");
    return 0;
}
#endif

#define MAX 300
int test_simdmaxbitsd1_length() {
    uint32_t result, buffer[MAX + 1];
    int i, j;

    memset(&buffer[0], 0xff, sizeof(buffer));

    /* this test creates buffers of different length; each buffer is
     * initialized to result in the following deltas:
     *   length 1: 2
     *   length 2: 1 2
     *   length 3: 1 1 2
     *   length 4: 1 1 1 2
     *   length 5: 1 1 1 1 2
     * etc. Each sequence's "maxbits" is 2. */
    for (i = 0; i < MAX; i++) {
        for (j = 0; j < i; j++)
            buffer[j] = j + 1;
        buffer[i] = i + 2;

        result = simdmaxbitsd1_length(0, &buffer[0], i + 1);
        if (result != 2) {
            printf("simdmaxbitsd1_length: unexpected result %u in loop %d\n",
                        result, i);
            return -1;
        }
    }
    printf("simdmaxbitsd1_length: ok\n");
    return 0;
}

int uint32_cmp(const void *a, const void *b)
{
    const uint32_t *ia = (const uint32_t *)a;
    const uint32_t *ib = (const uint32_t *)b;
    if(*ia < *ib)
        return -1;
    else if (*ia > *ib)
        return 1;
    return 0;
}

#ifdef __SSE4_1__
int test_simdpackedsearch() {
    uint32_t buffer[128];
    uint32_t result = 0;
    int b, i;
    uint32_t init = 0;
    __m128i initial = _mm_set1_epi32(init);

    /* initialize the buffer */
    for (i = 0; i < 128; i++)
        buffer[i] = (uint32_t)(i + 1);

    /* this test creates delta encoded buffers with different bits, then
     * performs lower bound searches for each key */
    for (b = 1; b <= 32; b++) {
        uint32_t out[128];
        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(init, buffer, (__m128i *)out, b);
        initial = _mm_setzero_si128();
        printf("simdsearchd1: %d bits\n", b);

        /* now perform the searches */
        initial = _mm_set1_epi32(init);
        assert(simdsearchd1(&initial, (__m128i *)out, b, 0, &result) == 0);
        assert(result > 0);

        for (i = 1; i <= 128; i++) {
            initial = _mm_set1_epi32(init);
            assert(simdsearchd1(&initial, (__m128i *)out, b,
                        (uint32_t)i, &result) == i - 1);
            assert(result == (unsigned)i);
        }
        initial = _mm_set1_epi32(init);
        assert(simdsearchd1(&initial, (__m128i *)out, b, 200, &result)
                    == 128);
        assert(result > 200);
    }
    printf("simdsearchd1: ok\n");
    return 0;
}

int test_simdpackedsearchFOR() {
    uint32_t buffer[128];
    uint32_t result = 0;
    int b;
    uint32_t i;
    uint32_t maxv, tmin, tmax, tb;
    uint32_t out[128];

    /* this test creates FOR encoded buffers with different bits, then
     * performs lower bound searches for each key */
    for (b = 1; b <= 32; b++) {
        /* initialize the buffer */
        maxv = (b == 32)
                ? 0xFFFFFFFF
                : ((1U<<b) - 1);
        for (i = 0; i < 128; i++)
            buffer[i] = maxv * (i + 1) / 128;
        simdmaxmin_length(buffer,SIMDBlockSize,&tmin,&tmax);
        /* we compute the bit width */
        tb = bits(tmax - tmin);
        /* FOR-encode to 'tb' bits */
        simdpackFOR(tmin, buffer, (__m128i *)out, tb);
        printf("simdsearchFOR: %d bits\n", b);

        /* now perform the searches */
        for (i = 0; i < 128; i++) {
            assert(buffer[i] == simdselectFOR(tmin, (__m128i *)out, tb,i));
        }
        for (i = 0; i < 128; i++) {
            int x = simdsearchwithlengthFOR(tmin, (__m128i *)out, tb,
                        128,buffer[i], &result) ;
            assert(simdselectFOR(tmin, (__m128i *)out, tb,x) == buffer[x]);
            assert(simdselectFOR(tmin, (__m128i *)out, tb,x) == result);
            assert(buffer[x] == result);
            assert(result == buffer[i]);
            assert(buffer[x] == buffer[i]);
        }
    }
    printf("simdsearchFOR: ok\n");
    return 0;
}

int test_simdpackedsearch_advanced() {
    uint32_t buffer[128];
    uint32_t backbuffer[128];
    uint32_t out[128];
    uint32_t result = 0;
    uint32_t b, i;
    uint32_t init = 0;
    __m128i initial = _mm_set1_epi32(init);


    /* this test creates delta encoded buffers with different bits, then
     * performs lower bound searches for each key */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = init;
        /* initialize the buffer */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)(1431655765 * i + 0xFFFFFFFF)) ;
            if(b < 32) buffer[i] %= (1<<b);
        }

        qsort(buffer,128, sizeof(uint32_t), uint32_cmp);

        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }
        for (i = 1; i < 128; i++) {
            if(buffer[i] < buffer[i-1] )
                buffer[i] = buffer[i-1];
        }
        assert(simdmaxbitsd1(init, buffer)<=b);
        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(init, buffer, (__m128i *)out, b);
        simdunpackd1(init, (__m128i *)out, backbuffer, b);

        for (i = 0; i < 128; i++) {
            assert(buffer[i] == backbuffer[i]);
        }

        printf("advanced simdsearchd1: %d bits\n", b);

        for (i = 0; i < 128; i++) {
            int pos;
            initial = _mm_set1_epi32(init);
            pos = simdsearchd1(&initial, (__m128i *)out, b,
                        buffer[i], &result);
            assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
                        buffer[i], &result));
            assert(buffer[pos] == buffer[i]);
            if(pos > 0)
                assert(buffer[pos - 1] < buffer[i]);
            assert(result == buffer[i]);
        }
        for (i = 0; i < 128; i++) {
            int pos;
            if(buffer[i] == 0) continue;
            initial = _mm_set1_epi32(init);
            pos = simdsearchd1(&initial, (__m128i *)out, b,
                        buffer[i] - 1, &result);
            assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
                        buffer[i] - 1, &result));
            assert(buffer[pos] >= buffer[i] - 1);
            if(pos > 0)
                assert(buffer[pos - 1] < buffer[i] - 1);
            assert(result == buffer[pos]);
        }
        for (i = 0; i < 128; i++) {
            int pos;
            if (buffer[i] + 1 == 0)
                continue;
            initial = _mm_set1_epi32(init);
            pos = simdsearchd1(&initial, (__m128i *) out, b,
                        buffer[i] + 1, &result);
            assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
                        buffer[i] + 1, &result));
            if(pos == 128) {
                assert(buffer[i] == buffer[127]);
            } else {
                assert(buffer[pos] >= buffer[i] + 1);
                if (pos > 0)
                    assert(buffer[pos - 1] < buffer[i] + 1);
                assert(result == buffer[pos]);
            }
        }
    }
    printf("advanced simdsearchd1: ok\n");
    return 0;
}

int test_simdpackedselect() {
    uint32_t buffer[128];
    uint32_t initial = 33;
    int b, i;

    /* initialize the buffer */
    for (i = 0; i < 128; i++)
        buffer[i] = (uint32_t)(initial + i);

    /* this test creates delta encoded buffers with different bits, then
     * retrieves each value with simdselectd1 */
    for (b = 1; b <= 32; b++) {
        uint32_t out[128];
        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);

        printf("simdselectd1: %d bits\n", b);

        /* now perform the selects */
        for (i = 0; i < 128; i++) {
            assert(simdselectd1(initial, (__m128i *)out, b, (uint32_t)i)
                        == initial + i);
        }
    }
    printf("simdselectd1: ok\n");
    return 0;
}

int test_simdpackedselect_advanced() {
    uint32_t buffer[128];
    uint32_t initial = 33;
    uint32_t b;
    int i;

    /* this test creates delta encoded buffers with different bits, then
     * retrieves each value with simdselectd1 */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = initial;
        uint32_t out[128];
        /* initialize the buffer */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)(165576 * i)) ;
            if(b < 32) buffer[i] %= (1<<b);
        }
        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }

        for (i = 1; i < 128; i++) {
            if(buffer[i] < buffer[i-1] )
                buffer[i] = buffer[i-1];
        }
        assert(simdmaxbitsd1(initial, buffer)<=b);

        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);

        printf("simdselectd1: %d bits\n", b);

        /* now perform the selects */
        for (i = 0; i < 128; i++) {
            uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i);
            assert(valretrieved == buffer[i]);
        }
    }
    printf("advanced simdselectd1: ok\n");
    return 0;
}
#endif


int main() {
    int r;
    r = testsetFOR();
    if (r) {
        printf("test failure 1\n");
        return r;
    }

#ifdef __SSE4_1__
    r = testsetd1();
    if (r) {
        printf("test failure 2\n");
        return r;
    }
#endif
    r = testset();
    if (r) {
        printf("test failure 3\n");
        return r;
    }

    r = testshortFORpack();
    if (r) {
        printf("test failure 4\n");
        return r;
    }
    r = testshortpack();
    if (r) {
        printf("test failure 5\n");
        return r;
    }
    r = testlongpack();
    if (r) {
        printf("test failure 6\n");
        return r;
    }
#ifdef __SSE4_1__
    r = test_simdpackedsearchFOR();
    if (r) {
        printf("test failure 7\n");
        return r;
    }

    r = testFOR();
    if (r) {
        printf("test failure 8\n");
        return r;
    }
#endif
#ifdef __AVX2__
    r = testbabyavx();
    if (r) {
        printf("test failure baby avx\n");
        return r;
    }

    r = testavx2();
    if (r) {
        printf("test failure 9 avx\n");
        return r;
    }
#endif
    r = test();
    if (r) {
        printf("test failure 9\n");
        return r;
    }

    r = test_simdmaxbitsd1_length();
    if (r) {
        printf("test failure 10\n");
        return r;
    }
#ifdef __SSE4_1__
    r = test_simdpackedsearch();
    if (r) {
        printf("test failure 11\n");
        return r;
    }

    r = test_simdpackedsearch_advanced();
    if (r) {
        printf("test failure 12\n");
        return r;
    }

    r = test_simdpackedselect();
    if (r) {
        printf("test failure 13\n");
        return r;
    }

    r = test_simdpackedselect_advanced();
    if (r) {
        printf("test failure 14\n");
        return r;
    }
#endif
    printf("All tests OK!\n");


    return 0;
}
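The search tests above all follow the same pattern; spelled out once, a hedged sketch of a lower-bound lookup over a single packed block (lower_bound_d1 is an illustrative wrapper; simdsearchd1 is declared in the header near the top of this diff):

/* hedged sketch: first position whose value is >= key in a packed sorted block */
#include "simdcomp.h"

int lower_bound_d1(const __m128i *packed, uint32_t bit, uint32_t key,
                   uint32_t *value) {
    __m128i running = _mm_set1_epi32(0); /* initial offset, as in the tests */
    /* returns a position in 0..127, or 128 if every value is < key;
       *value receives the decoded value at that position */
    return simdsearchd1(&running, packed, bit, key, value);
}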
@@ -1,102 +0,0 @@
/**
 * This code is released under a BSD License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "simdcomp.h"


#define get_random_char() ((uint8_t)(rand() % 256))


int main() {
    int N = 5000 * SIMDBlockSize, gap;
    __m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));

    srand(time(NULL));

    for (gap = 1; gap <= 387420489; gap *= 3) {
        int k;
        printf(" gap = %u \n", gap);

        /* simulate some random character string, don't care about endianness */
        for (k = 0; k < N; ++k) {
            uint8_t _tmp[4];

            _tmp[0] = get_random_char();
            _tmp[1] = get_random_char();
            _tmp[2] = get_random_char();
            _tmp[3] = get_random_char();

            memmove(&datain[k], _tmp, 4);
        }
        for (k = 0; k * SIMDBlockSize < N; ++k) {
            /*
               First part works for general arrays (sorted or unsorted)
            */
            int j;
            /* we compute the bit width */
            const uint32_t b = maxbits(datain + k * SIMDBlockSize);
            /* we read 128 integers at "datain + k * SIMDBlockSize" and
               write b 128-bit vectors at "buffer" */
            simdpackwithoutmask(datain + k * SIMDBlockSize, buffer, b);
            /* we read back b 128-bit vectors at "buffer" and write 128 integers at backbuffer */
            simdunpack(buffer, backbuffer, b);/* uncompressed */
            for (j = 0; j < SIMDBlockSize; ++j) {
                uint8_t chars_back[4];
                uint8_t chars_in[4];

                memmove(chars_back, &backbuffer[j], 4);
                memmove(chars_in, &datain[k * SIMDBlockSize + j], 4);

                if (chars_in[0] != chars_back[0]
                        || chars_in[1] != chars_back[1]
                        || chars_in[2] != chars_back[2]
                        || chars_in[3] != chars_back[3]) {
                    printf("bug in simdpack\n");
                    return -2;
                }
            }

            {
                /*
                   next part assumes that the data is sorted (uses differential coding)
                */
                uint32_t offset = 0;
                /* we compute the bit width */
                const uint32_t b1 = simdmaxbitsd1(offset,
                                datain + k * SIMDBlockSize);
                /* we read 128 integers at "datain + k * SIMDBlockSize" and
                   write b1 128-bit vectors at "buffer" */
                simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, buffer,
                                b1);
                /* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
                simdunpackd1(offset, buffer, backbuffer, b1);
                for (j = 0; j < SIMDBlockSize; ++j) {
                    uint8_t chars_back[4];
                    uint8_t chars_in[4];

                    memmove(chars_back, &backbuffer[j], 4);
                    memmove(chars_in, &datain[k * SIMDBlockSize + j], 4);

                    if (chars_in[0] != chars_back[0]
                            || chars_in[1] != chars_back[1]
                            || chars_in[2] != chars_back[2]
                            || chars_in[3] != chars_back[3]) {
                        printf("bug in simdpack\n");
                        return -3;
                    }
                }
                offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
            }
        }
    }
    free(buffer);
    free(datain);
    free(backbuffer);
    printf("Code looks good.\n");
    return 0;
}
cpp/simdcomp_wrapper.c (vendored, 42 lines deleted)
@@ -1,42 +0,0 @@
#include "simdcomp.h"
#include "simdcomputil.h"

// assumes datain has a size of 128 uint32s
// and that output is large enough to host the compressed data.
size_t compress_sorted(
    const uint32_t* datain,
    uint8_t* output,
    const uint32_t offset) {
    const uint32_t b = simdmaxbitsd1(offset, datain);
    *output++ = b;
    simdpackwithoutmaskd1(offset, datain, (__m128i *) output, b);
    return 1 + b * sizeof(__m128i);
}

// assumes compressed_data holds a block written by compress_sorted
// and that output is large enough to host the 128 uint32s.
size_t uncompress_sorted(
    const uint8_t* compressed_data,
    uint32_t* output,
    uint32_t offset) {
    const uint32_t b = *compressed_data++;
    simdunpackd1(offset, (__m128i *)compressed_data, output, b);
    return 1 + b * sizeof(__m128i);
}

size_t compress_unsorted(
    const uint32_t* datain,
    uint8_t* output) {
    const uint32_t b = maxbits(datain);
    *output++ = b;
    simdpackwithoutmask(datain, (__m128i *) output, b);
    return 1 + b * sizeof(__m128i);
}

size_t uncompress_unsorted(
    const uint8_t* compressed_data,
    uint32_t* output) {
    const uint32_t b = *compressed_data++;
    simdunpack((__m128i *)compressed_data, output, b);
    return 1 + b * sizeof(__m128i);
}
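Since this wrapper apparently existed to expose simdcomp's block codec to tantivy's Rust side, a round trip through it would look roughly like the sketch below. This is a hedged illustration, not the repository's actual binding: the `extern` declarations simply mirror the C signatures above, and the example assumes the compiled wrapper is linked into the build.

```rust
// Hypothetical FFI binding for the wrapper above (not the repo's real one).
extern "C" {
    fn compress_sorted(datain: *const u32, output: *mut u8, offset: u32) -> usize;
    fn uncompress_sorted(compressed_data: *const u8, output: *mut u32, offset: u32) -> usize;
}

fn main() {
    // A block is always 128 sorted u32s; the worst-case output is
    // 1 header byte plus 32 x 16-byte vectors.
    let block: Vec<u32> = (0..128u32).map(|i| i * 3).collect();
    let mut compressed = vec![0u8; 1 + 32 * 16];
    let mut decompressed = vec![0u32; 128];
    let (written, read) = unsafe {
        (
            compress_sorted(block.as_ptr(), compressed.as_mut_ptr(), 0),
            uncompress_sorted(compressed.as_ptr(), decompressed.as_mut_ptr(), 0),
        )
    };
    assert_eq!(written, read); // both return 1 + b * 16 bytes
    assert_eq!(block, decompressed);
}
```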
doc/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
book
doc/book.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
[book]
authors = ["Paul Masurel"]
multilingual = false
src = "src"
title = "Tantivy, the user guide"
doc/src/SUMMARY.md (new file, 15 lines)
@@ -0,0 +1,15 @@
# Summary

[Avant Propos](./avant-propos.md)

- [Segments](./basis.md)
- [Defining your schema](./schema.md)
- [Facetting](./facetting.md)
- [Innerworkings](./innerworkings.md)
- [Inverted index](./inverted_index.md)
- [Best practice](./best_practise.md)

[Frequently Asked Questions](./faq.md)
[Examples](./examples.md)
doc/src/avant-propos.md (new file, 34 lines)
@@ -0,0 +1,34 @@
# Foreword, what is the scope of tantivy?

> Tantivy is a **search** engine **library** for Rust.

If you are familiar with Lucene, it is an excellent approximation to consider tantivy as Lucene for Rust. Tantivy is heavily inspired by Lucene's design, and
they both have the same scope and targeted use cases.

If you are not familiar with Lucene, let's break down our little tagline.

- **Search** here means full-text search: fundamentally, tantivy is here to help you
efficiently identify the documents matching a given query in your corpus.
But modern search UIs are so much more: text processing, faceting, autocomplete, fuzzy search, good
relevance, collapsing, highlighting, spatial search.

While some of these features are not available in tantivy yet, all of them are relevant
feature requests. Tantivy's objective is to offer a solid toolbox to create the best search
experience. But keep in mind this is just a toolbox.
Which brings us to the second keyword...

- **Library** means that you will have to write code. Tantivy is not an *all-in-one* server solution like Elasticsearch, for instance.

Sometimes a functionality will not be available in tantivy because it is too
specific to your use case. By design, tantivy should make it possible to extend
the available set of features using the existing rock-solid data structures.

Most frequently this will mean writing your own `Collector`, your own `Scorer` or your own
`TokenFilter`... Some of your requirements may also be related to
something closer to architecture or operations. For instance, you may
want to build a large corpus on Hadoop, fine-tune the merge policy to keep your
index sharded in a time-wise fashion, or you may want to convert an existing
index from a different format.

Tantivy exposes a lot of low-level APIs to do all of these things.
doc/src/basis.md (new file, 77 lines)
@@ -0,0 +1,77 @@
# Anatomy of an index

## Straight from disk

Tantivy accesses its data through an abstraction trait called `Directory`.
In theory, one can come and override the data access logic. In practice, the
trait somewhat assumes that your data can be mapped to memory, and tantivy
seems deeply married to using `mmap` for its IO [^1]: the only persisting
directory shipped with tantivy is the `MmapDirectory`.

While this design has some downsides, it greatly simplifies the source code of
tantivy. Caching is also entirely delegated to the OS.

Tantivy works entirely (or almost) by directly reading the data structures as they are laid out on disk. As a result, the act of opening an index does not involve loading different data structures from the disk into random access memory: starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.

This is an interesting property for a command line search engine, or for a multi-tenant log search engine: spawning a new process for each new query can be a perfectly sensible solution in some use cases.
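To make the point concrete, here is a minimal sketch (not part of the original guide; the index path and field name are hypothetical) of a short-lived process that opens an existing index and immediately runs a query:

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // Opening the index only mmaps the segment files; nothing is "loaded".
    let index = Index::open_in_dir("/path/to/existing/index")?;
    let title = index.schema().get_field("title").unwrap();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("hello")?;
    let searcher = index.reader()?.searcher();
    // The first query can run within milliseconds of process start.
    println!("{} documents match", searcher.search(&query, &Count)?);
    Ok(())
}
```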
In later chapters, we will discuss tantivy's inverted index data layout.
One key takeaway is that, to achieve great performance, search indexes are extremely compact.
Of course this is crucial to reduce IO, and to ensure that as much of our index as possible can sit in RAM.

Also, whenever possible, the data is accessed sequentially. This is an amazing property when tantivy needs to access the data from your spinning hard disk, but it is also
critical for performance if your data is read from an SSD, or is even already in your page cache.

## Segments, and the log method

That kind of compact layout comes at one cost: it prevents our data structures from being dynamic.
In fact, the `Directory` trait does not even allow you to modify part of a file.

To allow the addition / deletion of documents, and create the illusion that
your index is dynamic (i.e.: adding and deleting documents), tantivy uses a common database trick sometimes referred to as the *log method*.

Let's forget about deletes for a moment.

As you add documents, these documents are processed and stored in a dedicated data structure, in a RAM buffer. This data structure is not ready for search, but it is useful to receive your data and rearrange it very rapidly.

As you keep adding documents, this buffer will reach its capacity, and tantivy will transparently stop adding documents to it and start converting the data structure to its final read-only format on disk. Once written, a brand new empty buffer is available to resume adding documents.

The resulting chunk of index obtained after this serialization is called a `Segment`.

> A segment is a self-contained atomic piece of index. It is identified with a UUID, and all of its files are identified using the naming scheme: `<UUID>.*`.

Which brings us to the nature of a tantivy `Index`.

> A tantivy `Index` is a collection of `Segments`.

Physically, this really just means an index is a bunch of segment files in a given `Directory`,
linked together by a `meta.json` file. This transparency can become extremely handy
to get tantivy to fit your use case:

*Example 1* You could for instance use Hadoop to build a very large search index in a timely manner, copy all of the resulting segment files into the same directory, and edit the `meta.json` to get a functional index.[^2]

*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`. The sketch below shows the merge-policy half of this idea.
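A minimal sketch of the "disable your merge policy" step from example 2, assuming the `NoMergePolicy` shipped in `tantivy::merge_policy` (creating the index and the writer is elided):

```rust
use tantivy::merge_policy::NoMergePolicy;
use tantivy::IndexWriter;

fn disable_merges(index_writer: &IndexWriter) {
    // With NoMergePolicy, tantivy never merges segments in the background,
    // so the segments you commit (e.g. one per day) stay untouched on disk.
    index_writer.set_merge_policy(Box::new(NoMergePolicy::default()));
}
```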

## Merging

As you index more and more data, your index will accumulate more and more segments.
Having a lot of small segments is not really optimal: there is a bit of redundancy in having
all these term dictionaries. Also, when searching, we will need to do term lookups as many times as we have segments, which can hurt search performance a bit.

That's where merging, or compacting, comes into play. Tantivy will continuously consider merge
opportunities and start merging segments in the background.

## Indexing throughput, number of indexing threads

[^1]: This may eventually change.

[^2]: Be careful however. By default these files will not be considered as *managed* by tantivy. This means they will never be garbage collected by tantivy, regardless of whether they become obsolete or not.
doc/src/best_practise.md.rs (new empty file)
doc/src/examples.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Examples

- [Basic search](/examples/basic_search.html)
doc/src/facetting.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Facetting

wewew

## weeewe
doc/src/faq.md (new empty file)

doc/src/innerworkings.md (new file, 1 line)
@@ -0,0 +1 @@
# Innerworkings
doc/src/inverted_index.md (new file, 1 line)
@@ -0,0 +1 @@
# Inverted index
doc/src/schema.md (new file, 1 line)
@@ -0,0 +1 @@
# Defining your schema
examples/basic_search.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
// # Basic Example
//
// This example covers the basic functionalities of
// tantivy.
//
// We will:
// - define our schema
// - create an index in a directory
// - index a few documents into our index
// - search for the best document matching a basic query
// - retrieve the best document's original content.

// ---
// Importing tantivy...
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
    // Let's create a temporary directory for the
    // sake of this example
    let index_path = TempDir::new()?;

    // # Defining the schema
    //
    // The Tantivy index requires a very strict schema.
    // The schema declares which fields are in the index,
    // and for each field, its type and "the way it should
    // be indexed".

    // First we need to define a schema ...
    let mut schema_builder = Schema::builder();

    // Our first field is title.
    // We want full-text search for it, and we also want
    // to be able to retrieve the document after the search.
    //
    // `TEXT | STORED` is some syntactic sugar to describe
    // that.
    //
    // `TEXT` means the field should be tokenized and indexed,
    // along with its term frequency and term positions.
    //
    // `STORED` means that the field will also be saved
    // in a compressed, row-oriented key-value store.
    // This store is useful for reconstructing the
    // documents that were selected during the search phase.
    schema_builder.add_text_field("title", TEXT | STORED);

    // Our second field is body.
    // We want full-text search for it, but we do not
    // need to be able to retrieve it
    // for our application.
    //
    // We can make our index lighter by omitting the `STORED` flag.
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    // # Indexing documents
    //
    // Let's create a brand new index.
    //
    // This will actually just save a meta.json
    // with our schema in the directory.
    let index = Index::create_in_dir(&index_path, schema.clone())?;

    // To insert a document we will need an index writer.
    // There must be only one writer at a time.
    // This single `IndexWriter` is already
    // multithreaded.
    //
    // Here we give tantivy a budget of `50MB`.
    // Using a bigger heap for the indexer may increase
    // throughput, but 50 MB is already plenty.
    let mut index_writer = index.writer(50_000_000)?;

    // Let's index our documents!
    // We first need a handle on the title and the body field.

    // ### Adding documents
    //
    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let mut old_man_doc = Document::default();
    old_man_doc.add_text(title, "The Old Man and the Sea");
    old_man_doc.add_text(
        body,
        "He was an old man who fished alone in a skiff in the Gulf Stream and \
         he had gone eighty-four days now without taking a fish.",
    );

    // ... and add it to the `IndexWriter`.
    index_writer.add_document(old_man_doc);

    // For convenience, tantivy also comes with a macro to
    // reduce the boilerplate above.
    index_writer.add_document(doc!(
    title => "Of Mice and Men",
    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
             bank and runs deep and green. The water is warm too, for it has slipped twinkling \
             over the yellow sands in the sunlight before reaching the narrow pool. On one \
             side of the river the golden foothill slopes curve up to the strong and rocky \
             Gabilan Mountains, but on the valley side the water is lined with trees—willows \
             fresh and green with every spring, carrying in their lower leaf junctures the \
             debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
             limbs and branches that arch over the pool"
    ));

    // Multivalued fields just need to be repeated.
    index_writer.add_document(doc!(
    title => "Frankenstein",
    title => "The Modern Prometheus",
    body => "You will rejoice to hear that no disaster has accompanied the commencement of an \
             enterprise which you have regarded with such evil forebodings. I arrived here \
             yesterday, and my first task is to assure my dear sister of my welfare and \
             increasing confidence in the success of my undertaking."
    ));

    // This is an example, so we will only index 3 documents
    // here. You can check out tantivy's tutorial to index
    // the English wikipedia. Tantivy's indexing is rather fast.
    // Indexing 5 million articles of the English wikipedia takes
    // around 3 minutes on my computer!

    // ### Committing
    //
    // At this point our documents are not searchable.
    //
    // We need to call `.commit()` explicitly to force the
    // `index_writer` to finish processing the documents in the queue,
    // flush the current index to the disk, and advertise
    // the existence of new documents.
    //
    // This call is blocking.
    index_writer.commit()?;

    // If `.commit()` returns correctly, then all of the
    // documents that have been added are guaranteed to be
    // persistently indexed.
    //
    // In the scenario of a crash or a power failure,
    // tantivy behaves as if it has rolled back to its last
    // commit.

    // # Searching
    //
    // ### Searcher
    //
    // A reader is required first in order to search an index.
    // It acts as a `Searcher` pool that reloads itself,
    // depending on a `ReloadPolicy`.
    //
    // For a search server you will typically create one reader for the entire lifetime of your
    // program, and acquire a new searcher for every single request.
    //
    // In the code below, we rely on the `OnCommit` policy: the reader
    // will reload the index automatically after each commit.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    // We now need to acquire a searcher.
    //
    // A searcher points to a snapshotted, immutable version of the index.
    //
    // Some search experiences might require more than
    // one query. Using the same searcher ensures that all of these queries will run on the
    // same version of the index.
    //
    // Acquiring a `searcher` is very cheap.
    //
    // You should acquire a searcher every time you start processing a request
    // and release it right after your query is finished.
    let searcher = reader.searcher();

    // ### Query

    // The query parser can interpret human queries.
    // Here, if the user does not specify which
    // field they want to search, tantivy will search
    // in both title and body.
    let query_parser = QueryParser::for_index(&index, vec![title, body]);

    // `QueryParser` may fail if the query is not in the right
    // format. For user facing applications, this can be a problem.
    // A ticket has been opened regarding this problem.
    let query = query_parser.parse_query("sea whale")?;

    // A query defines a set of documents, as
    // well as the way they should be scored.
    //
    // A query created by the query parser is scored according
    // to a metric called Tf-Idf, and will consider
    // any document matching at least one of our terms.

    // ### Collectors
    //
    // We are not interested in all of the documents but
    // only in the top 10. Keeping track of our top 10 best documents
    // is the role of the `TopDocs` collector.

    // We can now perform our query.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

    // The actual documents still need to be
    // retrieved from Tantivy's store.
    //
    // Since the body field was not configured as stored,
    // the documents returned will only contain
    // a title.
    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("{}", schema.to_json(&retrieved_doc));
    }

    Ok(())
}
examples/custom_collector.rs (new file, 191 lines)
@@ -0,0 +1,191 @@
// # Custom collector example
//
// This example shows how you can implement your own
// collector. As an example, we will compute a collector
// that computes the standard deviation of a given fast field.
//
// Of course, you can have a look at tantivy's built-in collectors
// such as the `CountCollector` for more examples.

// ---
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader, TantivyError};

#[derive(Default)]
struct Stats {
    count: usize,
    sum: f64,
    squared_sum: f64,
}

impl Stats {
    pub fn count(&self) -> usize {
        self.count
    }

    pub fn mean(&self) -> f64 {
        self.sum / (self.count as f64)
    }

    fn square_mean(&self) -> f64 {
        self.squared_sum / (self.count as f64)
    }

    pub fn standard_deviation(&self) -> f64 {
        let mean = self.mean();
        (self.square_mean() - mean * mean).sqrt()
    }

    fn non_zero_count(self) -> Option<Stats> {
        if self.count == 0 {
            None
        } else {
            Some(self)
        }
    }
}
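// Note: `standard_deviation` above relies on the identity
//     Var(X) = E[X^2] - E[X]^2,
// i.e. sigma = sqrt(squared_sum / n - (sum / n)^2).
// This is why `Stats` only needs to accumulate `count`, `sum` and
// `squared_sum`, and why per-segment stats can later be merged by
// plain addition in `merge_fruits`.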

struct StatsCollector {
    field: Field,
}

impl StatsCollector {
    fn with_field(field: Field) -> StatsCollector {
        StatsCollector { field }
    }
}

impl Collector for StatsCollector {
    // That's the type of our result:
    // our statistics, if at least one document was collected.
    type Fruit = Option<Stats>;

    type Child = StatsSegmentCollector;

    fn for_segment(
        &self,
        _segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> tantivy::Result<StatsSegmentCollector> {
        let fast_field_reader = segment_reader
            .fast_fields()
            .u64(self.field)
            .ok_or_else(|| {
                let field_name = segment_reader.schema().get_field_name(self.field);
                TantivyError::SchemaError(format!(
                    "Field {:?} is not a u64 fast field.",
                    field_name
                ))
            })?;
        Ok(StatsSegmentCollector {
            fast_field_reader,
            stats: Stats::default(),
        })
    }

    fn requires_scoring(&self) -> bool {
        // this collector does not care about score.
        false
    }

    fn merge_fruits(&self, segment_stats: Vec<Option<Stats>>) -> tantivy::Result<Option<Stats>> {
        let mut stats = Stats::default();
        for segment_stats_opt in segment_stats {
            if let Some(segment_stats) = segment_stats_opt {
                stats.count += segment_stats.count;
                stats.sum += segment_stats.sum;
                stats.squared_sum += segment_stats.squared_sum;
            }
        }
        Ok(stats.non_zero_count())
    }
}

struct StatsSegmentCollector {
    fast_field_reader: FastFieldReader<u64>,
    stats: Stats,
}

impl SegmentCollector for StatsSegmentCollector {
    type Fruit = Option<Stats>;

    fn collect(&mut self, doc: u32, _score: Score) {
        let value = self.fast_field_reader.get(doc) as f64;
        self.stats.count += 1;
        self.stats.sum += value;
        self.stats.squared_sum += value * value;
    }

    fn harvest(self) -> <Self as SegmentCollector>::Fruit {
        self.stats.non_zero_count()
    }
}

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    //
    // The Tantivy index requires a very strict schema.
    // The schema declares which fields are in the index,
    // and for each field, its type and "the way it should
    // be indexed".

    // first we need to define a schema ...
    let mut schema_builder = Schema::builder();

    // We'll assume a fictional index containing
    // products, each with a name, a description, and a price.
    let product_name = schema_builder.add_text_field("name", TEXT);
    let product_description = schema_builder.add_text_field("description", TEXT);
    let price = schema_builder.add_u64_field("price", INDEXED | FAST);
    let schema = schema_builder.build();

    // # Indexing documents
    //
    // Let's index a bunch of fake documents for the sake of
    // this example.
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(
        product_name => "Super Broom 2000",
        product_description => "While it is ok for short distance travel, this broom \
            was designed for quidditch. It will up your game.",
        price => 30_200u64
    ));
    index_writer.add_document(doc!(
        product_name => "Turbulobroom",
        product_description => "You might have heard of this broom before: it is the sponsor of the Wales team. \
            You'll enjoy its sharp turns, and rapid acceleration",
        price => 29_240u64
    ));
    index_writer.add_document(doc!(
        product_name => "Broomio",
        product_description => "Great value for the price. This broom is a market favorite",
        price => 21_240u64
    ));
    index_writer.add_document(doc!(
        product_name => "Whack a Mole",
        product_description => "Prime quality bat.",
        price => 5_200u64
    ));
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);

    // here we match all of the "broom" products and compute
    // the price statistics over them
    let query = query_parser.parse_query("broom")?;
    if let Some(stats) = searcher.search(&query, &StatsCollector::with_field(price))? {
        println!("count: {}", stats.count());
        println!("mean: {}", stats.mean());
        println!("standard deviation: {}", stats.standard_deviation());
    }

    Ok(())
}
examples/custom_tokenizer.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
// # Defining a tokenizer pipeline
//
// In this example, we'll see how to define a tokenizer pipeline
// by aligning a bunch of `TokenFilter`s.
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    //
    // The Tantivy index requires a very strict schema.
    // The schema declares which fields are in the index,
    // and for each field, its type and "the way it should
    // be indexed".

    // first we need to define a schema ...
    let mut schema_builder = Schema::builder();

    // Our first field is title.
    // In this example we want to use NGram searching.
    // We will set the ngram size to 3 characters, so any three
    // characters in the title should be findable.
    let text_field_indexing = TextFieldIndexing::default()
        .set_tokenizer("ngram3")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let text_options = TextOptions::default()
        .set_indexing_options(text_field_indexing)
        .set_stored();
    let title = schema_builder.add_text_field("title", text_options);

    // Our second field is body.
    // We want full-text search for it, but we do not
    // need to be able to retrieve it
    // for our application.
    //
    // We can make our index lighter
    // by omitting the `STORED` flag.
    let body = schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    // # Indexing documents
    //
    // Let's create a brand new index.
    // To simplify we will work entirely in RAM.
    // This is not what you want in reality, but it is very useful
    // for your unit tests... Or this example.
    let index = Index::create_in_ram(schema.clone());

    // here we are registering our custom tokenizer:
    // this will store tokens of 3 characters each
    index
        .tokenizers()
        .register("ngram3", NgramTokenizer::new(3, 3, false));
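    // With min = max = 3 and prefix_only = false, a title like
    // "Frankenstein" should be indexed as the trigrams
    // "Fra", "ran", "ank", "nke", "ken", "ens", "nst", "ste", "tei", "ein",
    // which is why the query "ken" further down can hit it.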

    // To insert documents we need an index writer.
    // There must be only one writer at a time.
    // This single `IndexWriter` is already
    // multithreaded.
    //
    // Here we use a buffer of 50MB per thread. Using a bigger
    // heap for the indexer can increase its throughput.
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(
        title => "The Old Man and the Sea",
        body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
                 he had gone eighty-four days now without taking a fish."
    ));
    index_writer.add_document(doc!(
        title => "Of Mice and Men",
        body => r#"A few miles south of Soledad, the Salinas River drops in close to the hillside
                bank and runs deep and green. The water is warm too, for it has slipped twinkling
                over the yellow sands in the sunlight before reaching the narrow pool. On one
                side of the river the golden foothill slopes curve up to the strong and rocky
                Gabilan Mountains, but on the valley side the water is lined with trees—willows
                fresh and green with every spring, carrying in their lower leaf junctures the
                debris of the winter’s flooding; and sycamores with mottled, white, recumbent
                limbs and branches that arch over the pool"#
    ));
    index_writer.add_document(doc!(
        title => "Frankenstein",
        body => r#"You will rejoice to hear that no disaster has accompanied the commencement of an
                enterprise which you have regarded with such evil forebodings. I arrived here
                yesterday, and my first task is to assure my dear sister of my welfare and
                increasing confidence in the success of my undertaking."#
    ));
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();

    // The query parser can interpret human queries.
    // Here, if the user does not specify which
    // field they want to search, tantivy will search
    // in both title and body.
    let query_parser = QueryParser::for_index(&index, vec![title, body]);

    // here we want to get a hit on the 'ken' in Frankenstein
    let query = query_parser.parse_query("ken")?;

    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

    for (_, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("{}", schema.to_json(&retrieved_doc));
    }

    Ok(())
}
examples/deleting_updating_documents.rs (new file, 143 lines)
@@ -0,0 +1,143 @@
// # Deleting and Updating (?) documents
//
// This example explains how to delete and update documents.
// In fact, there is no such thing as an update in tantivy.
//
// To update a document, you need to delete a document and then reinsert
// its new version.
//
// ---
// Importing tantivy...
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexReader};

// A simple helper function to fetch a single document
// given its id from our index.
// It will be helpful to check our work.
fn extract_doc_given_isbn(
    reader: &IndexReader,
    isbn_term: &Term,
) -> tantivy::Result<Option<Document>> {
    let searcher = reader.searcher();

    // This is the simplest query you can think of.
    // It matches all of the documents containing a specific term.
    //
    // The second argument is here to tell we don't care about decoding positions,
    // or term frequencies.
    let term_query = TermQuery::new(isbn_term.clone(), IndexRecordOption::Basic);
    let top_docs = searcher.search(&term_query, &TopDocs::with_limit(1))?;

    if let Some((_score, doc_address)) = top_docs.first() {
        let doc = searcher.doc(*doc_address)?;
        Ok(Some(doc))
    } else {
        // no doc matching this ID.
        Ok(None)
    }
}

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    //
    // Check out the *basic_search* example if this makes
    // little sense to you.
    let mut schema_builder = Schema::builder();

    // Tantivy does not really have a notion of primary id.
    // This may change in the future.
    //
    // Still, we can create an `isbn` field and use it as an id. This
    // field can be a `u64` or a `text`, depending on your use case.
    // It just needs to be indexed.
    //
    // If it is `text`, let's make sure to keep it `raw` and let's avoid
    // running any text processing on it.
    // This is done by associating this field to the tokenizer named `raw`.
    // Rather than building our [`TextOptions`](//docs.rs/tantivy/~0/tantivy/schema/struct.TextOptions.html) manually,
    // we use the `STRING` shortcut. `STRING` stands for indexed (without term frequency or positions)
    // and untokenized.
    //
    // Because we also want to be able to see this `id` in our returned documents,
    // we also mark the field as stored.
    let isbn = schema_builder.add_text_field("isbn", STRING | STORED);
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(50_000_000)?;

    // Let's add a couple of documents, for the sake of the example.
    index_writer.add_document(doc!(
        isbn => "978-0099908401",
        title => "The old Man and the see"
    ));
    index_writer.add_document(doc!(
        isbn => "978-0140177398",
        title => "Of Mice and Men",
    ));
    index_writer.add_document(doc!(
        title => "Frankentein", //< Oops there is a typo here.
        isbn => "978-9176370711",
    ));
    index_writer.commit()?;
    let reader = index.reader()?;

    let frankenstein_isbn = Term::from_field_text(isbn, "978-9176370711");

    // Oops, our Frankenstein doc seems misspelled.
    let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
    assert_eq!(
        schema.to_json(&frankenstein_doc_misspelled),
        r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
    );

    // # Update = Delete + Insert
    //
    // Here we will want to fix the typo in the `Frankenstein` book.
    //
    // Tantivy does not handle updates directly: we need to delete
    // and reinsert the document.
    //
    // This can be complicated, as it means you need to have access
    // to the entire document. It is good practice to integrate tantivy
    // with a key-value store for this reason.
    //
    // To remove one of the documents, we just call `delete_term`
    // on its id.
    //
    // Note that `tantivy` does nothing to enforce the idea that
    // there is only one document associated to this id.
    //
    // Also, you might have noticed that we apply the delete before
    // having committed. This does not really matter...
    index_writer.delete_term(frankenstein_isbn.clone());

    // We now need to reinsert our document without the typo.
    index_writer.add_document(doc!(
        title => "Frankenstein",
        isbn => "978-9176370711",
    ));

    // You are guaranteed that your clients will only observe your index in
    // the state it was in after a commit.
    // In this example, your search engine will at no point be missing the *Frankenstein* document.
    // Everything happens as if the document was updated.
    index_writer.commit()?;
    // We reload our searcher to make our change available to clients.
    reader.reload()?;

    // No more typo!
    let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
    assert_eq!(
        schema.to_json(&frankenstein_new_doc),
        r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
    );

    Ok(())
}
examples/faceted_search.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
// # Faceted Search Example
//
// This example covers the faceting functionalities of
// tantivy.
//
// We will:
// - define our schema, including a facet field
// - index a few documents into our index
// - count the facets of the documents matching a query
// - drill down into a subfacet

// ---
// Importing tantivy...
use tantivy::collector::FacetCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::*;
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Let's define our schema.
    let mut schema_builder = Schema::builder();

    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
    // this is our faceted field: its scientific classification
    let classification = schema_builder.add_facet_field("classification");

    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);

    let mut index_writer = index.writer(30_000_000)?;

    // The `doc!` macro keeps the indexing boilerplate low.
    index_writer.add_document(doc!(
        name => "Cat",
        classification => Facet::from("/Felidae/Felinae/Felis")
    ));
    index_writer.add_document(doc!(
        name => "Canada lynx",
        classification => Facet::from("/Felidae/Felinae/Lynx")
    ));
    index_writer.add_document(doc!(
        name => "Cheetah",
        classification => Facet::from("/Felidae/Felinae/Acinonyx")
    ));
    index_writer.add_document(doc!(
        name => "Tiger",
        classification => Facet::from("/Felidae/Pantherinae/Panthera")
    ));
    index_writer.add_document(doc!(
        name => "Lion",
        classification => Facet::from("/Felidae/Pantherinae/Panthera")
    ));
    index_writer.add_document(doc!(
        name => "Jaguar",
        classification => Facet::from("/Felidae/Pantherinae/Panthera")
    ));
    index_writer.add_document(doc!(
        name => "Sunda clouded leopard",
        classification => Facet::from("/Felidae/Pantherinae/Neofelis")
    ));
    index_writer.add_document(doc!(
        name => "Fossa",
        classification => Facet::from("/Eupleridae/Cryptoprocta")
    ));
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    {
        let mut facet_collector = FacetCollector::for_field(classification);
        facet_collector.add_facet("/Felidae");
        let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
        // This lists all of the facet counts, right below "/Felidae".
        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
        assert_eq!(
            facets,
            vec![
                (&Facet::from("/Felidae/Felinae"), 3),
                (&Facet::from("/Felidae/Pantherinae"), 4),
            ]
        );
    }

    // Facets are also searchable.
    //
    // For instance, a common UI pattern is to allow the user to click on a facet link
    // (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
    //
    // The search would then look as follows.

    // Check the reference doc for different ways to create a `Facet` object.
    {
        let facet = Facet::from_text("/Felidae/Pantherinae");
        let facet_term = Term::from_facet(classification, &facet);
        let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
        let mut facet_collector = FacetCollector::for_field(classification);
        facet_collector.add_facet("/Felidae/Pantherinae");
        let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
        assert_eq!(
            facets,
            vec![
                (&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
                (&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
            ]
        );
    }

    Ok(())
}
examples/faceted_search_with_tweaked_score.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
use std::collections::HashSet;
use tantivy::collector::TopDocs;
use tantivy::doc;
use tantivy::query::BooleanQuery;
use tantivy::schema::*;
use tantivy::{DocId, Index, Score, SegmentReader};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();

    let title = schema_builder.add_text_field("title", STORED);
    let ingredient = schema_builder.add_facet_field("ingredient");

    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(30_000_000)?;

    index_writer.add_document(doc!(
        title => "Fried egg",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/oil"),
    ));
    index_writer.add_document(doc!(
        title => "Scrambled egg",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/butter"),
        ingredient => Facet::from("/ingredient/milk"),
        ingredient => Facet::from("/ingredient/salt"),
    ));
    index_writer.add_document(doc!(
        title => "Egg rolls",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/garlic"),
        ingredient => Facet::from("/ingredient/salt"),
        ingredient => Facet::from("/ingredient/oil"),
        ingredient => Facet::from("/ingredient/tortilla-wrap"),
        ingredient => Facet::from("/ingredient/mushroom"),
    ));
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    {
        let facets = vec![
            Facet::from("/ingredient/egg"),
            Facet::from("/ingredient/oil"),
            Facet::from("/ingredient/garlic"),
            Facet::from("/ingredient/mushroom"),
        ];
        let query = BooleanQuery::new_multiterms_query(
            facets
                .iter()
                .map(|key| Term::from_facet(ingredient, &key))
                .collect(),
        );
        let top_docs_by_custom_score =
            TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
                let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
                let facet_dict = ingredient_reader.facet_dict();

                let query_ords: HashSet<u64> = facets
                    .iter()
                    .filter_map(|key| facet_dict.term_ord(key.encoded_str()))
                    .collect();

                let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);

                move |doc: DocId, original_score: Score| {
                    ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
                    let missing_ingredients = facet_ords_buffer
                        .iter()
                        .filter(|ord| !query_ords.contains(ord))
                        .count();
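                    // Each ingredient a recipe uses beyond the queried ones
                    // divides its score by 4: tweak = 4^(-missing_ingredients).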
                    let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);

                    original_score * tweak
                }
            });
        let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;

        let titles: Vec<String> = top_docs
            .iter()
            .map(|(_, doc_id)| {
                searcher
                    .doc(*doc_id)
                    .unwrap()
                    .get_first(title)
                    .unwrap()
                    .text()
                    .unwrap()
                    .to_owned()
            })
            .collect();
        assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
    }
    Ok(())
}
@@ -1,2 +0,0 @@
#!/bin/bash
docco simple_search.rs -o html
@@ -1,518 +0,0 @@
/*--------------------- Typography ----------------------------*/

@font-face {
    font-family: 'aller-light';
    src: url('public/fonts/aller-light.eot');
    src: url('public/fonts/aller-light.eot?#iefix') format('embedded-opentype'),
         url('public/fonts/aller-light.woff') format('woff'),
         url('public/fonts/aller-light.ttf') format('truetype');
    font-weight: normal;
    font-style: normal;
}

@font-face {
    font-family: 'aller-bold';
    src: url('public/fonts/aller-bold.eot');
    src: url('public/fonts/aller-bold.eot?#iefix') format('embedded-opentype'),
         url('public/fonts/aller-bold.woff') format('woff'),
         url('public/fonts/aller-bold.ttf') format('truetype');
    font-weight: normal;
    font-style: normal;
}

@font-face {
    font-family: 'roboto-black';
    src: url('public/fonts/roboto-black.eot');
    src: url('public/fonts/roboto-black.eot?#iefix') format('embedded-opentype'),
         url('public/fonts/roboto-black.woff') format('woff'),
         url('public/fonts/roboto-black.ttf') format('truetype');
    font-weight: normal;
    font-style: normal;
}

/*--------------------- Layout ----------------------------*/
html { height: 100%; }
body {
    font-family: "aller-light";
    font-size: 14px;
    line-height: 18px;
    color: #30404f;
    margin: 0; padding: 0;
    height: 100%;
}
#container { min-height: 100%; }

a {
    color: #000;
}

b, strong {
    font-weight: normal;
    font-family: "aller-bold";
}

p {
    margin: 15px 0 0px;
}
.annotation ul, .annotation ol {
    margin: 25px 0;
}
.annotation ul li, .annotation ol li {
    font-size: 14px;
    line-height: 18px;
    margin: 10px 0;
}

h1, h2, h3, h4, h5, h6 {
    color: #112233;
    line-height: 1em;
    font-weight: normal;
    font-family: "roboto-black";
    text-transform: uppercase;
    margin: 30px 0 15px 0;
}

h1 {
    margin-top: 40px;
}
h2 {
    font-size: 1.26em;
}

hr {
    border: 0;
    background: 1px #ddd;
    height: 1px;
    margin: 20px 0;
}

pre, tt, code {
    font-size: 12px; line-height: 16px;
    font-family: Menlo, Monaco, Consolas, "Lucida Console", monospace;
    margin: 0; padding: 0;
}
.annotation pre {
    display: block;
    margin: 0;
    padding: 7px 10px;
    background: #fcfcfc;
    -moz-box-shadow: inset 0 0 10px rgba(0,0,0,0.1);
    -webkit-box-shadow: inset 0 0 10px rgba(0,0,0,0.1);
    box-shadow: inset 0 0 10px rgba(0,0,0,0.1);
    overflow-x: auto;
}
.annotation pre code {
    border: 0;
    padding: 0;
    background: transparent;
}

blockquote {
    border-left: 5px solid #ccc;
    margin: 0;
    padding: 1px 0 1px 1em;
}
.sections blockquote p {
    font-family: Menlo, Consolas, Monaco, monospace;
    font-size: 12px; line-height: 16px;
    color: #999;
    margin: 10px 0 0;
    white-space: pre-wrap;
}

ul.sections {
    list-style: none;
    padding: 0 0 5px 0;
    margin: 0;
}

/*
  Force border-box so that % widths fit the parent
  container without overlap because of margin/padding.

  More Info : http://www.quirksmode.org/css/box.html
*/
ul.sections > li > div {
    -moz-box-sizing: border-box;    /* firefox */
    -ms-box-sizing: border-box;     /* ie */
    -webkit-box-sizing: border-box; /* webkit */
    -khtml-box-sizing: border-box;  /* konqueror */
    box-sizing: border-box;         /* css3 */
}

/*---------------------- Jump Page -----------------------------*/
#jump_to, #jump_page {
    margin: 0;
    background: white;
    -webkit-box-shadow: 0 0 25px #777; -moz-box-shadow: 0 0 25px #777;
    -webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px;
    font: 16px Arial;
    cursor: pointer;
    text-align: right;
    list-style: none;
}

#jump_to a {
    text-decoration: none;
}

#jump_to a.large {
    display: none;
}
#jump_to a.small {
    font-size: 22px;
    font-weight: bold;
    color: #676767;
}

#jump_to, #jump_wrapper {
    position: fixed;
    right: 0; top: 0;
    padding: 10px 15px;
    margin: 0;
}

#jump_wrapper {
    display: none;
    padding: 0;
}

#jump_to:hover #jump_wrapper {
    display: block;
}

#jump_page_wrapper {
    position: fixed;
    right: 0;
    top: 0;
    bottom: 0;
}

#jump_page {
    padding: 5px 0 3px;
    margin: 0 0 25px 25px;
    max-height: 100%;
    overflow: auto;
}

#jump_page .source {
    display: block;
    padding: 15px;
    text-decoration: none;
    border-top: 1px solid #eee;
}

#jump_page .source:hover {
    background: #f5f5ff;
}

#jump_page .source:first-child {
}

/*---------------------- Low resolutions (> 320px) ---------------------*/
@media only screen and (min-width: 320px) {
    .pilwrap { display: none; }

    ul.sections > li > div {
        display: block;
        padding: 5px 10px 0 10px;
    }

    ul.sections > li > div.annotation ul, ul.sections > li > div.annotation ol {
        padding-left: 30px;
    }

    ul.sections > li > div.content {
        overflow-x: auto;
        -webkit-box-shadow: inset 0 0 5px #e5e5ee;
        box-shadow: inset 0 0 5px #e5e5ee;
        border: 1px solid #dedede;
        margin: 5px 10px 5px 10px;
        padding-bottom: 5px;
    }

    ul.sections > li > div.annotation pre {
        margin: 7px 0 7px;
        padding-left: 15px;
    }

    ul.sections > li > div.annotation p tt, .annotation code {
        background: #f8f8ff;
        border: 1px solid #dedede;
        font-size: 12px;
        padding: 0 0.2em;
    }
}

/*---------------------- (> 481px) ---------------------*/
@media only screen and (min-width: 481px) {
    #container {
        position: relative;
    }
    body {
        background-color: #F5F5FF;
        font-size: 15px;
        line-height: 21px;
    }
    pre, tt, code {
        line-height: 18px;
    }
    p, ul, ol {
        margin: 0 0 15px;
    }

    #jump_to {
        padding: 5px 10px;
    }
    #jump_wrapper {
        padding: 0;
    }
    #jump_to, #jump_page {
        font: 10px Arial;
        text-transform: uppercase;
    }
    #jump_page .source {
        padding: 5px 10px;
    }
    #jump_to a.large {
        display: inline-block;
    }
    #jump_to a.small {
        display: none;
    }

    #background {
        position: absolute;
        top: 0; bottom: 0;
        width: 350px;
        background: #fff;
        border-right: 1px solid #e5e5ee;
        z-index: -1;
    }

    ul.sections > li > div.annotation ul, ul.sections > li > div.annotation ol {
        padding-left: 40px;
    }

    ul.sections > li {
        white-space: nowrap;
    }

    ul.sections > li > div {
        display: inline-block;
    }

    ul.sections > li > div.annotation {
        max-width: 350px;
        min-width: 350px;
        min-height: 5px;
        padding: 13px;
        overflow-x: hidden;
        white-space: normal;
        vertical-align: top;
        text-align: left;
    }
    ul.sections > li > div.annotation pre {
        margin: 15px 0 15px;
        padding-left: 15px;
    }

    ul.sections > li > div.content {
        padding: 13px;
        vertical-align: top;
        border: none;
        -webkit-box-shadow: none;
        box-shadow: none;
    }

    .pilwrap {
        position: relative;
        display: inline;
    }

    .pilcrow {
        font: 12px Arial;
        text-decoration: none;
        color: #454545;
        position: absolute;
        top: 3px; left: -20px;
        padding: 1px 2px;
        opacity: 0;
        -webkit-transition: opacity 0.2s linear;
    }
    .for-h1 .pilcrow {
        top: 47px;
    }
    .for-h2 .pilcrow, .for-h3 .pilcrow, .for-h4 .pilcrow {
        top: 35px;
    }

    ul.sections > li > div.annotation:hover .pilcrow {
        opacity: 1;
    }
}

/*---------------------- (> 1025px) ---------------------*/
@media only screen and (min-width: 1025px) {

    body {
        font-size: 16px;
        line-height: 24px;
    }

    #background {
        width: 525px;
    }
    ul.sections > li > div.annotation {
        max-width: 525px;
        min-width: 525px;
        padding: 10px 25px 1px 50px;
    }
    ul.sections > li > div.content {
        padding: 9px 15px 16px 25px;
    }
}

/*---------------------- Syntax Highlighting -----------------------------*/

td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
/*

github.com style (c) Vasily Polovnyov <vast@whiteants.net>

*/

pre code {
    display: block; padding: 0.5em;
    color: #000;
    background: #f8f8ff
}

pre .hljs-comment,
pre .hljs-template_comment,
pre .hljs-diff .hljs-header,
pre .hljs-javadoc {
    color: #408080;
    font-style: italic
}

pre .hljs-keyword,
pre .hljs-assignment,
pre .hljs-literal,
pre .hljs-css .hljs-rule .hljs-keyword,
pre .hljs-winutils,
pre .hljs-javascript .hljs-title,
pre .hljs-lisp .hljs-title,
pre .hljs-subst {
    color: #954121;
    /*font-weight: bold*/
}

pre .hljs-number,
pre .hljs-hexcolor {
    color: #40a070
}

pre .hljs-string,
pre .hljs-tag .hljs-value,
pre .hljs-phpdoc,
pre .hljs-tex .hljs-formula {
    color: #219161;
}

pre .hljs-title,
pre .hljs-id {
    color: #19469D;
}
pre .hljs-params {
    color: #00F;
}

pre .hljs-javascript .hljs-title,
pre .hljs-lisp .hljs-title,
pre .hljs-subst {
    font-weight: normal
}

pre .hljs-class .hljs-title,
pre .hljs-haskell .hljs-label,
pre .hljs-tex .hljs-command {
    color: #458;
    font-weight: bold
}

pre .hljs-tag,
pre .hljs-tag .hljs-title,
pre .hljs-rules .hljs-property,
pre .hljs-django .hljs-tag .hljs-keyword {
    color: #000080;
    font-weight: normal
}

pre .hljs-attribute,
pre .hljs-variable,
pre .hljs-instancevar,
pre .hljs-lisp .hljs-body {
    color: #008080
}

pre .hljs-regexp {
    color: #B68
}

pre .hljs-class {
    color: #458;
    font-weight: bold
}

pre .hljs-symbol,
pre .hljs-ruby .hljs-symbol .hljs-string,
pre .hljs-ruby .hljs-symbol .hljs-keyword,
pre .hljs-ruby .hljs-symbol .hljs-keymethods,
pre .hljs-lisp .hljs-keyword,
pre .hljs-tex .hljs-special,
pre .hljs-input_number {
    color: #990073
}

pre .hljs-builtin,
pre .hljs-constructor,
pre .hljs-built_in,
pre .hljs-lisp .hljs-title {
    color: #0086b3
}

pre .hljs-preprocessor,
pre .hljs-pi,
pre .hljs-doctype,
pre .hljs-shebang,
pre .hljs-cdata {
    color: #999;
    font-weight: bold
}

pre .hljs-deletion {
    background: #fdd
}

pre .hljs-addition {
|
||||
background: #dfd
|
||||
}
|
||||
|
||||
pre .hljs-diff .hljs-change {
|
||||
background: #0086b3
|
||||
}
|
||||
|
||||
pre .hljs-chunk {
|
||||
color: #aaa
|
||||
}
|
||||
|
||||
pre .hljs-tex .hljs-formula {
|
||||
opacity: 0.5;
|
||||
}
|
||||
examples/html/public/stylesheets/normalize.css (vendored, 375 lines deleted)
@@ -1,375 +0,0 @@
/*! normalize.css v2.0.1 | MIT License | git.io/normalize */

/* ==========================================================================
   HTML5 display definitions
   ========================================================================== */

/*
 * Corrects `block` display not defined in IE 8/9.
 */

article,
aside,
details,
figcaption,
figure,
footer,
header,
hgroup,
nav,
section,
summary {
    display: block;
}

/*
 * Corrects `inline-block` display not defined in IE 8/9.
 */

audio,
canvas,
video {
    display: inline-block;
}

/*
 * Prevents modern browsers from displaying `audio` without controls.
 * Remove excess height in iOS 5 devices.
 */

audio:not([controls]) {
    display: none;
    height: 0;
}

/*
 * Addresses styling for `hidden` attribute not present in IE 8/9.
 */

[hidden] {
    display: none;
}

/* ==========================================================================
   Base
   ========================================================================== */

/*
 * 1. Sets default font family to sans-serif.
 * 2. Prevents iOS text size adjust after orientation change, without disabling
 *    user zoom.
 */

html {
    font-family: sans-serif; /* 1 */
    -webkit-text-size-adjust: 100%; /* 2 */
    -ms-text-size-adjust: 100%; /* 2 */
}

/*
 * Removes default margin.
 */

body {
    margin: 0;
}

/* ==========================================================================
   Links
   ========================================================================== */

/*
 * Addresses `outline` inconsistency between Chrome and other browsers.
 */

a:focus {
    outline: thin dotted;
}

/*
 * Improves readability when focused and also mouse hovered in all browsers.
 */

a:active,
a:hover {
    outline: 0;
}

/* ==========================================================================
   Typography
   ========================================================================== */

/*
 * Addresses `h1` font sizes within `section` and `article` in Firefox 4+,
 * Safari 5, and Chrome.
 */

h1 {
    font-size: 2em;
}

/*
 * Addresses styling not present in IE 8/9, Safari 5, and Chrome.
 */

abbr[title] {
    border-bottom: 1px dotted;
}

/*
 * Addresses style set to `bolder` in Firefox 4+, Safari 5, and Chrome.
 */

b,
strong {
    font-weight: bold;
}

/*
 * Addresses styling not present in Safari 5 and Chrome.
 */

dfn {
    font-style: italic;
}

/*
 * Addresses styling not present in IE 8/9.
 */

mark {
    background: #ff0;
    color: #000;
}

/*
 * Corrects font family set oddly in Safari 5 and Chrome.
 */

code,
kbd,
pre,
samp {
    font-family: monospace, serif;
    font-size: 1em;
}

/*
 * Improves readability of pre-formatted text in all browsers.
 */

pre {
    white-space: pre;
    white-space: pre-wrap;
    word-wrap: break-word;
}

/*
 * Sets consistent quote types.
 */

q {
    quotes: "\201C" "\201D" "\2018" "\2019";
}

/*
 * Addresses inconsistent and variable font size in all browsers.
 */

small {
    font-size: 80%;
}

/*
 * Prevents `sub` and `sup` affecting `line-height` in all browsers.
 */

sub,
sup {
    font-size: 75%;
    line-height: 0;
    position: relative;
    vertical-align: baseline;
}

sup {
    top: -0.5em;
}

sub {
    bottom: -0.25em;
}

/* ==========================================================================
   Embedded content
   ========================================================================== */

/*
 * Removes border when inside `a` element in IE 8/9.
 */

img {
    border: 0;
}

/*
 * Corrects overflow displayed oddly in IE 9.
 */

svg:not(:root) {
    overflow: hidden;
}

/* ==========================================================================
   Figures
   ========================================================================== */

/*
 * Addresses margin not present in IE 8/9 and Safari 5.
 */

figure {
    margin: 0;
}

/* ==========================================================================
   Forms
   ========================================================================== */

/*
 * Define consistent border, margin, and padding.
 */

fieldset {
    border: 1px solid #c0c0c0;
    margin: 0 2px;
    padding: 0.35em 0.625em 0.75em;
}

/*
 * 1. Corrects color not being inherited in IE 8/9.
 * 2. Remove padding so people aren't caught out if they zero out fieldsets.
 */

legend {
    border: 0; /* 1 */
    padding: 0; /* 2 */
}

/*
 * 1. Corrects font family not being inherited in all browsers.
 * 2. Corrects font size not being inherited in all browsers.
 * 3. Addresses margins set differently in Firefox 4+, Safari 5, and Chrome.
 */

button,
input,
select,
textarea {
    font-family: inherit; /* 1 */
    font-size: 100%; /* 2 */
    margin: 0; /* 3 */
}

/*
 * Addresses Firefox 4+ setting `line-height` on `input` using `!important` in
 * the UA stylesheet.
 */

button,
input {
    line-height: normal;
}

/*
 * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`
 *    and `video` controls.
 * 2. Corrects inability to style clickable `input` types in iOS.
 * 3. Improves usability and consistency of cursor style between image-type
 *    `input` and others.
 */

button,
html input[type="button"], /* 1 */
input[type="reset"],
input[type="submit"] {
    -webkit-appearance: button; /* 2 */
    cursor: pointer; /* 3 */
}

/*
 * Re-set default cursor for disabled elements.
 */

button[disabled],
input[disabled] {
    cursor: default;
}

/*
 * 1. Addresses box sizing set to `content-box` in IE 8/9.
 * 2. Removes excess padding in IE 8/9.
 */

input[type="checkbox"],
input[type="radio"] {
    box-sizing: border-box; /* 1 */
    padding: 0; /* 2 */
}

/*
 * 1. Addresses `appearance` set to `searchfield` in Safari 5 and Chrome.
 * 2. Addresses `box-sizing` set to `border-box` in Safari 5 and Chrome
 *    (include `-moz` to future-proof).
 */

input[type="search"] {
    -webkit-appearance: textfield; /* 1 */
    -moz-box-sizing: content-box;
    -webkit-box-sizing: content-box; /* 2 */
    box-sizing: content-box;
}

/*
 * Removes inner padding and search cancel button in Safari 5 and Chrome
 * on OS X.
 */

input[type="search"]::-webkit-search-cancel-button,
input[type="search"]::-webkit-search-decoration {
    -webkit-appearance: none;
}

/*
 * Removes inner padding and border in Firefox 4+.
 */

button::-moz-focus-inner,
input::-moz-focus-inner {
    border: 0;
    padding: 0;
}

/*
 * 1. Removes default vertical scrollbar in IE 8/9.
 * 2. Improves readability and alignment in all browsers.
 */

textarea {
    overflow: auto; /* 1 */
    vertical-align: top; /* 2 */
}

/* ==========================================================================
   Tables
   ========================================================================== */

/*
 * Remove most spacing between table cells.
 */

table {
    border-collapse: collapse;
    border-spacing: 0;
}
@@ -1,503 +0,0 @@
[Deleted: a 503-line docco-generated HTML page titled "simple_search.rs" (boilerplate markup plus hljs-highlighted code). Its annotations and source mirror the deleted examples/simple_search.rs reproduced in full below.]
examples/integer_range_search.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
// # Searching a range on an indexed int field.
//
// Below is an example of creating an indexed integer field in your schema.
// You can use RangeQuery to get a count of all occurrences in a given range.
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, Result};

fn run() -> Result<()> {
    // For the sake of simplicity, this schema will only have 1 field.
    let mut schema_builder = Schema::builder();

    // `INDEXED` is a short-hand to indicate that our field should be "searchable".
    let year_field = schema_builder.add_u64_field("year", INDEXED);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let reader = index.reader()?;
    {
        let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
        for year in 1950u64..2019u64 {
            index_writer.add_document(doc!(year_field => year));
        }
        index_writer.commit()?;
        // The index will be a range of years.
    }
    reader.reload()?;
    let searcher = reader.searcher();
    // The end is excluded, i.e. here we are searching up to 1969 included.
    let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
    // Use a Count collector to sum the total number of docs in the range.
    let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
    assert_eq!(num_60s_books, 10);
    Ok(())
}

fn main() {
    run().unwrap()
}
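A note on the bounds above: `1960..1970` is half-open, so 1970 itself is not matched. Below is a sketch of the same query with the inclusive upper bound spelled out, assuming a `RangeQuery::new_u64_bounds` constructor taking `std::ops::Bound` values (the constructor name and signature may vary between tantivy versions):

use std::ops::Bound;
use tantivy::query::RangeQuery;
use tantivy::schema::Field;

// Matches the same docs as `RangeQuery::new_u64(year_field, 1960..1970)`,
// but with both bounds written out explicitly (assumed constructor).
fn sixties_query(year_field: Field) -> RangeQuery {
    RangeQuery::new_u64_bounds(year_field, Bound::Included(1960), Bound::Included(1969))
}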
examples/iterating_docs_and_positions.rs (new file, 135 lines)
@@ -0,0 +1,135 @@
// # Iterating docs and positions.
//
// At its core, tantivy relies on a data structure
// called an inverted index.
//
// This example shows how to manually iterate through
// the list of documents containing a term, getting
// its term frequency, and accessing its positions.

// ---
// Importing tantivy...
use tantivy::schema::*;
use tantivy::{doc, DocSet, Index, Postings, TERMINATED};

fn main() -> tantivy::Result<()> {
    // We first create a schema for the sake of the
    // example. Check the `basic_search` example for more information.
    let mut schema_builder = Schema::builder();

    // For this example, we need to make sure to index positions for our title
    // field. `TEXT` precisely does this.
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
    index_writer.add_document(doc!(title => "The Old Man and the Sea"));
    index_writer.add_document(doc!(title => "Of Mice and Men"));
    index_writer.add_document(doc!(title => "The modern Promotheus"));
    index_writer.commit()?;

    let reader = index.reader()?;

    let searcher = reader.searcher();

    // A tantivy index is actually a collection of segments.
    // Similarly, a searcher just wraps a list of `segment_reader`s.
    //
    // (Because we indexed a very small number of documents over one thread,
    // there is actually only one segment here, but let's iterate through the list
    // anyway.)
    for segment_reader in searcher.segment_readers() {
        // A segment contains different data structures.
        // The inverted index stands for the combination of
        // - the term dictionary
        // - the inverted lists associated with each term, and their positions
        let inverted_index = segment_reader.inverted_index(title);

        // A `Term` is a text token associated with a field.
        // Let's go through all docs containing the term `title:the` and access their positions.
        let term_the = Term::from_field_text(title, "the");

        // This segment postings object is like a cursor over the documents matching the term.
        // The `IndexRecordOption` argument tells tantivy we will be interested in both term frequencies
        // and positions.
        //
        // If you don't need all this information, you may get better performance by decompressing less
        // information.
        if let Some(mut segment_postings) =
            inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
        {
            // This buffer will be used to request positions.
            let mut positions: Vec<u32> = Vec::with_capacity(100);
            let mut doc_id = segment_postings.doc();
            while doc_id != TERMINATED {
                // This MAY contain deleted documents as well.
                if segment_reader.is_deleted(doc_id) {
                    doc_id = segment_postings.advance();
                    continue;
                }

                // The number of times the term appears in the document.
                let term_freq: u32 = segment_postings.term_freq();
                // Accessing positions is slightly expensive and lazy; do not request
                // them if you don't need them for some documents.
                segment_postings.positions(&mut positions);

                // By definition we should have `term_freq` positions.
                assert_eq!(positions.len(), term_freq as usize);

                // This prints:
                // ```
                // Doc 0: TermFreq 2: [0, 4]
                // Doc 2: TermFreq 1: [0]
                // ```
                println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
                doc_id = segment_postings.advance();
            }
        }
    }

    // A `Term` is a text token associated with a field.
    // Let's go through all docs containing the term `title:the` and access their positions.
    let term_the = Term::from_field_text(title, "the");

    // Some other powerful operations (especially `.skip_to`) may be useful to consume these
    // posting lists rapidly.
    // You can check for them in the [`DocSet`](https://docs.rs/tantivy/~0/tantivy/trait.DocSet.html) trait
    // and the [`Postings`](https://docs.rs/tantivy/~0/tantivy/trait.Postings.html) trait.

    // Also, for some VERY specific high performance use cases like an OLAP analysis of logs,
    // you can get better performance by directly accessing the blocks of doc ids.
    for segment_reader in searcher.segment_readers() {
        // A segment contains different data structures.
        // The inverted index stands for the combination of
        // - the term dictionary
        // - the inverted lists associated with each term, and their positions
        let inverted_index = segment_reader.inverted_index(title);

        // This block segment postings object is like a cursor over the blocks of documents matching the term.
        // The `IndexRecordOption::Basic` argument tells tantivy we are only interested in doc ids here,
        // not in term frequencies or positions.
        if let Some(mut block_segment_postings) =
            inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
        {
            loop {
                // Once again, these docs MAY contain deleted documents as well.
                let docs = block_segment_postings.docs();
                if docs.is_empty() {
                    break;
                }
                // Prints `Docs [0, 2].`
                println!("Docs {:?}", docs);
                block_segment_postings.advance();
            }
        }
    }

    Ok(())
}
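The `.skip_to` operation mentioned in the comments above corresponds to `seek` in the `DocSet` API that uses `TERMINATED` (the API this example is written against). A minimal sketch, assuming `DocSet::seek(target)` advances the cursor to the first doc id greater than or equal to `target` and returns it (or `TERMINATED` when the list is exhausted):

use tantivy::{DocId, DocSet, TERMINATED};

// Jump a posting cursor forward to `target` without visiting every doc.
// The guard avoids calling `seek` with a target behind the cursor.
fn first_doc_at_or_after<D: DocSet>(docset: &mut D, target: DocId) -> DocId {
    if docset.doc() >= target {
        docset.doc() // already at or past the target
    } else {
        docset.seek(target) // returns TERMINATED if the list is exhausted
    }
}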
examples/multiple_producer.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
// # Indexing from different threads.
//
// It is fairly common to have to index from different threads.
// Tantivy forbids creating more than one `IndexWriter` at a time.
//
// This `IndexWriter` itself has its own multithreaded layer, so managing your own
// indexing threads will not help. However, it can still be useful for some applications.
//
// For instance, if preparing documents to send to tantivy before indexing is the bottleneck of
// your application, it is reasonable to have multiple threads.
//
// Another very common reason to want to index from multiple threads is implementing a webserver
// with CRUD capabilities. The server framework will most likely handle requests from
// different threads.
//
// The recommended way to address both of these use cases is to wrap your `IndexWriter` in an
// `Arc<RwLock<IndexWriter>>`.
//
// While this is counterintuitive, adding and deleting documents do not require mutability
// over the `IndexWriter`, so several threads will be able to do these operations concurrently.
//
// The example below does not represent an actual real-life use case (who would spawn a thread to
// index a single document?), but aims at demonstrating the mechanism that makes indexing
// from several threads possible.

// ---
// Importing tantivy...
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp};

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let index_writer: Arc<RwLock<IndexWriter>> = Arc::new(RwLock::new(index.writer(50_000_000)?));

    // # First indexing thread.
    let index_writer_clone_1 = index_writer.clone();
    thread::spawn(move || {
        // We index the document 100 times... for the sake of the example.
        for i in 0..100 {
            let opstamp = index_writer_clone_1
                .read().unwrap() //< A read lock is sufficient here.
                .add_document(
                    doc!(
                    title => "Of Mice and Men",
                    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                            bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                            over the yellow sands in the sunlight before reaching the narrow pool. On one \
                            side of the river the golden foothill slopes curve up to the strong and rocky \
                            Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                            fresh and green with every spring, carrying in their lower leaf junctures the \
                            debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                            limbs and branches that arch over the pool"
                ));
            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(20));
        }
    });

    // # Second indexing thread.
    let index_writer_clone_2 = index_writer.clone();
    // For convenience, tantivy also comes with a macro to
    // reduce the boilerplate above.
    thread::spawn(move || {
        // We index the document 100 times... for the sake of the example.
        for i in 0..100 {
            // A read lock is sufficient here.
            let opstamp = {
                let index_writer_rlock = index_writer_clone_2.read().unwrap();
                index_writer_rlock.add_document(doc!(
                    title => "Manufacturing consent",
                    body => "Some great book description..."
                ))
            };
            println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(10));
        }
    });

    // # In the main thread, we commit 10 times, once every 500ms.
    for _ in 0..10 {
        let opstamp: Opstamp = {
            // Committing or rolling back, on the other hand, requires a write lock. This will block other threads.
            let mut index_writer_wlock = index_writer.write().unwrap();
            index_writer_wlock.commit().unwrap()
        };
        println!("committed with opstamp {}", opstamp);
        thread::sleep(Duration::from_millis(500));
    }

    Ok(())
}
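As a side note on the locking discipline above: `add_document` only needs `&self`, so producer threads can share a read lock, while `commit` needs `&mut self` and therefore takes the write lock. A minimal sketch of the commit loop factored into a helper (the `commit_every` name is hypothetical):

use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::{IndexWriter, Opstamp};

// Commit `times` times, holding the write lock only while actually committing,
// so producer threads holding read locks are blocked as briefly as possible.
fn commit_every(writer: &Arc<RwLock<IndexWriter>>, period: Duration, times: usize) -> tantivy::Result<Opstamp> {
    let mut last_opstamp: Opstamp = 0;
    for _ in 0..times {
        {
            let mut w = writer.write().unwrap(); // exclusive access for commit
            last_opstamp = w.commit()?;
        }
        thread::sleep(period);
    }
    Ok(last_opstamp)
}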
examples/pre_tokenized_text.rs (new file, 139 lines)
@@ -0,0 +1,139 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use a tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text.

use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn pre_tokenize_text(text: &str) -> Vec<Token> {
    let mut token_stream = SimpleTokenizer.token_stream(text);
    let mut tokens = vec![];
    while token_stream.advance() {
        tokens.push(token_stream.token().clone());
    }
    tokens
}

fn main() -> tantivy::Result<()> {
    let index_path = TempDir::new()?;

    let mut schema_builder = Schema::builder();

    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let title_text = "The Old Man and the Sea";
    let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";

    // Content of our first document.
    // We create a `PreTokenizedString` which contains the original text and a vector of tokens.
    let title_tok = PreTokenizedString {
        text: String::from(title_text),
        tokens: pre_tokenize_text(title_text),
    };

    println!(
        "Original text: \"{}\" and tokens: {:?}",
        title_tok.text, title_tok.tokens
    );

    let body_tok = PreTokenizedString {
        text: String::from(body_text),
        tokens: pre_tokenize_text(body_text),
    };

    // Now let's create a document and add our `PreTokenizedString`s.
    let old_man_doc = doc!(title => title_tok, body => body_tok);

    // ... now let's just add it to the IndexWriter.
    index_writer.add_document(old_man_doc);

    // Pre-tokenized text can also be fed in as JSON.
    let short_man_json = r#"{
        "title":[{
            "text":"The Old Man",
            "tokens":[
                {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
                {"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
                {"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
            ]
        }]
    }"#;

    let short_man_doc = schema.parse_document(&short_man_json)?;

    index_writer.add_document(short_man_doc);

    // Let's commit changes.
    index_writer.commit()?;

    // ... and now is the time to query our index.

    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    let searcher = reader.searcher();

    // We want to get documents with the token "Man"; we will use a TermQuery to do it.
    // Using PreTokenizedString means the tokens are indexed as-is, avoiding stemming
    // and lowercasing, which preserves full words in their original form.
    let query = TermQuery::new(
        Term::from_field_text(title, "Man"),
        IndexRecordOption::Basic,
    );

    let (top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 2);

    // Now let's print out the results.
    // Note that the tokens are not stored along with the original text
    // in the document store.
    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("Document: {}", schema.to_json(&retrieved_doc));
    }

    // Contrary to the previous query, when we search for the "man" term we
    // should get no results, as it's not one of the indexed tokens. SimpleTokenizer
    // only splits text on whitespace / punctuation.

    let query = TermQuery::new(
        Term::from_field_text(title, "man"),
        IndexRecordOption::Basic,
    );

    let (_top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 0);

    Ok(())
}
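For reference, the JSON keys above (`offset_from`, `offset_to`, `position`, `text`, `position_length`) map one-to-one onto the public fields of tantivy's `Token` struct, so the same document can be built without going through JSON. A sketch mirroring the `short_man_json` payload token for token:

use tantivy::tokenizer::{PreTokenizedString, Token};

// Build the "The Old Man" title by hand, matching the JSON payload above.
fn short_man_title() -> PreTokenizedString {
    let tokens = vec![
        Token { offset_from: 0, offset_to: 3, position: 0, text: "The".to_string(), position_length: 1 },
        Token { offset_from: 4, offset_to: 7, position: 1, text: "Old".to_string(), position_length: 1 },
        Token { offset_from: 8, offset_to: 11, position: 2, text: "Man".to_string(), position_length: 1 },
    ];
    PreTokenizedString { text: "The Old Man".to_string(), tokens }
}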
examples/simple_search.rs (deleted, 209 lines)
@@ -1,209 +0,0 @@
extern crate rustc_serialize;
extern crate tantivy;
extern crate tempdir;

use std::path::Path;
use tempdir::TempDir;
use tantivy::Index;
use tantivy::schema::*;
use tantivy::collector::TopCollector;
use tantivy::query::QueryParser;

fn main() {
    // Let's create a temporary directory for the
    // sake of this example.
    if let Ok(dir) = TempDir::new("tantivy_example_dir") {
        run_example(dir.path()).unwrap();
        dir.close().unwrap();
    }
}


fn run_example(index_path: &Path) -> tantivy::Result<()> {

    // # Defining the schema
    //
    // The Tantivy index requires a very strict schema.
    // The schema declares which fields are in the index,
    // and for each field, its type and "the way it should
    // be indexed".

    // First we need to define a schema...
    let mut schema_builder = SchemaBuilder::default();

    // Our first field is title.
    // We want full-text search for it, and we want to be able
    // to retrieve the document after the search.
    //
    // TEXT | STORED is some syntactic sugar to describe
    // that.
    //
    // `TEXT` means the field should be tokenized and indexed,
    // along with its term frequency and term positions.
    //
    // `STORED` means that the field will also be saved
    // in a compressed, row-oriented key-value store.
    // This store is useful to reconstruct the
    // documents that were selected during the search phase.
    schema_builder.add_text_field("title", TEXT | STORED);

    // Our second field is body.
    // We want full-text search for it, and we want to be able
    // to retrieve the body after the search.
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    // # Indexing documents
    //
    // Let's create a brand new index.
    //
    // This will actually just save a meta.json
    // with our schema in the directory.
    let index = try!(Index::create(index_path, schema.clone()));

    // To insert documents we need an index writer.
    // There must be only one writer at a time.
    // This single `IndexWriter` is already
    // multithreaded.
    //
    // Here we use a buffer of 50MB per thread. Using a bigger
    // heap for the indexer can increase its throughput.
    let mut index_writer = try!(index.writer(50_000_000));

    // Let's index our documents!
    // We first need a handle on the title and the body field.

    // ### Create a document "manually".
    //
    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let mut old_man_doc = Document::default();
    old_man_doc.add_text(title, "The Old Man and the Sea");
    old_man_doc.add_text(body,
                         "He was an old man who fished alone in a skiff in the Gulf Stream and \
                          he had gone eighty-four days now without taking a fish.");

    // ... and add it to the `IndexWriter`.
    index_writer.add_document(old_man_doc);

    // ### Create a document directly from json.
    //
    // Alternatively, we can use our schema to parse
    // a document object directly from json.

    let mice_and_men_doc = try!(schema.parse_document(r#"{
       "title": "Of Mice and Men",
       "body": "few miles south of Soledad, the Salinas River drops in close to the hillside bank and runs deep and green. The water is warm too, for it has slipped twinkling over the yellow sands in the sunlight before reaching the narrow pool. On one side of the river the golden foothill slopes curve up to the strong and rocky Gabilan Mountains, but on the valley side the water is lined with trees—willows fresh and green with every spring, carrying in their lower leaf junctures the debris of the winter’s flooding; and sycamores with mottled, white,recumbent limbs and branches that arch over the pool"
    }"#));

    index_writer.add_document(mice_and_men_doc);

    // Multi-valued fields are allowed; they are
    // expressed in JSON by an array.
    // The following document has two titles.
    let frankenstein_doc = try!(schema.parse_document(r#"{
       "title": ["Frankenstein", "The Modern Promotheus"],
       "body": "You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence in the success of my undertaking."
    }"#));
    index_writer.add_document(frankenstein_doc);

    // This is an example, so we will only index 3 documents
    // here. You can check out tantivy's tutorial to index
    // the English wikipedia. Tantivy's indexing is rather fast.
    // Indexing 5 million articles of the English wikipedia takes
    // around 4 minutes on my computer!

    // ### Committing
    //
    // At this point our documents are not searchable.
    //
    // We need to call .commit() explicitly to force the
    // index_writer to finish processing the documents in the queue,
    // flush the current index to the disk, and advertise
    // the existence of new documents.
    //
    // This call is blocking.
    try!(index_writer.commit());

    // If `.commit()` returns correctly, then all of the
    // documents that have been added are guaranteed to be
    // persistently indexed.
    //
    // In the scenario of a crash or a power failure,
    // tantivy behaves as if it has rolled back to its last
    // commit.

    // # Searching
    //
    // Let's search our index. Start by reloading
    // searchers in the index. This should be done
    // after every commit().
    try!(index.load_searchers());

    // Afterwards create one (or more) searchers.
    //
    // You should create a searcher
    // every time you start a "search query".
    let searcher = index.searcher();

    // The query parser can interpret human queries.
    // Here, if the user does not specify which
    // field they want to search, tantivy will search
    // in both title and body.
    let query_parser = QueryParser::new(index.schema(), vec![title, body]);

    // QueryParser may fail if the query is not in the right
    // format. For user facing applications, this can be a problem.
    // A ticket has been opened regarding this problem.
    let query = try!(query_parser.parse_query("sea whale"));

    // A query defines a set of documents, as
    // well as the way they should be scored.
    //
    // A query created by the query parser is scored according
    // to a metric called Tf-Idf, and will consider
    // any document matching at least one of our terms.

    // ### Collectors
    //
    // We are not interested in all of the documents but
    // only in the top 10. Keeping track of our top 10 best documents
    // is the role of the TopCollector.
    let mut top_collector = TopCollector::with_limit(10);

    // We can now perform our query.
    try!(searcher.search(&*query, &mut top_collector));

    // Our top collector now contains the 10
    // most relevant doc ids...
    let doc_addresses = top_collector.docs();

    // The actual documents still need to be
    // retrieved from Tantivy's store.
    //
    // Since the body field was not configured as stored,
    // the document returned will only contain
    // a title.

    for doc_address in doc_addresses {
        let retrieved_doc = try!(searcher.doc(&doc_address));
        println!("{}", schema.to_json(&retrieved_doc));
    }

    Ok(())
}
82  examples/snippet.rs  Normal file
@@ -0,0 +1,82 @@
// # Snippet example
//
// This example shows how to return a representative snippet of
// your hit result.
// Snippets are extracts of a target document, returned in HTML format.
// The keywords searched by the user are highlighted with a `<b>` tag.

// ---
// Importing tantivy...
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
    // Let's create a temporary directory for the
    // sake of this example
    let index_path = TempDir::new()?;

    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT | STORED);
    let schema = schema_builder.build();

    // # Indexing documents
    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // We'll only need one doc for this example.
    index_writer.add_document(doc!(
        title => "Of Mice and Men",
        body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                 bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                 over the yellow sands in the sunlight before reaching the narrow pool. On one \
                 side of the river the golden foothill slopes curve up to the strong and rocky \
                 Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                 fresh and green with every spring, carrying in their lower leaf junctures the \
                 debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                 limbs and branches that arch over the pool"
    ));
    // ...
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("sycamore spring")?;

    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

    let snippet_generator = SnippetGenerator::create(&searcher, &*query, body)?;
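    // Aside: the snippet length can also be tuned; `SnippetGenerator`
    // exposes `set_max_num_chars` for this (it needs a mutable
    // binding), e.g. `snippet_generator.set_max_num_chars(100);`.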
    for (score, doc_address) in top_docs {
        let doc = searcher.doc(doc_address)?;
        let snippet = snippet_generator.snippet_from_doc(&doc);
        println!("Document score {}:", score);
        println!("title: {}", doc.get_first(title).unwrap().text().unwrap());
        println!("snippet: {}", snippet.to_html());
        println!("custom highlighting: {}", highlight(snippet));
    }

    Ok(())
}

fn highlight(snippet: Snippet) -> String {
    let mut result = String::new();
    let mut start_from = 0;

    for (start, end) in snippet.highlighted().iter().map(|h| h.bounds()) {
        result.push_str(&snippet.fragments()[start_from..start]);
        result.push_str(" --> ");
        result.push_str(&snippet.fragments()[start..end]);
        result.push_str(" <-- ");
        start_from = end;
    }

    result.push_str(&snippet.fragments()[start_from..]);
    result
}
113  examples/stop_words.rs  Normal file
@@ -0,0 +1,113 @@
// # Stop Words Example
//
// This example covers the basic usage of stop words
// with tantivy.
//
// We will:
// - define our schema
// - create an index in a directory
// - add a few stop words
// - index a few documents in our index

// ---
// Importing tantivy...
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // This example assumes you understand the content in `basic_search`.
    let mut schema_builder = Schema::builder();

    // This configures your custom options for how tantivy will
    // store and process your content in the index; the key thing
    // to note is that we are setting the tokenizer to `stoppy`,
    // which will be defined and registered below.
    let text_field_indexing = TextFieldIndexing::default()
        .set_tokenizer("stoppy")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let text_options = TextOptions::default()
        .set_indexing_options(text_field_indexing)
        .set_stored();

    // Our first field is title.
    schema_builder.add_text_field("title", text_options);

    // Our second field is body.
    let text_field_indexing = TextFieldIndexing::default()
        .set_tokenizer("stoppy")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let text_options = TextOptions::default()
        .set_indexing_options(text_field_indexing)
        .set_stored();
    schema_builder.add_text_field("body", text_options);

    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());

    // This tokenizer lowercases all of the text (to help with stop word
    // matching), then removes all instances of `the` and `and` from the corpus.
    let tokenizer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(StopWordFilter::remove(vec![
            "the".to_string(),
            "and".to_string(),
        ]));

    index.tokenizers().register("stoppy", tokenizer);
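    // The name registered here has to match the name passed to
    // `set_tokenizer("stoppy")` on the schema fields above; otherwise
    // tantivy will not find a tokenizer for those fields when indexing.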
    let mut index_writer = index.writer(50_000_000)?;

    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    index_writer.add_document(doc!(
        title => "The Old Man and the Sea",
        body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
                 he had gone eighty-four days now without taking a fish."
    ));

    index_writer.add_document(doc!(
        title => "Of Mice and Men",
        body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                 bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                 over the yellow sands in the sunlight before reaching the narrow pool. On one \
                 side of the river the golden foothill slopes curve up to the strong and rocky \
                 Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                 fresh and green with every spring, carrying in their lower leaf junctures the \
                 debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                 limbs and branches that arch over the pool"
    ));

    index_writer.add_document(doc!(
        title => "Frankenstein",
        body => "You will rejoice to hear that no disaster has accompanied the commencement of an \
                 enterprise which you have regarded with such evil forebodings. I arrived here \
                 yesterday, and my first task is to assure my dear sister of my welfare and \
                 increasing confidence in the success of my undertaking."
    ));

    index_writer.commit()?;

    let reader = index.reader()?;

    let searcher = reader.searcher();

    let query_parser = QueryParser::for_index(&index, vec![title, body]);

    // Stop words are applied on the query as well.
    // The following will be equivalent to `title:frankenstein`.
    let query = query_parser.parse_query("title:\"the Frankenstein\"")?;
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

    for (score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("\n==\nDocument score {}:", score);
        println!("{}", schema.to_json(&retrieved_doc));
    }

    Ok(())
}
41  examples/working_with_json.rs  Normal file
@@ -0,0 +1,41 @@
use tantivy;
use tantivy::schema::*;

// # Document from JSON
//
// For convenience, `Document` can be parsed directly from JSON.
fn main() -> tantivy::Result<()> {
    // Let's first define a schema and an index.
    // Check out the basic example if this is confusing to you.
    //
    // First we need to define a schema...
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_text_field("body", TEXT);
    schema_builder.add_u64_field("year", INDEXED);
    let schema = schema_builder.build();

    // Let's assume we have a JSON-serialized document.
    let mice_and_men_doc_json = r#"{
       "title": "Of Mice and Men",
       "year": 1937
    }"#;

    // We can parse our document.
    let _mice_and_men_doc = schema.parse_document(&mice_and_men_doc_json)?;

    // Multi-valued fields are allowed; they are
    // expressed in JSON by an array.
    // The following document has two titles.
    let frankenstein_json = r#"{
       "title": ["Frankenstein", "The Modern Prometheus"],
       "year": 1818
    }"#;
    let _frankenstein_doc = schema.parse_document(&frankenstein_json)?;

    // Note that the schema is saved in your index directory.
    //
    // As a result, indexes are aware of their schema, and you can use this
    // feature just by opening an existing `Index` and calling
    // `index.schema().parse_document(json)`.
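    // A minimal sketch of that workflow (assuming an index already
    // exists in some directory `index_path`; both names here are
    // illustrative):
    //
    //     let index = Index::open_in_dir(index_path)?;
    //     let doc = index.schema().parse_document(json)?;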
    Ok(())
}
16  query-grammar/Cargo.toml  Normal file
@@ -0,0 +1,16 @@
[package]
name = "tantivy-query-grammar"
version = "0.13.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
combine = { version = "4", default-features = false, features = [] }
3  query-grammar/README.md  Normal file
@@ -0,0 +1,3 @@
# Tantivy Query Grammar

This crate is used by tantivy to parse queries.
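A minimal usage sketch (the crate exposes a single entry point,
`parse_query`, which returns the parsed `UserInputAST` on success; note
that its `Error` type carries no detail):

```rust
use tantivy_query_grammar::parse_query;

fn main() {
    // Parse a raw user query into tantivy's user-input AST.
    if let Ok(ast) = parse_query("title:diary OR ramble") {
        // The Debug representation shows the parsed structure.
        println!("{:?}", ast);
    }
}
```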
15  query-grammar/src/lib.rs  Normal file
@@ -0,0 +1,15 @@
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;

pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};

/// Opaque parse error: the grammar reports failure without detail.
pub struct Error;

pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
    let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
    Ok(user_input_ast)
}
72  query-grammar/src/occur.rs  Normal file
@@ -0,0 +1,72 @@
use std::fmt;
use std::fmt::Write;

/// Defines whether a term in a query must be present,
/// should be present, or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur {
    /// For a given document to be considered for scoring,
    /// at least one of the terms with a Should or a Must
    /// Occur constraint must be within the document.
    Should,
    /// Documents without the term are excluded from the search.
    Must,
    /// Documents that contain the term are excluded from the
    /// search.
    MustNot,
}

impl Occur {
    /// Returns the one-char prefix symbol for this `Occur`.
    /// - `Should` => '?'
    /// - `Must` => '+'
    /// - `MustNot` => '-'
    fn to_char(self) -> char {
        match self {
            Occur::Should => '?',
            Occur::Must => '+',
            Occur::MustNot => '-',
        }
    }

    /// Compose two occur values.
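    /// The composition keeps the stronger constraint and treats
    /// `Should` as neutral: e.g. `compose(Must, MustNot)` is `MustNot`,
    /// and two negations cancel out, so `compose(MustNot, MustNot)` is `Must`.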
    pub fn compose(left: Occur, right: Occur) -> Occur {
        match (left, right) {
            (Occur::Should, _) => right,
            (Occur::Must, Occur::MustNot) => Occur::MustNot,
            (Occur::Must, _) => Occur::Must,
            (Occur::MustNot, Occur::MustNot) => Occur::Must,
            (Occur::MustNot, _) => Occur::MustNot,
        }
    }
}

impl fmt::Display for Occur {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_char(self.to_char())
    }
}

#[cfg(test)]
mod test {
    use crate::Occur;

    #[test]
    fn test_occur_compose() {
        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
        assert_eq!(
            Occur::compose(Occur::Should, Occur::MustNot),
            Occur::MustNot
        );
        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
        assert_eq!(
            Occur::compose(Occur::MustNot, Occur::Should),
            Occur::MustNot
        );
        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
    }
}
510  query-grammar/src/query_grammar.rs  Normal file
@@ -0,0 +1,510 @@
use super::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
use combine::error::StringStreamError;
use combine::parser::char::{char, digit, letter, space, spaces, string};
use combine::parser::Parser;
use combine::{
    attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};

fn field<'a>() -> impl Parser<&'a str, Output = String> {
    (
        letter(),
        many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
    )
        .skip(char(':'))
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}

fn word<'a>() -> impl Parser<&'a str, Output = String> {
    (
        satisfy(|c: char| {
            !c.is_whitespace()
                && !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
        }),
        many(satisfy(|c: char| {
            !c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
        })),
    )
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
        .and_then(|s: String| match s.as_str() {
            // Reserved operator words cannot be used as bare terms.
            "OR" | "AND" | "NOT" => Err(StringStreamError::UnexpectedParse),
            _ => Ok(s),
        })
}

fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
    let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
    phrase.or(word())
}

fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
    let term_val_with_field = negative_number().or(term_val());
    (field(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
        field_name: Some(field_name),
        phrase,
    })
}

fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let term_default_field = term_val().map(|phrase| UserInputLiteral {
        field_name: None,
        phrase,
    });
    attempt(term_query())
        .or(term_default_field)
        .map(UserInputLeaf::from)
}

fn negative_number<'a>() -> impl Parser<&'a str, Output = String> {
    (
        char('-'),
        many1(digit()),
        optional((char('.'), many1(digit()))),
    )
        .map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
            if let Some(('.', s3)) = s3 {
                format!("{}{}.{}", s1, s2, s3)
            } else {
                format!("{}{}", s1, s2)
            }
        })
}

fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
    skip_many1(space())
}

/// Function that parses a range out of a Stream.
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10,
/// [a TO *], [a TO c], [abc TO bcd}
fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let range_term_val = || {
        word()
            .or(negative_number())
            .or(char('*').with(value("*".to_string())))
    };

    // Check for an unbounded range in the form of <5, <=10, >5, >=5.
    let elastic_unbounded_range = (
        choice([
            attempt(string(">=")),
            attempt(string("<=")),
            attempt(string("<")),
            attempt(string(">")),
        ])
        .skip(spaces()),
        range_term_val(),
    )
        .map(
            |(comparison_sign, bound): (&str, String)| match comparison_sign {
                ">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
                "<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
                "<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
                ">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
                // default case
                _ => (UserInputBound::Unbounded, UserInputBound::Unbounded),
            },
        );
    let lower_bound = (one_of("{[".chars()), range_term_val()).map(
        |(boundary_char, lower_bound): (char, String)| {
            if lower_bound == "*" {
                UserInputBound::Unbounded
            } else if boundary_char == '{' {
                UserInputBound::Exclusive(lower_bound)
            } else {
                UserInputBound::Inclusive(lower_bound)
            }
        },
    );
    let upper_bound = (range_term_val(), one_of("}]".chars())).map(
        |(higher_bound, boundary_char): (String, char)| {
            if higher_bound == "*" {
                UserInputBound::Unbounded
            } else if boundary_char == '}' {
                UserInputBound::Exclusive(higher_bound)
            } else {
                UserInputBound::Inclusive(higher_bound)
            }
        },
    );
    // Return only lower and upper.
    let lower_to_upper = (
        lower_bound.skip((spaces(), string("TO"), spaces())),
        upper_bound,
    );

    (
        optional(field()).skip(spaces()),
        // Try the elastic syntax first; if it matches, one side of the range is unbounded.
        attempt(elastic_unbounded_range).or(lower_to_upper),
    )
        .map(|(field, (lower, upper))|
            // Construct the leaf from the extracted field (optional)
            // and bounds.
            UserInputLeaf::Range {
                field,
                lower,
                upper
            })
}

fn negate(expr: UserInputAST) -> UserInputAST {
    expr.unary(Occur::MustNot)
}

fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    parser(|input| {
        char('(')
            .with(ast())
            .skip(char(')'))
            .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
            .or(attempt(
                string("NOT").skip(spaces1()).with(leaf()).map(negate),
            ))
            .or(attempt(range().map(UserInputAST::from)))
            .or(literal().map(UserInputAST::from))
            .parse_stream(input)
            .into_result()
    })
}

fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
    char('-')
        .map(|_| Occur::MustNot)
        .or(char('+').map(|_| Occur::Must))
}

fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
    (optional(occur_symbol()), boosted_leaf())
}

fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
    (many1(digit()), optional((char('.'), many1(digit())))).map(
        |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
            let mut float_str = int_part;
            if let Some((chr, decimal_str)) = decimal_part_opt {
                float_str.push(chr);
                float_str.push_str(&decimal_str);
            }
            float_str.parse::<f64>().unwrap()
        },
    )
}

fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
    (char('^'), positive_float_number()).map(|(_, boost)| boost)
}

fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
            UserInputAST::Boost(Box::new(leaf), boost)
        }
        _ => leaf,
    })
}

#[derive(Clone, Copy)]
enum BinaryOperand {
    Or,
    And,
}

fn binary_operand<'a>() -> impl Parser<&'a str, Output = BinaryOperand> {
    string("AND")
        .with(value(BinaryOperand::And))
        .or(string("OR").with(value(BinaryOperand::Or)))
}
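// `AND` binds tighter than `OR` here: the operator/operand pairs are
// folded into disjunctive normal form, so `a OR b AND c` groups as
// `[[a], [b, c]]` and renders as `(?"a" ?(+"b" +"c"))`.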
fn aggregate_binary_expressions(
    left: UserInputAST,
    others: Vec<(BinaryOperand, UserInputAST)>,
) -> UserInputAST {
    let mut dnf: Vec<Vec<UserInputAST>> = vec![vec![left]];
    for (operator, operand_ast) in others {
        match operator {
            BinaryOperand::And => {
                if let Some(last) = dnf.last_mut() {
                    last.push(operand_ast);
                }
            }
            BinaryOperand::Or => {
                dnf.push(vec![operand_ast]);
            }
        }
    }
    if dnf.len() == 1 {
        UserInputAST::and(dnf.into_iter().next().unwrap()) //< safe
    } else {
        let conjunctions = dnf.into_iter().map(UserInputAST::and).collect();
        UserInputAST::or(conjunctions)
    }
}

fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAST)> {
    (
        binary_operand().skip(spaces()),
        boosted_leaf().skip(spaces()),
    )
}
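// Top-level grammar: a query is either an explicit boolean expression
// (`a AND b`, `a OR b`) or a whitespace-separated list of leaves, each
// optionally prefixed with `+` (must) or `-` (must not).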
pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
        .map(|(left, right)| aggregate_binary_expressions(left, right));
    let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
        |subqueries: Vec<(Option<Occur>, UserInputAST)>| {
            if subqueries.len() == 1 {
                let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
                match occur_opt.unwrap_or(Occur::Should) {
                    Occur::Must | Occur::Should => ast,
                    Occur::MustNot => UserInputAST::Clause(vec![(Some(Occur::MustNot), ast)]),
                }
            } else {
                UserInputAST::Clause(subqueries.into_iter().collect())
            }
        },
    );
    let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
    spaces().with(expr).skip(spaces())
}

pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    spaces()
        .with(optional(ast()).skip(eof()))
        .map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
}

#[cfg(test)]
mod test {

    use super::*;
    use combine::parser::Parser;

    pub fn nearly_equals(a: f64, b: f64) -> bool {
        (a - b).abs() < 0.0005 * (a + b).abs()
    }

    fn assert_nearly_equals(expected: f64, val: f64) {
        assert!(
            nearly_equals(val, expected),
            "Got {}, expected {}.",
            val,
            expected
        );
    }

    #[test]
    fn test_occur_symbol() {
        assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
        assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
    }

    #[test]
    fn test_positive_float_number() {
        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
            let (val, remaining) = positive_float_number().parse(float_str).unwrap();
            assert_eq!(remaining, expected_remaining);
            assert_nearly_equals(val, expected_val);
        }
        fn error_parse(float_str: &str) {
            assert!(positive_float_number().parse(float_str).is_err());
        }
        valid_parse("1.0", 1.0, "");
        valid_parse("1", 1.0, "");
        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
        error_parse(".3332");
        error_parse("1.");
        error_parse("-1.");
    }

    fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
        let query = parse_to_ast().parse(query).unwrap().0;
        let query_str = format!("{:?}", query);
        assert_eq!(query_str, expected);
    }

    fn test_is_parse_err(query: &str) {
        assert!(parse_to_ast().parse(query).is_err());
    }

    #[test]
    fn test_parse_empty_to_ast() {
        test_parse_query_to_ast_helper("", "<emptyclause>");
    }

    #[test]
    fn test_parse_query_to_ast_hyphen() {
        test_parse_query_to_ast_helper("\"www-form-encoded\"", "\"www-form-encoded\"");
        test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
    }

    #[test]
    fn test_parse_query_to_ast_not_op() {
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("NOT")),
            "Err(UnexpectedParse)"
        );
        test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
        test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
    }

    #[test]
    fn test_boosting() {
        assert!(parse_to_ast().parse("a^2^3").is_err());
        assert!(parse_to_ast().parse("a^2^").is_err());
        test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
        test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
        test_parse_query_to_ast_helper("a^1", "\"a\"");
    }

    #[test]
    fn test_parse_query_to_ast_binary_op() {
        test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
        test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
        test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
        test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("a OR b aaa")),
            "Err(UnexpectedParse)"
        );
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("a AND b aaa")),
            "Err(UnexpectedParse)"
        );
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
            "Err(UnexpectedParse)"
        );
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
            "Err(UnexpectedParse)"
        );
    }

    #[test]
    fn test_parse_elastic_query_ranges() {
        test_parse_query_to_ast_helper("title: >a", "title:{\"a\" TO \"*\"}");
        test_parse_query_to_ast_helper("title:>=a", "title:[\"a\" TO \"*\"}");
        test_parse_query_to_ast_helper("title: <a", "title:{\"*\" TO \"a\"}");
        test_parse_query_to_ast_helper("title:<=a", "title:{\"*\" TO \"a\"]");
        test_parse_query_to_ast_helper("title:<=bsd", "title:{\"*\" TO \"bsd\"]");

        test_parse_query_to_ast_helper("weight: >70", "weight:{\"70\" TO \"*\"}");
        test_parse_query_to_ast_helper("weight:>=70", "weight:[\"70\" TO \"*\"}");
        test_parse_query_to_ast_helper("weight: <70", "weight:{\"*\" TO \"70\"}");
        test_parse_query_to_ast_helper("weight:<=70", "weight:{\"*\" TO \"70\"]");
        test_parse_query_to_ast_helper("weight: >60.7", "weight:{\"60.7\" TO \"*\"}");

        test_parse_query_to_ast_helper("weight: <= 70", "weight:{\"*\" TO \"70\"]");

        test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
    }

    #[test]
    fn test_occur_leaf() {
        let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
        assert_eq!(occur, Some(Occur::Must));
        assert_eq!(format!("{:?}", ast), "\"abc\"");
    }

    #[test]
    fn test_range_parser() {
        // Testing the range() parser separately.
        let res = range().parse("title: <hello").unwrap().0;
        let expected = UserInputLeaf::Range {
            field: Some("title".to_string()),
            lower: UserInputBound::Unbounded,
            upper: UserInputBound::Exclusive("hello".to_string()),
        };
        let res2 = range().parse("title:{* TO hello}").unwrap().0;
        assert_eq!(res, expected);
        assert_eq!(res2, expected);
        let expected_weight = UserInputLeaf::Range {
            field: Some("weight".to_string()),
            lower: UserInputBound::Inclusive("71.2".to_string()),
            upper: UserInputBound::Unbounded,
        };

        let res3 = range().parse("weight: >=71.2").unwrap().0;
        let res4 = range().parse("weight:[71.2 TO *}").unwrap().0;
        assert_eq!(res3, expected_weight);
        assert_eq!(res4, expected_weight);
    }

    #[test]
    fn test_parse_query_to_trimming_spaces() {
        test_parse_query_to_ast_helper(" abc", "\"abc\"");
        test_parse_query_to_ast_helper("abc ", "\"abc\"");
        test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
    }

    #[test]
    fn test_parse_query_single_term() {
        test_parse_query_to_ast_helper("abc", "\"abc\"");
    }

    #[test]
    fn test_parse_query_default_clause() {
        test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
    }

    #[test]
    fn test_parse_query_must_default_clause() {
        test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
    }

    #[test]
    fn test_parse_query_must_single_term() {
        test_parse_query_to_ast_helper("+d", "\"d\"");
    }

    #[test]
    fn test_single_term_with_field() {
        test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
    }

    #[test]
    fn test_single_term_with_float() {
        test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
    }

    #[test]
    fn test_must_clause() {
        test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
    }

    #[test]
    fn test_parse_test_query_plus_a_b_plus_d() {
        test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
    }

    #[test]
    fn test_parse_test_query_other() {
        test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
        test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
        test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
        test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
        test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
        test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
        test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
    }

    #[test]
    fn test_parse_query_with_range() {
        test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
        test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
        test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
        test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:{\"*\" TO \"toto\"}");
        test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
        test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
        test_is_parse_err("abc + ");
    }
}
171  query-grammar/src/user_input_ast.rs  Normal file
@@ -0,0 +1,171 @@
use std::fmt;
use std::fmt::{Debug, Formatter};

use crate::Occur;

#[derive(PartialEq)]
pub enum UserInputLeaf {
    Literal(UserInputLiteral),
    All,
    Range {
        field: Option<String>,
        lower: UserInputBound,
        upper: UserInputBound,
    },
}

impl Debug for UserInputLeaf {
    fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> {
        match self {
            UserInputLeaf::Literal(literal) => literal.fmt(formatter),
            UserInputLeaf::Range {
                ref field,
                ref lower,
                ref upper,
            } => {
                if let Some(ref field) = field {
                    write!(formatter, "{}:", field)?;
                }
                lower.display_lower(formatter)?;
                write!(formatter, " TO ")?;
                upper.display_upper(formatter)?;
                Ok(())
            }
            UserInputLeaf::All => write!(formatter, "*"),
        }
    }
}

#[derive(PartialEq)]
pub struct UserInputLiteral {
    pub field_name: Option<String>,
    pub phrase: String,
}

impl fmt::Debug for UserInputLiteral {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self.field_name {
            Some(ref field_name) => write!(formatter, "{}:\"{}\"", field_name, self.phrase),
            None => write!(formatter, "\"{}\"", self.phrase),
        }
    }
}

#[derive(PartialEq)]
pub enum UserInputBound {
    Inclusive(String),
    Exclusive(String),
    Unbounded,
}

impl UserInputBound {
    fn display_lower(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match *self {
            UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
            UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
            UserInputBound::Unbounded => write!(formatter, "{{\"*\""),
        }
    }

    fn display_upper(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match *self {
            UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
            UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
            UserInputBound::Unbounded => write!(formatter, "\"*\"}}"),
        }
    }

    pub fn term_str(&self) -> &str {
        match *self {
            UserInputBound::Inclusive(ref contents) => contents,
            UserInputBound::Exclusive(ref contents) => contents,
            UserInputBound::Unbounded => "*",
        }
    }
}
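/// The parsed query tree: a clause of `(occur, sub-AST)` pairs, a single
/// leaf, or a boosted sub-AST.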
pub enum UserInputAST {
    Clause(Vec<(Option<Occur>, UserInputAST)>),
    Leaf(Box<UserInputLeaf>),
    Boost(Box<UserInputAST>, f64),
}

impl UserInputAST {
    pub fn unary(self, occur: Occur) -> UserInputAST {
        UserInputAST::Clause(vec![(Some(occur), self)])
    }

    fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
        assert_ne!(occur, Occur::MustNot);
        assert!(!asts.is_empty());
        if asts.len() == 1 {
            asts.into_iter().next().unwrap() //< safe
        } else {
            UserInputAST::Clause(
                asts.into_iter()
                    .map(|ast: UserInputAST| (Some(occur), ast))
                    .collect::<Vec<_>>(),
            )
        }
    }

    pub fn empty_query() -> UserInputAST {
        UserInputAST::Clause(Vec::default())
    }

    pub fn and(asts: Vec<UserInputAST>) -> UserInputAST {
        UserInputAST::compose(Occur::Must, asts)
    }

    pub fn or(asts: Vec<UserInputAST>) -> UserInputAST {
        UserInputAST::compose(Occur::Should, asts)
    }
}

impl From<UserInputLiteral> for UserInputLeaf {
    fn from(literal: UserInputLiteral) -> UserInputLeaf {
        UserInputLeaf::Literal(literal)
    }
}

impl From<UserInputLeaf> for UserInputAST {
    fn from(leaf: UserInputLeaf) -> UserInputAST {
        UserInputAST::Leaf(Box::new(leaf))
    }
}

fn print_occur_ast(
    occur_opt: Option<Occur>,
    ast: &UserInputAST,
    formatter: &mut fmt::Formatter,
) -> fmt::Result {
    if let Some(occur) = occur_opt {
        write!(formatter, "{}{:?}", occur, ast)?;
    } else {
        write!(formatter, "*{:?}", ast)?;
    }
    Ok(())
}

impl fmt::Debug for UserInputAST {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            UserInputAST::Clause(ref subqueries) => {
                if subqueries.is_empty() {
                    write!(formatter, "<emptyclause>")?;
                } else {
                    write!(formatter, "(")?;
                    print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
                    for subquery in &subqueries[1..] {
                        write!(formatter, " ")?;
                        print_occur_ast(subquery.0, &subquery.1, formatter)?;
                    }
                    write!(formatter, ")")?;
                }
                Ok(())
            }
            UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
            UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
        }
    }
}