mirror of
https://github.com/quickwit-oss/tantivy.git
synced 2025-12-30 14:02:55 +00:00
Compare commits
437 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4f5ce12a77 | ||
|
|
813efa4ab3 | ||
|
|
c3b6c1dc0b | ||
|
|
6f5e0ef6f4 | ||
|
|
7224f58895 | ||
|
|
49519c3f61 | ||
|
|
cb11b92505 | ||
|
|
7b2dcfbd91 | ||
|
|
d2e30e6681 | ||
|
|
ef109927b3 | ||
|
|
44e5c4dfd3 | ||
|
|
6f223253ea | ||
|
|
f7b0392bd5 | ||
|
|
442bc9a1b8 | ||
|
|
db7d784573 | ||
|
|
79132e803a | ||
|
|
9e132b7dde | ||
|
|
1e55189db1 | ||
|
|
8b1b389a76 | ||
|
|
46f3ec87a5 | ||
|
|
f24e5f405e | ||
|
|
2589be3984 | ||
|
|
a02a9294e4 | ||
|
|
8023445b63 | ||
|
|
05ce093f97 | ||
|
|
6937e23a56 | ||
|
|
974c321153 | ||
|
|
f30ec9b36b | ||
|
|
acd7c1ea2d | ||
|
|
aaeeda2bc5 | ||
|
|
ac4d433fad | ||
|
|
a298c084e6 | ||
|
|
185a72b341 | ||
|
|
bb41ae76f9 | ||
|
|
74d32e522a | ||
|
|
927dd1ee6f | ||
|
|
2c9302290f | ||
|
|
426cc436da | ||
|
|
68d42c9cf2 | ||
|
|
ca49d6130f | ||
|
|
3588ca0561 | ||
|
|
7c6cdcd876 | ||
|
|
71366b9a56 | ||
|
|
a3247ebcfb | ||
|
|
3ec13a8719 | ||
|
|
f8593c76d5 | ||
|
|
f8710bd4b0 | ||
|
|
8d05b8f7b2 | ||
|
|
fc25516b7a | ||
|
|
5b1e71947f | ||
|
|
69351fb4a5 | ||
|
|
3d0082d020 | ||
|
|
8e450c770a | ||
|
|
a757902aed | ||
|
|
b3a8074826 | ||
|
|
4289625348 | ||
|
|
850f10c1fe | ||
|
|
d7f9bfdfc5 | ||
|
|
d0d5db4515 | ||
|
|
303fc7e820 | ||
|
|
744edb2c5c | ||
|
|
2d70efb7b0 | ||
|
|
eb5b2ffdcc | ||
|
|
38513014d5 | ||
|
|
9cb7a0f6e6 | ||
|
|
8d466b8a76 | ||
|
|
413d0e1719 | ||
|
|
0eb3c872fd | ||
|
|
f9203228be | ||
|
|
8f377b92d0 | ||
|
|
1e89f86267 | ||
|
|
d1f61a50c1 | ||
|
|
2bb85ed575 | ||
|
|
236fa74767 | ||
|
|
63b35dd87b | ||
|
|
efb910f4e8 | ||
|
|
aff7e64d4e | ||
|
|
92a3f3981f | ||
|
|
447a9361d8 | ||
|
|
5f59139484 | ||
|
|
27c373d26d | ||
|
|
80ae136646 | ||
|
|
52b1398702 | ||
|
|
7b9cd09a6e | ||
|
|
4c423ad2ca | ||
|
|
9f542d5252 | ||
|
|
77d8e81ae4 | ||
|
|
76e07b9705 | ||
|
|
ea4e9fdaf1 | ||
|
|
e418bee693 | ||
|
|
af4f1a86bc | ||
|
|
753b639454 | ||
|
|
5907a47547 | ||
|
|
586a6e62a2 | ||
|
|
fdae0eff5a | ||
|
|
6eea407f20 | ||
|
|
1ba51d4dc4 | ||
|
|
6e742d5145 | ||
|
|
1843259e91 | ||
|
|
4ebacb7297 | ||
|
|
fb75e60c6e | ||
|
|
04b15c6c11 | ||
|
|
b05b5f5487 | ||
|
|
4fe96483bc | ||
|
|
09e27740e2 | ||
|
|
e51feea574 | ||
|
|
93e7f28cc0 | ||
|
|
8875b9794a | ||
|
|
f26874557e | ||
|
|
a7d10b65ae | ||
|
|
e120e3b7aa | ||
|
|
90fcfb3f43 | ||
|
|
e547e8abad | ||
|
|
5aa4565424 | ||
|
|
3637620187 | ||
|
|
a94679d74d | ||
|
|
a35a8638cc | ||
|
|
97a051996f | ||
|
|
69525cb3c7 | ||
|
|
63867a7150 | ||
|
|
19c073385a | ||
|
|
0521844e56 | ||
|
|
8d4778f94d | ||
|
|
1d5464351d | ||
|
|
522ebdc674 | ||
|
|
4a805733db | ||
|
|
568d149db8 | ||
|
|
4cfc9806c0 | ||
|
|
37042e3ccb | ||
|
|
b316cd337a | ||
|
|
c04991e5ad | ||
|
|
c59b712eeb | ||
|
|
da61baed3b | ||
|
|
b6140d2962 | ||
|
|
6a9a71bb1b | ||
|
|
e8fc4c77e2 | ||
|
|
80837601ea | ||
|
|
2b2703cf51 | ||
|
|
d79018a7f8 | ||
|
|
d8a7c428f7 | ||
|
|
45595234cc | ||
|
|
1bcebdd29e | ||
|
|
ed0333a404 | ||
|
|
ac0b1a21eb | ||
|
|
6bbc789d84 | ||
|
|
87152daef3 | ||
|
|
e0fce4782a | ||
|
|
a633c2a49a | ||
|
|
51623d593e | ||
|
|
29bf740ddf | ||
|
|
511bd25a31 | ||
|
|
66e14ac1b1 | ||
|
|
09e94072ba | ||
|
|
6c68136d31 | ||
|
|
aaf1b2c6b6 | ||
|
|
8a6af2aefa | ||
|
|
7a6e62976b | ||
|
|
2712930bd6 | ||
|
|
cb05f8c098 | ||
|
|
c0c9d04ca9 | ||
|
|
7ea5e740e0 | ||
|
|
2afa6c372a | ||
|
|
c7db8866b5 | ||
|
|
02d992324a | ||
|
|
4ab511ffc6 | ||
|
|
f318172ea4 | ||
|
|
581449a824 | ||
|
|
272589a381 | ||
|
|
73d54c6379 | ||
|
|
3e4606de5d | ||
|
|
020779f61b | ||
|
|
835936585f | ||
|
|
bdd05e97d1 | ||
|
|
2be5f08cd6 | ||
|
|
3f49d65a87 | ||
|
|
f9baf4bcc8 | ||
|
|
7ee93fbed5 | ||
|
|
57a5547ae8 | ||
|
|
c57ab6a335 | ||
|
|
02bfa9be52 | ||
|
|
b3f62b8acc | ||
|
|
2a08c247af | ||
|
|
d2926b6ee0 | ||
|
|
0272167c2e | ||
|
|
a9cf0bde16 | ||
|
|
5a457df45d | ||
|
|
ca76fd5ba0 | ||
|
|
e79a316e41 | ||
|
|
733f54d80e | ||
|
|
7b2b181652 | ||
|
|
b3f39f2343 | ||
|
|
a13122d392 | ||
|
|
113917c521 | ||
|
|
1352b95b07 | ||
|
|
c0538dbe9a | ||
|
|
0d5ea98132 | ||
|
|
0404df3fd5 | ||
|
|
a67caee141 | ||
|
|
f5fb29422a | ||
|
|
4e48bbf0ea | ||
|
|
6fea510869 | ||
|
|
39958ec476 | ||
|
|
36f51e289e | ||
|
|
5c83153035 | ||
|
|
8e407bb314 | ||
|
|
103ba6ba35 | ||
|
|
3965b26cd2 | ||
|
|
1cd0b378fb | ||
|
|
92f383fa51 | ||
|
|
6ae34d2a77 | ||
|
|
1af1f7e0d1 | ||
|
|
feec2e2620 | ||
|
|
3e2ad7542d | ||
|
|
ac02c76b1e | ||
|
|
e5c7c0b8b9 | ||
|
|
49dbe4722f | ||
|
|
f64ff77424 | ||
|
|
2bf93e9e51 | ||
|
|
3dde748b25 | ||
|
|
1dabe26395 | ||
|
|
5590537739 | ||
|
|
ccf0f9cb2f | ||
|
|
e21913ecdc | ||
|
|
2cc826adc7 | ||
|
|
4d90d8fc1d | ||
|
|
0606a8ae73 | ||
|
|
03564214e7 | ||
|
|
4c8f9742f8 | ||
|
|
a23b7a1815 | ||
|
|
6f89a86b14 | ||
|
|
b2beac1203 | ||
|
|
8cd5a2d81d | ||
|
|
b26c22ada0 | ||
|
|
8a35259300 | ||
|
|
db56167a5d | ||
|
|
ab66ffed4e | ||
|
|
e04f2f0b08 | ||
|
|
7a5df33c85 | ||
|
|
ee0873dd07 | ||
|
|
695c8828b8 | ||
|
|
4ff7dc7a4f | ||
|
|
69832bfd03 | ||
|
|
ecbdd70c37 | ||
|
|
fb1b2be782 | ||
|
|
9cd7458978 | ||
|
|
4c4c28e2c4 | ||
|
|
9f9e588905 | ||
|
|
6fd17e0ead | ||
|
|
65dc5b0d83 | ||
|
|
15d15c01f8 | ||
|
|
106832a66a | ||
|
|
477b9136b9 | ||
|
|
7852d097b8 | ||
|
|
0bd56241bb | ||
|
|
54ab897755 | ||
|
|
1369d2d144 | ||
|
|
d3f829dc8a | ||
|
|
e82ccf9627 | ||
|
|
d3d29f7f54 | ||
|
|
3566717979 | ||
|
|
90bc3e3773 | ||
|
|
ffb62b6835 | ||
|
|
4f9ce91d6a | ||
|
|
3c3a2fbfe8 | ||
|
|
0508571d1a | ||
|
|
7b733dd34f | ||
|
|
2c798e3147 | ||
|
|
2c13f210bc | ||
|
|
0dad02791c | ||
|
|
2947364ae1 | ||
|
|
05111599b3 | ||
|
|
83263eabbb | ||
|
|
5cb5c9a8f2 | ||
|
|
9ab92b7739 | ||
|
|
962bddfbbf | ||
|
|
26cfe2909f | ||
|
|
afdfb1a69b | ||
|
|
b26ad1d57a | ||
|
|
1dbd54edbb | ||
|
|
deb04eb090 | ||
|
|
bed34bf502 | ||
|
|
80f1e26c3b | ||
|
|
3e68b61d8f | ||
|
|
95bfb71901 | ||
|
|
74e10843a7 | ||
|
|
1b922e6d23 | ||
|
|
a7c6c31538 | ||
|
|
9d071c8d46 | ||
|
|
04074f7bcb | ||
|
|
8a28d1643d | ||
|
|
44c684af5c | ||
|
|
60279a03b6 | ||
|
|
dc43135fe0 | ||
|
|
ce022e5f06 | ||
|
|
0be977d9eb | ||
|
|
a4ba20eea3 | ||
|
|
4bef6c99ee | ||
|
|
a84871468b | ||
|
|
e0a39fb273 | ||
|
|
35203378ef | ||
|
|
b5bf9bb13c | ||
|
|
ea3349644c | ||
|
|
d4f2e475ff | ||
|
|
17631ed866 | ||
|
|
9eb2d3e8c5 | ||
|
|
afd08a7bbc | ||
|
|
4fc7bc5f09 | ||
|
|
602b9d235f | ||
|
|
b22c6b86c7 | ||
|
|
f0dc0de4b7 | ||
|
|
456dd3a60d | ||
|
|
d768a10077 | ||
|
|
ddb2b8d807 | ||
|
|
45806951b1 | ||
|
|
84a060552d | ||
|
|
68a956c6e7 | ||
|
|
f50f557cfc | ||
|
|
daa19b770a | ||
|
|
e75402be80 | ||
|
|
51cab39186 | ||
|
|
c8e12b6847 | ||
|
|
b44a9cb89d | ||
|
|
e650fab927 | ||
|
|
b12a97abe4 | ||
|
|
2b5a4bbde2 | ||
|
|
2d169c4454 | ||
|
|
66d6e4e246 | ||
|
|
a061ba091d | ||
|
|
92ce9b906b | ||
|
|
1e0ac31e11 | ||
|
|
ebcea0128c | ||
|
|
30075176cb | ||
|
|
7c114b602d | ||
|
|
50659147d1 | ||
|
|
da10fe3b4d | ||
|
|
4db56c6bd8 | ||
|
|
292dd6dcb6 | ||
|
|
37e71f7c63 | ||
|
|
5932278e00 | ||
|
|
202dda98ba | ||
|
|
7c971b5d3b | ||
|
|
77c61ddab2 | ||
|
|
b7f026bab9 | ||
|
|
cc2f78184b | ||
|
|
673423f762 | ||
|
|
7532c4a440 | ||
|
|
324b56a60c | ||
|
|
ac3890f93c | ||
|
|
69b3de43f6 | ||
|
|
3d1196d53e | ||
|
|
a397537ed8 | ||
|
|
ebca904767 | ||
|
|
3a472914ce | ||
|
|
c59507444f | ||
|
|
4b7afa2ae7 | ||
|
|
590a8582c9 | ||
|
|
ab3440f925 | ||
|
|
ec5fb2eaa9 | ||
|
|
15b60d72cc | ||
|
|
7a07144c68 | ||
|
|
8bcfdb8e80 | ||
|
|
a7f10f055d | ||
|
|
597dac9cb6 | ||
|
|
6a002bcc76 | ||
|
|
3a86fc00a2 | ||
|
|
ca1617d3cd | ||
|
|
e4a102d859 | ||
|
|
1d9924ee90 | ||
|
|
f326a2dafe | ||
|
|
78228ece73 | ||
|
|
503d0295cb | ||
|
|
eb39db44fc | ||
|
|
7f78d1f4ca | ||
|
|
df9090cb0b | ||
|
|
4a8eb3cb05 | ||
|
|
a74b41d7ed | ||
|
|
06017bd422 | ||
|
|
17beaab8bf | ||
|
|
062e38a2ab | ||
|
|
8c2b20c496 | ||
|
|
c677eb9f13 | ||
|
|
0f332d1fd3 | ||
|
|
1b45539f32 | ||
|
|
7315000fd4 | ||
|
|
e3d2fca844 | ||
|
|
1c03d98a11 | ||
|
|
8b68f22be1 | ||
|
|
d007cf3435 | ||
|
|
72afbb28c7 | ||
|
|
2fc3a505bc | ||
|
|
e337c35721 | ||
|
|
0c318339b0 | ||
|
|
64fee11bc0 | ||
|
|
e12fc4bb09 | ||
|
|
0820992141 | ||
|
|
09782858da | ||
|
|
ca977fb17b | ||
|
|
e8ecb68f00 | ||
|
|
0ec492dcf2 | ||
|
|
20eb586660 | ||
|
|
6530d43d6a | ||
|
|
926e71a573 | ||
|
|
bacaabf857 | ||
|
|
d6e7157173 | ||
|
|
093dcbd253 | ||
|
|
fba44b78b6 | ||
|
|
01cf303dec | ||
|
|
d5c161e196 | ||
|
|
183d5221b5 | ||
|
|
5a06f45403 | ||
|
|
395cbf3913 | ||
|
|
fe2ddb8844 | ||
|
|
3129701e92 | ||
|
|
56ba698def | ||
|
|
e0ba699c16 | ||
|
|
b6423f9a76 | ||
|
|
a667394a49 | ||
|
|
9f02b090dd | ||
|
|
f07ccd6e4f | ||
|
|
f19f8757de | ||
|
|
f729edb529 | ||
|
|
73ef201c44 | ||
|
|
3b69e790e9 | ||
|
|
1b0b3051c2 | ||
|
|
43c1da1a92 | ||
|
|
e1cb5e299d | ||
|
|
14ebed392b | ||
|
|
d3d34be167 | ||
|
|
98cdc83428 | ||
|
|
4d7d201f21 | ||
|
|
ca5f3e1d46 | ||
|
|
1559733b03 | ||
|
|
44b5f1868c | ||
|
|
4cedfd903d | ||
|
|
c0049e8487 | ||
|
|
e88adbff5c | ||
|
|
e497e04f70 |
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
||||
cpp/* linguist-vendored
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -5,3 +5,7 @@ target/release
|
||||
Cargo.lock
|
||||
benchmark
|
||||
.DS_Store
|
||||
cpp/simdcomp/bitpackingbenchmark
|
||||
*.bk
|
||||
.idea
|
||||
trace.dat
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,3 +0,0 @@
|
||||
[submodule "cpp/simdcomp"]
|
||||
path = cpp/simdcomp
|
||||
url = git@github.com:lemire/simdcomp.git
|
||||
24
.travis.yml
24
.travis.yml
@@ -1,11 +1,7 @@
|
||||
language: rust
|
||||
cache: cargo
|
||||
rust:
|
||||
- nightly
|
||||
git:
|
||||
submodules: false
|
||||
before_install:
|
||||
- sed -i 's/git@github.com:/https:\/\/github.com\//' .gitmodules
|
||||
- git submodule update --init --recursive
|
||||
env:
|
||||
global:
|
||||
- CC=gcc-4.8
|
||||
@@ -16,6 +12,7 @@ addons:
|
||||
apt:
|
||||
sources:
|
||||
- ubuntu-toolchain-r-test
|
||||
- kalakris-cmake
|
||||
packages:
|
||||
- gcc-4.8
|
||||
- g++-4.8
|
||||
@@ -23,18 +20,15 @@ addons:
|
||||
- libelf-dev
|
||||
- libdw-dev
|
||||
- binutils-dev
|
||||
- cmake
|
||||
before_script:
|
||||
- |
|
||||
pip install 'travis-cargo<0.2' --user &&
|
||||
export PATH=$HOME/.local/bin:$PATH
|
||||
cargo install cargo-travis || echo "cargo-travis already installed"
|
||||
export PATH=$HOME/.cargo/bin:$PATH
|
||||
script:
|
||||
- |
|
||||
travis-cargo build &&
|
||||
travis-cargo test &&
|
||||
travis-cargo bench &&
|
||||
travis-cargo doc
|
||||
- cargo build
|
||||
- cargo test
|
||||
- cargo run --example simple_search
|
||||
after_success:
|
||||
- bash ./script/build-doc.sh
|
||||
- cargo coveralls --exclude-pattern cpp/
|
||||
- travis-cargo doc-upload
|
||||
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then travis-cargo coveralls --no-sudo --verify; fi
|
||||
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ./kcov/build/src/kcov --verify --coveralls-id=$TRAVIS_JOB_ID --exclude-pattern=/.cargo target/kcov target/debug/tantivy-*; fi
|
||||
|
||||
100
CHANGELOG.md
Normal file
100
CHANGELOG.md
Normal file
@@ -0,0 +1,100 @@
|
||||
|
||||
Tantivy 0.4.3
|
||||
==========================
|
||||
|
||||
- Bugfix race condition when deleting files. (#198)
|
||||
|
||||
|
||||
Tantivy 0.4.2
|
||||
==========================
|
||||
|
||||
- Prevent usage of AVX2 instructions (#201)
|
||||
|
||||
|
||||
Tantivy 0.4.1
|
||||
==========================
|
||||
|
||||
- Bugfix for non-indexed fields. (#199)
|
||||
|
||||
|
||||
Tantivy 0.4.0
|
||||
==========================
|
||||
|
||||
- Raise the limit of number of fields (previously 256 fields) (@fulmicoton)
|
||||
- Removed u32 fields. They are replaced by u64 and i64 fields (#65) (@fulmicoton)
|
||||
- Optimized skip in SegmentPostings (#130) (@lnicola)
|
||||
- Replacing rustc_serialize by serde. Kudos to @KodrAus and @lnicola
|
||||
- Using error-chain (@KodrAus)
|
||||
- QueryParser: (@fulmicoton)
|
||||
- Explicit error returned when searched for a term that is not indexed
|
||||
- Searching for a int term via the query parser was broken `(age:1)`
|
||||
- Searching for a non-indexed field returns an explicit Error
|
||||
- Phrase query for non-tokenized field are not tokenized by the query parser.
|
||||
- Faster/Better indexing (@fulmicoton)
|
||||
- using murmurhash2
|
||||
- faster merging
|
||||
- more memory efficient fast field writer (@lnicola )
|
||||
- better handling of collisions
|
||||
- lesser memory usage
|
||||
- Added API, most notably to iterate over ranges of terms (@fulmicoton)
|
||||
- Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
|
||||
- Made the doc! macro public (@fulmicoton)
|
||||
- Added an alternative implementation of the streaming dictionary (@fulmicoton)
|
||||
|
||||
|
||||
|
||||
Tantivy 0.3.1
|
||||
==========================
|
||||
|
||||
- Expose a method to trigger files garbage collection
|
||||
|
||||
|
||||
|
||||
Tantivy 0.3
|
||||
==========================
|
||||
|
||||
|
||||
Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
|
||||
for their contribution to this release.
|
||||
|
||||
Thanks also to everyone in tantivy gitter chat
|
||||
for their advise and company :)
|
||||
|
||||
https://gitter.im/tantivy-search/tantivy
|
||||
|
||||
|
||||
Warning:
|
||||
|
||||
Tantivy 0.3 is NOT backward compatible with tantivy 0.2
|
||||
code and index format.
|
||||
You should not expect backward compatibility before
|
||||
tantivy 1.0.
|
||||
|
||||
|
||||
|
||||
New Features
|
||||
------------
|
||||
|
||||
- Delete. You can now delete documents from an index.
|
||||
- Support for windows (Thanks to @lnicola)
|
||||
|
||||
|
||||
Various Bugfixes & small improvements
|
||||
----------------------------------------
|
||||
|
||||
- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
|
||||
Thanks to @KodrAus ! (#108)
|
||||
- Various dependy version update (Thanks to @Ameobea) #76
|
||||
- Fixed several race conditions in `Index.wait_merge_threads`
|
||||
- Fixed #72. Mmap were never released.
|
||||
- Fixed #80. Fast field used to take an amplitude of 32 bits after a merge. (Ouch!)
|
||||
- Fixed #92. u32 are now encoded using big endian in the fst
|
||||
in order to make there enumeration consistent with
|
||||
the natural ordering.
|
||||
- Building binary targets for tantivy-cli (Thanks to @KodrAus)
|
||||
- Misc invisible bug fixes, and code cleanup.
|
||||
- Use
|
||||
|
||||
|
||||
|
||||
|
||||
65
Cargo.toml
65
Cargo.toml
@@ -1,46 +1,58 @@
|
||||
[package]
|
||||
name = "tantivy"
|
||||
version = "0.1.1"
|
||||
version = "0.5.0-dev"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
build = "build.rs"
|
||||
license = "MIT"
|
||||
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
description = """Tantivy is a search engine library."""
|
||||
|
||||
documentation = "http://fulmicoton.com/tantivy/tantivy/index.html"
|
||||
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
|
||||
homepage = "https://github.com/tantivy-search/tantivy"
|
||||
repository = "https://github.com/tantivy-search/tantivy"
|
||||
|
||||
readme = "README.md"
|
||||
keywords = ["search", "information", "retrieval"]
|
||||
|
||||
[dependencies]
|
||||
byteorder = "0.4"
|
||||
memmap = "0.2"
|
||||
lazy_static = "0.1"
|
||||
regex = "0.1"
|
||||
fst = "0.1"
|
||||
atomicwrites = "0.0.14"
|
||||
tempfile = "2.0"
|
||||
rustc-serialize = "0.3"
|
||||
log = "0.3"
|
||||
combine = "2.0.*"
|
||||
byteorder = "1.0"
|
||||
lazy_static = "0.2.1"
|
||||
tinysegmenter = "0.1.0"
|
||||
regex = "0.2"
|
||||
fst = "0.2"
|
||||
atomicwrites = "0.1.3"
|
||||
tempfile = "2.1"
|
||||
log = "0.3.6"
|
||||
combine = "2.2"
|
||||
tempdir = "0.3"
|
||||
bincode = "0.4"
|
||||
libc = {version = "0.2.6", optional=true}
|
||||
num_cpus = "0.2"
|
||||
itertools = "0.4"
|
||||
lz4 = "1.13"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
bincode = "0.8"
|
||||
libc = {version = "0.2.20", optional=true}
|
||||
num_cpus = "1.2"
|
||||
itertools = "0.5.9"
|
||||
lz4 = "1.20"
|
||||
bit-set = "0.4.0"
|
||||
time = "0.1"
|
||||
uuid = "0.1"
|
||||
uuid = { version = "0.5", features = ["v4", "serde"] }
|
||||
chan = "0.1"
|
||||
crossbeam = "0.2"
|
||||
version = "2"
|
||||
crossbeam = "0.3"
|
||||
futures = "0.1"
|
||||
futures-cpupool = "0.1"
|
||||
error-chain = "0.8"
|
||||
owning_ref = "0.3"
|
||||
stable_deref_trait = "1.0.0"
|
||||
rust-stemmers = "0.1.0"
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
winapi = "0.2"
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.3"
|
||||
env_logger = "0.4"
|
||||
|
||||
[build-dependencies]
|
||||
gcc = {version = "0.3", optional=true}
|
||||
cc = {version = "1.0.0", optional=true}
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
@@ -51,4 +63,9 @@ debug-assertions = false
|
||||
|
||||
[features]
|
||||
default = ["simdcompression"]
|
||||
simdcompression = ["libc", "gcc"]
|
||||
simdcompression = ["libc", "cc"]
|
||||
streamdict = []
|
||||
|
||||
|
||||
[badges]
|
||||
travis-ci = { repository = "tantivy-search/tantivy" }
|
||||
|
||||
41
README.md
41
README.md
@@ -1,45 +1,50 @@
|
||||

|
||||
|
||||
[](https://travis-ci.org/tantivy-search/tantivy)
|
||||
[](https://coveralls.io/github/tantivy-search/tantivy?branch=master)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://coveralls.io/github/tantivy-search/tantivy?branch=master)
|
||||
[](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://ci.appveyor.com/project/fulmicoton/tantivy)
|
||||

|
||||
|
||||
**Tantivy** is a **full text search engine library** written in rust.
|
||||
|
||||
It is strongly inspired by Lucene's design.
|
||||
|
||||
|
||||
# Features
|
||||
|
||||
- configurable indexing (optional term frequency and position indexing)
|
||||
- tf-idf scoring
|
||||
- Basic query language
|
||||
- Phrase queries
|
||||
- Incremental indexing
|
||||
- Multithreaded indexing (indexing English Wikipedia takes 4 minutes on my desktop)
|
||||
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
|
||||
- mmap based
|
||||
- SIMD integer compression
|
||||
- u32 fast fields (equivalent of doc values in Lucene)
|
||||
- optional SIMD integer compression
|
||||
- u64 and i64 fast fields (equivalent of doc values in Lucene)
|
||||
- LZ4 compressed document store
|
||||
- Cheesy logo with a horse
|
||||
|
||||
Tantivy supports Linux, MacOS and Windows.
|
||||
|
||||
|
||||
# Getting started
|
||||
|
||||
- [tantivy's usage example](http://fulmicoton.com/tantivy-examples/simple_search.html)
|
||||
- [tantivy-cli and its tutorial](https://github.com/fulmicoton/tantivy-cli).
|
||||
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
|
||||
It will walk you through getting a wikipedia search engine up and running in a few minutes.
|
||||
- [reference doc](http://fulmicoton.com/tantivy/tantivy/index.html).
|
||||
- [reference doc]
|
||||
- [For the last released version](https://docs.rs/tantivy/)
|
||||
- [For the last master branch](https://tantivy-search.github.io/tantivy/tantivy/index.html)
|
||||
|
||||
# Compiling
|
||||
|
||||
# Compiling
|
||||
Tantivy requires Rust Nightly because it uses requires the features [`box_syntax`](https://doc.rust-lang.org/stable/book/box-syntax-and-patterns.html), [`optin_builtin_traits`](https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md), and [`conservative_impl_trait`](https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md).
|
||||
The project can then be built using `cargo`.
|
||||
|
||||
By default, `tantivy` uses a git submodule called `simdcomp`.
|
||||
After cloning the repository, you will need to initialize and update
|
||||
the submodules. The project can then be built using `cargo`.
|
||||
|
||||
git clone git@github.com:fulmicoton/tantivy.git
|
||||
git submodule init
|
||||
git submodule update
|
||||
git clone git@github.com:tantivy-search/tantivy.git
|
||||
cd tantivy
|
||||
cargo build
|
||||
|
||||
|
||||
@@ -47,9 +52,9 @@ Alternatively, if you are trying to compile `tantivy` without simd compression,
|
||||
you can disable this functionality. In this case, this submodule is not required
|
||||
and you can compile tantivy by using the `--no-default-features` flag.
|
||||
|
||||
cargo build --no-default-features
|
||||
cargo build --no-default-features
|
||||
|
||||
|
||||
# Contribute
|
||||
|
||||
Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
|
||||
Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
|
||||
|
||||
25
appveyor.yml
Normal file
25
appveyor.yml
Normal file
@@ -0,0 +1,25 @@
|
||||
# Appveyor configuration template for Rust using rustup for Rust installation
|
||||
# https://github.com/starkat99/appveyor-rust
|
||||
|
||||
os: Visual Studio 2015
|
||||
environment:
|
||||
matrix:
|
||||
- channel: nightly
|
||||
target: x86_64-pc-windows-msvc
|
||||
- channel: nightly
|
||||
target: x86_64-pc-windows-gnu
|
||||
msys_bits: 64
|
||||
|
||||
install:
|
||||
- appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
|
||||
- rustup-init -yv --default-toolchain %channel% --default-host %target%
|
||||
- set PATH=%PATH%;%USERPROFILE%\.cargo\bin
|
||||
- if defined msys_bits set PATH=%PATH%;C:\msys64\mingw%msys_bits%\bin
|
||||
- rustc -vV
|
||||
- cargo -vV
|
||||
|
||||
build: false
|
||||
|
||||
test_script:
|
||||
- REM SET RUST_LOG=tantivy,test & cargo test --verbose
|
||||
- REM SET RUST_BACKTRACE=1 & cargo run --example simple_search
|
||||
77
build.rs
77
build.rs
@@ -1,40 +1,61 @@
|
||||
#[cfg(feature= "simdcompression")]
|
||||
#[cfg(feature = "simdcompression")]
|
||||
mod build {
|
||||
extern crate gcc;
|
||||
|
||||
use std::process::Command;
|
||||
extern crate cc;
|
||||
|
||||
pub fn build() {
|
||||
Command::new("make")
|
||||
.current_dir("cpp/simdcomp")
|
||||
.output()
|
||||
.unwrap_or_else(|e| { panic!("Failed to make simdcomp: {}", e) });
|
||||
gcc::Config::new()
|
||||
.cpp(true)
|
||||
.flag("-std=c++11")
|
||||
.flag("-O3")
|
||||
.flag("-mssse3")
|
||||
.include("./cpp/simdcomp/include")
|
||||
.object("cpp/simdcomp/avxbitpacking.o")
|
||||
.object("cpp/simdcomp/simdintegratedbitpacking.o")
|
||||
.object("cpp/simdcomp/simdbitpacking.o")
|
||||
.object("cpp/simdcomp/simdpackedsearch.o")
|
||||
.object("cpp/simdcomp/simdcomputil.o")
|
||||
.object("cpp/simdcomp/simdpackedselect.o")
|
||||
.object("cpp/simdcomp/simdfor.o")
|
||||
.file("cpp/simdcomp_wrapper.cpp")
|
||||
.compile("libsimdcomp.a");
|
||||
println!("cargo:rustc-flags=-l dylib=stdc++");
|
||||
let mut config = cc::Build::new();
|
||||
config
|
||||
.include("./cpp/simdcomp/include")
|
||||
.file("cpp/simdcomp/src/avxbitpacking.c")
|
||||
.file("cpp/simdcomp/src/simdintegratedbitpacking.c")
|
||||
.file("cpp/simdcomp/src/simdbitpacking.c")
|
||||
.file("cpp/simdcomp/src/simdpackedsearch.c")
|
||||
.file("cpp/simdcomp/src/simdcomputil.c")
|
||||
.file("cpp/simdcomp/src/simdpackedselect.c")
|
||||
.file("cpp/simdcomp/src/simdfor.c")
|
||||
.file("cpp/simdcomp_wrapper.c");
|
||||
|
||||
if !cfg!(debug_assertions) {
|
||||
config.opt_level(3);
|
||||
|
||||
if cfg!(target_env = "msvc") {
|
||||
config
|
||||
.define("NDEBUG", None)
|
||||
.flag("/Gm-")
|
||||
.flag("/GS-")
|
||||
.flag("/Gy")
|
||||
.flag("/Oi")
|
||||
.flag("/GL");
|
||||
}
|
||||
}
|
||||
|
||||
if !cfg!(target_env = "msvc") {
|
||||
config
|
||||
.include("./cpp/streamvbyte/include")
|
||||
.file("cpp/streamvbyte/src/streamvbyte.c")
|
||||
.file("cpp/streamvbyte/src/streamvbytedelta.c")
|
||||
.flag("-msse4.1")
|
||||
.flag("-march=native")
|
||||
.flag("-std=c99");
|
||||
}
|
||||
|
||||
config.compile("libsimdcomp.a");
|
||||
|
||||
// Workaround for linking static libraries built with /GL
|
||||
// https://github.com/rust-lang/rust/issues/26003
|
||||
if !cfg!(debug_assertions) && cfg!(target_env = "msvc") {
|
||||
println!("cargo:rustc-link-lib=dylib=simdcomp");
|
||||
}
|
||||
|
||||
println!("cargo:rerun-if-changed=cpp");
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature= "simdcompression"))]
|
||||
#[cfg(not(feature = "simdcompression"))]
|
||||
mod build {
|
||||
pub fn build() {
|
||||
}
|
||||
pub fn build() {}
|
||||
}
|
||||
|
||||
|
||||
fn main() {
|
||||
build::build();
|
||||
}
|
||||
|
||||
1
cpp/simdcomp
vendored
1
cpp/simdcomp
vendored
Submodule cpp/simdcomp deleted from 0dca28668f
9
cpp/simdcomp/.gitignore
vendored
Normal file
9
cpp/simdcomp/.gitignore
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
Makefile.in
|
||||
lib*
|
||||
unit*
|
||||
*.o
|
||||
src/*.lo
|
||||
src/*.o
|
||||
src/.deps
|
||||
src/.dirstamp
|
||||
src/.libs
|
||||
11
cpp/simdcomp/.travis.yml
Normal file
11
cpp/simdcomp/.travis.yml
Normal file
@@ -0,0 +1,11 @@
|
||||
language: c
|
||||
sudo: false
|
||||
compiler:
|
||||
- gcc
|
||||
- clang
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make && ./unit
|
||||
9
cpp/simdcomp/CHANGELOG
Normal file
9
cpp/simdcomp/CHANGELOG
Normal file
@@ -0,0 +1,9 @@
|
||||
Upcoming
|
||||
- added missing include
|
||||
- improved portability (MSVC)
|
||||
- implemented C89 compatibility
|
||||
Version 0.0.3 (19 May 2014)
|
||||
- improved documentation
|
||||
Version 0.0.2 (6 February 2014)
|
||||
- added go demo
|
||||
Version 0.0.1 (5 February 2014)
|
||||
27
cpp/simdcomp/LICENSE
Normal file
27
cpp/simdcomp/LICENSE
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014--, The authors
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the {organization} nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
137
cpp/simdcomp/README.md
Normal file
137
cpp/simdcomp/README.md
Normal file
@@ -0,0 +1,137 @@
|
||||
The SIMDComp library
|
||||
====================
|
||||
[](https://travis-ci.org/lemire/simdcomp)
|
||||
|
||||
A simple C library for compressing lists of integers using binary packing and SIMD instructions.
|
||||
The assumption is either that you have a list of 32-bit integers where most of them are small, or a list of 32-bit integers where differences between successive integers are small. No software is able to reliably compress an array of 32-bit random numbers.
|
||||
|
||||
This library can decode at least 4 billions of compressed integers per second on most
|
||||
desktop or laptop processors. That is, it can decompress data at a rate of 15 GB/s.
|
||||
This is significantly faster than generic codecs like gzip, LZO, Snappy or LZ4.
|
||||
|
||||
On a Skylake Intel processor, it can decode integers at a rate 0.3 cycles per integer,
|
||||
which can easily translate into more than 8 decoded billions integers per second.
|
||||
|
||||
Contributors: Daniel Lemire, Nathan Kurz, Christoph Rupp, Anatol Belski, Nick White and others
|
||||
|
||||
What is it for?
|
||||
-------------
|
||||
|
||||
This is a low-level library for fast integer compression. By design it does not define a compressed
|
||||
format. It is up to the (sophisticated) user to create a compressed format.
|
||||
|
||||
Requirements
|
||||
-------------
|
||||
|
||||
- Your processor should support SSE4.1 (It is supported by most Intel and AMD processors released since 2008.)
|
||||
- It is possible to build the core part of the code if your processor support SSE2 (Pentium4 or better)
|
||||
- C99 compliant compiler (GCC is assumed)
|
||||
- A Linux-like distribution is assumed by the makefile
|
||||
|
||||
For a plain C version that does not use SIMD instructions, see https://github.com/lemire/LittleIntPacker
|
||||
|
||||
Usage
|
||||
-------
|
||||
|
||||
Compression works over blocks of 128 integers.
|
||||
|
||||
For a complete working example, see example.c (you can build it and
|
||||
run it with "make example; ./example").
|
||||
|
||||
|
||||
|
||||
1) Lists of integers in random order.
|
||||
|
||||
```C
|
||||
const uint32_t b = maxbits(datain);// computes bit width
|
||||
simdpackwithoutmask(datain, buffer, b);//compressed to buffer, compressing 128 32-bit integers down to b*32 bytes
|
||||
simdunpack(buffer, backbuffer, b);//uncompressed to backbuffer
|
||||
```
|
||||
|
||||
While 128 32-bit integers are read, only b 128-bit words are written. Thus, the compression ratio is 32/b.
|
||||
|
||||
2) Sorted lists of integers.
|
||||
|
||||
We used differential coding: we store the difference between successive integers. For this purpose, we need an initial value (called offset).
|
||||
|
||||
```C
|
||||
uint32_t offset = 0;
|
||||
uint32_t b1 = simdmaxbitsd1(offset,datain); // bit width
|
||||
simdpackwithoutmaskd1(offset, datain, buffer, b1);//compressing 128 32-bit integers down to b1*32 bytes
|
||||
simdunpackd1(offset, buffer, backbuffer, b1);//uncompressed
|
||||
```
|
||||
|
||||
General example for arrays of arbitrary length:
|
||||
```C
|
||||
int compress_decompress_demo() {
|
||||
size_t k, N = 9999;
|
||||
__m128i * endofbuf;
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint8_t * buffer;
|
||||
uint32_t * backbuffer = malloc(N * sizeof(uint32_t));
|
||||
uint32_t b;
|
||||
|
||||
for (k = 0; k < N; ++k){ /* start with k=0, not k=1! */
|
||||
datain[k] = k;
|
||||
}
|
||||
|
||||
b = maxbits_length(datain, N);
|
||||
buffer = malloc(simdpack_compressedbytes(N,b)); // allocate just enough memory
|
||||
endofbuf = simdpack_length(datain, N, (__m128i *)buffer, b);
|
||||
/* compressed data is stored between buffer and endofbuf using (endofbuf-buffer)*sizeof(__m128i) bytes */
|
||||
/* would be safe to do : buffer = realloc(buffer,(endofbuf-(__m128i *)buffer)*sizeof(__m128i)); */
|
||||
simdunpack_length((const __m128i *)buffer, N, backbuffer, b);
|
||||
|
||||
for (k = 0; k < N; ++k){
|
||||
if(datain[k] != backbuffer[k]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
3) Frame-of-Reference
|
||||
|
||||
We also have frame-of-reference (FOR) functions (see simdfor.h header). They work like the bit packing
|
||||
routines, but do not use differential coding so they allow faster search in some cases, at the expense
|
||||
of compression.
|
||||
|
||||
Setup
|
||||
---------
|
||||
|
||||
|
||||
make
|
||||
make test
|
||||
|
||||
and if you are daring:
|
||||
|
||||
make install
|
||||
|
||||
Go
|
||||
--------
|
||||
|
||||
If you are a go user, there is a "go" folder where you will find a simple demo.
|
||||
|
||||
Other libraries
|
||||
----------------
|
||||
|
||||
* Fast decoder for VByte-compressed integers https://github.com/lemire/MaskedVByte
|
||||
* Fast integer compression in C using StreamVByte https://github.com/lemire/streamvbyte
|
||||
* FastPFOR is a C++ research library well suited to compress unsorted arrays: https://github.com/lemire/FastPFor
|
||||
* SIMDCompressionAndIntersection is a C++ research library well suited for sorted arrays (differential coding)
|
||||
and computing intersections: https://github.com/lemire/SIMDCompressionAndIntersection
|
||||
* TurboPFor is a C library that offers lots of interesting optimizations. Well worth checking! (GPL license) https://github.com/powturbo/TurboPFor
|
||||
* Oroch is a C++ library that offers a usable API (MIT license) https://github.com/ademakov/Oroch
|
||||
|
||||
|
||||
References
|
||||
------------
|
||||
|
||||
* Daniel Lemire, Leonid Boytsov, Nathan Kurz, SIMD Compression and the Intersection of Sorted Integers, Software Practice & Experience 46 (6) 2016. http://arxiv.org/abs/1401.6399
|
||||
* Daniel Lemire and Leonid Boytsov, Decoding billions of integers per second through vectorization, Software Practice & Experience 45 (1), 2015. http://arxiv.org/abs/1209.2137 http://onlinelibrary.wiley.com/doi/10.1002/spe.2203/abstract
|
||||
* Jeff Plaisance, Nathan Kurz, Daniel Lemire, Vectorized VByte Decoding, International Symposium on Web Algorithms 2015, 2015. http://arxiv.org/abs/1503.07387
|
||||
* Wayne Xin Zhao, Xudong Zhang, Daniel Lemire, Dongdong Shan, Jian-Yun Nie, Hongfei Yan, Ji-Rong Wen, A General SIMD-based Approach to Accelerating Compression Algorithms, ACM Transactions on Information Systems 33 (3), 2015. http://arxiv.org/abs/1502.01916
|
||||
* T. D. Wu, Bitpacking techniques for indexing genomes: I. Hash tables, Algorithms for Molecular Biology 11 (5), 2016. http://almob.biomedcentral.com/articles/10.1186/s13015-016-0069-5
|
||||
235
cpp/simdcomp/benchmarks/benchmark.c
Normal file
235
cpp/simdcomp/benchmarks/benchmark.c
Normal file
@@ -0,0 +1,235 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "simdcomp.h"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# include <windows.h>
|
||||
|
||||
__int64 freq;
|
||||
|
||||
typedef __int64 time_snap_t;
|
||||
|
||||
static time_snap_t time_snap(void)
|
||||
{
|
||||
__int64 now;
|
||||
|
||||
QueryPerformanceCounter((LARGE_INTEGER *)&now);
|
||||
|
||||
return (__int64)((now*1000000)/freq);
|
||||
}
|
||||
# define TIME_SNAP_FMT "%I64d"
|
||||
#else
|
||||
# define time_snap clock
|
||||
# define TIME_SNAP_FMT "%lu"
|
||||
typedef clock_t time_snap_t;
|
||||
#endif
|
||||
|
||||
|
||||
/* Benchmarks simdselectd1() (random access into a delta-packed block of 128
 * sorted integers) against the naive alternative of fully decoding the block
 * with simdunpackd1() and indexing, for every bit width 0..32.
 * Prints one timing line per bit width; correctness is spot-checked with
 * assert() on every retrieved value. */
void benchmarkSelect() {
    uint32_t buffer[128];
    uint32_t backbuffer[128];
    uint32_t initial = 33;
    uint32_t b;
    time_snap_t S1, S2, S3;
    int i;
    printf("benchmarking select \n");

    /* this test creates delta encoded buffers with different bits, then
     * performs lower bound searches for each key */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = initial;
        /* NOTE(review): out is cast to __m128i* below; a plain uint32_t
         * array is not guaranteed 16-byte aligned by the C standard —
         * confirm the target compilers align it adequately. */
        uint32_t out[128];
        /* initialize the buffer with pseudo-random deltas of at most b bits */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)(1655765 * i )) ;
            if(b < 32) buffer[i] %= (1<<b);
        }
        /* prefix-sum the deltas into an increasing sequence */
        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }

        /* clamp so the sequence stays non-decreasing even if the sums wrapped */
        for (i = 1; i < 128; i++) {
            if(buffer[i] < buffer[i-1] )
                buffer[i] = buffer[i-1];
        }
        assert(simdmaxbitsd1(initial, buffer)<=b);

        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);

        S1 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            /* fast path: extract one value without decoding the block */
            uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i % 128);
            assert(valretrieved == buffer[i%128]);
        }
        S2 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            /* naive path: decode the whole block, then index */
            simdunpackd1(initial, (__m128i *)out, backbuffer, b);
            assert(backbuffer[i % 128] == buffer[i % 128]);
        }
        S3 = time_snap();
        printf("bit width = %d, fast select function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2));
    }
}
|
||||
|
||||
/* qsort()-compatible comparator for uint32_t values: returns a negative,
 * zero, or positive value for less-than, equal, greater-than.
 * Explicit comparisons (rather than subtraction) avoid any wraparound
 * misordering on large unsigned values. */
int uint32_cmp(const void *a, const void *b)
{
    const uint32_t lhs = *(const uint32_t *)a;
    const uint32_t rhs = *(const uint32_t *)b;
    return (lhs < rhs) ? -1 : ((lhs > rhs) ? 1 : 0);
}
|
||||
|
||||
/* adapted from wikipedia */
|
||||
/* Binary search over a sorted array A on the range [imin, imax).
 * Returns the index of an occurrence of key when one is hit during
 * bisection; otherwise returns the shrunken upper index (callers must
 * verify the element themselves — there is no "not found" sentinel).
 * Adapted from wikipedia. */
int binary_search(uint32_t * A, uint32_t key, int imin, int imax)
{
    imax--; /* switch to an inclusive upper index */
    while (imax - imin > 1) {
        const int imid = imin + (imax - imin) / 2;
        if (A[imid] > key) {
            imax = imid;        /* key, if present, lies below imid */
        } else if (A[imid] < key) {
            imin = imid;        /* key, if present, lies above imid */
        } else {
            return imid;        /* exact hit */
        }
    }
    return imax;
}
|
||||
|
||||
|
||||
/* adapted from wikipedia */
|
||||
/* Lower-bound search over a sorted array A on the range [imin, imax):
 * bisects toward the first element >= key. When even the last probed
 * element is smaller than key, the final upper index is returned.
 * Adapted from wikipedia. */
int lower_bound(uint32_t * A, uint32_t key, int imin, int imax)
{
    imax--; /* inclusive upper index */
    while (imax - imin > 1) {
        const int imid = imin + (imax - imin) / 2;
        if (A[imid] >= key)
            imax = imid;   /* imid is a candidate answer */
        else
            imin = imid;   /* everything at or below imid is too small */
    }
    return (A[imin] >= key) ? imin : imax;
}
|
||||
|
||||
void benchmarkSearch() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t backbuffer[128];
|
||||
uint32_t out[128];
|
||||
uint32_t result, initial = 0;
|
||||
uint32_t b, i;
|
||||
time_snap_t S1, S2, S3, S4;
|
||||
|
||||
printf("benchmarking search \n");
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 0; b <= 32; b++) {
|
||||
uint32_t prev = initial;
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = ((uint32_t)rand()) ;
|
||||
if(b < 32) buffer[i] %= (1<<b);
|
||||
}
|
||||
|
||||
qsort(buffer,128, sizeof(uint32_t), uint32_cmp);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = buffer[i] + prev;
|
||||
prev = buffer[i];
|
||||
}
|
||||
for (i = 1; i < 128; i++) {
|
||||
if(buffer[i] < buffer[i-1] )
|
||||
buffer[i] = buffer[i-1];
|
||||
}
|
||||
assert(simdmaxbitsd1(initial, buffer)<=b);
|
||||
for (i = 0; i < 128; i++) {
|
||||
out[i] = 0; /* memset would do too */
|
||||
}
|
||||
|
||||
/* delta-encode to 'i' bits */
|
||||
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
|
||||
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
assert(buffer[i] == backbuffer[i]);
|
||||
}
|
||||
S1 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
|
||||
int pos;
|
||||
uint32_t pseudorandomkey = buffer[i%128];
|
||||
__m128i vecinitial = _mm_set1_epi32(initial);
|
||||
pos = simdsearchd1(&vecinitial, (__m128i *)out, b,
|
||||
pseudorandomkey, &result);
|
||||
if((result < pseudorandomkey) || (buffer[pos] != result)) {
|
||||
printf("bug A.\n");
|
||||
} else if (pos > 0) {
|
||||
if(buffer[pos-1] >= pseudorandomkey)
|
||||
printf("bug B.\n");
|
||||
}
|
||||
}
|
||||
S2 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
int pos;
|
||||
uint32_t pseudorandomkey = buffer[i%128];
|
||||
simdunpackd1(initial, (__m128i *)out, backbuffer, b);
|
||||
pos = lower_bound(backbuffer, pseudorandomkey, 0, 128);
|
||||
result = backbuffer[pos];
|
||||
|
||||
if((result < pseudorandomkey) || (buffer[pos] != result)) {
|
||||
printf("bug C.\n");
|
||||
} else if (pos > 0) {
|
||||
if(buffer[pos-1] >= pseudorandomkey)
|
||||
printf("bug D.\n");
|
||||
}
|
||||
}
|
||||
S3 = time_snap();
|
||||
for (i = 0; i < 128 * 10; i++) {
|
||||
|
||||
int pos;
|
||||
uint32_t pseudorandomkey = buffer[i%128];
|
||||
pos = simdsearchwithlengthd1(initial, (__m128i *)out, b, 128,
|
||||
pseudorandomkey, &result);
|
||||
if((result < pseudorandomkey) || (buffer[pos] != result)) {
|
||||
printf("bug A.\n");
|
||||
} else if (pos > 0) {
|
||||
if(buffer[pos-1] >= pseudorandomkey)
|
||||
printf("bug B.\n");
|
||||
}
|
||||
}
|
||||
S4 = time_snap();
|
||||
|
||||
printf("bit width = %d, fast search function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " , fast with length time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2), (S4-S3) );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Entry point for benchmark.c. On MSVC/Windows the QueryPerformanceCounter
 * frequency is captured once into the global "freq" so that time_snap()
 * can convert ticks to microseconds; elsewhere time_snap is clock().
 * Then both benchmark suites run. */
int main() {
#ifdef _MSC_VER
    QueryPerformanceFrequency((LARGE_INTEGER *)&freq);
#endif
    benchmarkSearch();
    benchmarkSelect();
    return 0;
}
|
||||
205
cpp/simdcomp/benchmarks/bitpackingbenchmark.c
Normal file
205
cpp/simdcomp/benchmarks/bitpackingbenchmark.c
Normal file
@@ -0,0 +1,205 @@
|
||||
#include <stdio.h>
|
||||
|
||||
#include "simdcomp.h"
|
||||
|
||||
|
||||
/* Serialized cycle-count macros (x86/x86-64, GCC-style inline asm).
 * RDTSC_START: CPUID serializes the pipeline, then RDTSC reads the
 * time-stamp counter, so earlier instructions cannot drift into the
 * timed region. RDTSC_FINAL: RDTSCP waits for prior instructions to
 * retire before reading the counter, then CPUID keeps later instructions
 * from moving up. Both store the 64-bit tick count into "cycles".
 * The clobber list covers the registers CPUID/RDTSC(P) overwrite. */
#define RDTSC_START(cycles)                                                  \
    do {                                                                     \
        register unsigned cyc_high, cyc_low;                                 \
        __asm volatile(                                                      \
            "cpuid\n\t"                                                      \
            "rdtsc\n\t"                                                      \
            "mov %%edx, %0\n\t"                                              \
            "mov %%eax, %1\n\t"                                              \
            : "=r"(cyc_high), "=r"(cyc_low)::"%rax", "%rbx", "%rcx", "%rdx"); \
        (cycles) = ((uint64_t)cyc_high << 32) | cyc_low;                     \
    } while (0)

#define RDTSC_FINAL(cycles)                                                  \
    do {                                                                     \
        register unsigned cyc_high, cyc_low;                                 \
        __asm volatile(                                                      \
            "rdtscp\n\t"                                                     \
            "mov %%edx, %0\n\t"                                              \
            "mov %%eax, %1\n\t"                                              \
            "cpuid\n\t"                                                      \
            : "=r"(cyc_high), "=r"(cyc_low)::"%rax", "%rbx", "%rcx", "%rdx"); \
        (cycles) = ((uint64_t)cyc_high << 32) | cyc_low;                     \
    } while (0)
|
||||
|
||||
|
||||
|
||||
|
||||
/* Heap-allocates "length" pseudo-random integers, each masked down to its
 * "bit" low-order bits. The caller owns the returned buffer (free()).
 * NOTE: bit == 32 yields a full mask via the 64-bit shift. */
uint32_t * get_random_array_from_bit_width(uint32_t length, uint32_t bit) {
    const uint32_t mask = (uint32_t) ((UINT64_C(1) << bit) - 1);
    uint32_t * values = malloc(sizeof(uint32_t) * length);
    uint32_t idx;
    for (idx = 0; idx < length; ++idx)
        values[idx] = rand() & mask;
    return values;
}
|
||||
|
||||
/* Heap-allocates "length" integers forming a (modulo 2^32) non-decreasing
 * sequence whose successive gaps fit in "bit" bits — i.e. data suited to
 * differential coding. The caller owns the returned buffer (free()). */
uint32_t * get_random_array_from_bit_width_d1(uint32_t length, uint32_t bit) {
    const uint32_t mask = (uint32_t) ((UINT64_C(1) << bit) - 1);
    uint32_t * values = malloc(sizeof(uint32_t) * length);
    uint32_t idx;
    values[0] = rand() & mask;
    for (idx = 1; idx < length; ++idx)
        values[idx] = values[idx - 1] + (rand() & mask);
    return values;
}
|
||||
|
||||
|
||||
/* Benchmarks simdpackwithoutmask()/simdunpack() over one 128-integer block
 * for every bit width 1..32, reporting the best-of-500 cycles-per-integer
 * (minimum filters out scheduler/interrupt noise). Output is gnuplot-ready.
 * NOTE(review): this file only #includes <stdio.h> and "simdcomp.h", so
 * malloc/free/rand arrive transitively — an explicit <stdlib.h> would be
 * safer. */
void demo128() {
    const uint32_t length = 128;
    uint32_t bit;
    printf("# --- %s\n", __func__);
    printf("# compressing %d integers\n",length);
    printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
    for(bit = 1; bit <= 32; ++bit) {
        uint32_t i;

        uint32_t * data = get_random_array_from_bit_width(length, bit);
        /* bit * 16 bytes are actually used; length * 4 bytes is always enough */
        __m128i * buffer = malloc(length * sizeof(uint32_t));
        uint32_t * backdata = malloc(length * sizeof(uint32_t));
        uint32_t repeat = 500;
        uint64_t min_diff;
        printf("%d\t",bit);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            /* compiler barrier: keep the timed call from being reordered */
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdpackwithoutmask(data,buffer, bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdunpack(buffer, backdata,bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);

        free(data);
        free(buffer);
        free(backdata);
        printf("\n");
    }
    printf("\n\n"); /* two blank lines are required by gnuplot */
}
|
||||
|
||||
/* Same benchmark as demo128(), but for the differential-coded variants
 * simdpackwithoutmaskd1()/simdunpackd1() (initial offset 0) on data whose
 * deltas fit the given bit width. Best-of-500 cycles per integer. */
void demo128_d1() {
    const uint32_t length = 128;
    uint32_t bit;
    printf("# --- %s\n", __func__);
    printf("# compressing %d integers\n",length);
    printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
    for(bit = 1; bit <= 32; ++bit) {
        uint32_t i;

        uint32_t * data = get_random_array_from_bit_width_d1(length, bit);
        __m128i * buffer = malloc(length * sizeof(uint32_t));
        uint32_t * backdata = malloc(length * sizeof(uint32_t));
        uint32_t repeat = 500;
        uint64_t min_diff;
        printf("%d\t",bit);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            /* compiler barrier: keep the timed call from being reordered */
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdpackwithoutmaskd1(0,data,buffer, bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdunpackd1(0,buffer, backdata,bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);

        free(data);
        free(buffer);
        free(backdata);
        printf("\n");
    }
    printf("\n\n"); /* two blank lines are required by gnuplot */
}
|
||||
|
||||
#ifdef __AVX2__
|
||||
void demo256() {
|
||||
const uint32_t length = 256;
|
||||
uint32_t bit;
|
||||
printf("# --- %s\n", __func__);
|
||||
printf("# compressing %d integers\n",length);
|
||||
printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
|
||||
for(bit = 1; bit <= 32; ++bit) {
|
||||
uint32_t i;
|
||||
|
||||
uint32_t * data = get_random_array_from_bit_width(length, bit);
|
||||
__m256i * buffer = malloc(length * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(length * sizeof(uint32_t));
|
||||
uint32_t repeat = 500;
|
||||
uint64_t min_diff;
|
||||
printf("%d\t",bit);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
avxpackwithoutmask(data,buffer, bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
min_diff = (uint64_t)-1;
|
||||
for (i = 0; i < repeat; i++) {
|
||||
uint64_t cycles_start, cycles_final, cycles_diff;
|
||||
__asm volatile("" ::: /* pretend to clobber */ "memory");
|
||||
RDTSC_START(cycles_start);
|
||||
avxunpack(buffer, backdata,bit);
|
||||
RDTSC_FINAL(cycles_final);
|
||||
cycles_diff = (cycles_final - cycles_start);
|
||||
if (cycles_diff < min_diff) min_diff = cycles_diff;
|
||||
}
|
||||
printf("%.2f\t",min_diff*1.0/length);
|
||||
|
||||
free(data);
|
||||
free(buffer);
|
||||
free(backdata);
|
||||
printf("\n");
|
||||
}
|
||||
printf("\n\n"); /* two blank lines are required by gnuplot */
|
||||
}
|
||||
#endif /* avx 2 */
|
||||
|
||||
|
||||
/* Entry point for bitpackingbenchmark.c: runs the SSE benchmarks, plus the
 * AVX2 benchmark when the compiler targets AVX2. */
int main() {
    demo128();
    demo128_d1();
#ifdef __AVX2__
    demo256();
#endif
    return 0;
}
|
||||
195
cpp/simdcomp/example.c
Normal file
195
cpp/simdcomp/example.c
Normal file
@@ -0,0 +1,195 @@
|
||||
/* Type "make example" to build this example program. */
|
||||
#include <stdio.h>
|
||||
#include <time.h>
|
||||
#include <stdlib.h>
|
||||
#include "simdcomp.h"
|
||||
|
||||
/**
|
||||
We provide several different code examples.
|
||||
**/
|
||||
|
||||
|
||||
/* very simple test to illustrate a simple application */
|
||||
/* Round-trips N integers through simdpack_length()/simdunpack_length() and
 * prints the achieved compression ratio.
 * Returns 0 on success, -1 if the decoded data does not match the input.
 * Fixes vs. previous version: the three heap buffers are released on the
 * error path too (they used to leak), and the realloc() result is checked
 * so a failed shrink cannot lose the only pointer to the buffer. */
int compress_decompress_demo() {
    size_t k, N = 9999;
    __m128i * endofbuf;
    int howmanybytes;
    float compratio;
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint8_t * buffer;
    uint32_t * backbuffer = malloc(N * sizeof(uint32_t));
    uint32_t b;
    printf("== simple test\n");

    for (k = 0; k < N; ++k) { /* start with k=0, not k=1! */
        datain[k] = k;
    }

    b = maxbits_length(datain, N); /* bit width needed for the largest value */
    buffer = malloc(simdpack_compressedbytes(N,b));
    endofbuf = simdpack_length(datain, N, (__m128i *)buffer, b);
    howmanybytes = (endofbuf-(__m128i *)buffer)*sizeof(__m128i); /* number of compressed bytes */
    compratio = N*sizeof(uint32_t) * 1.0 / howmanybytes;
    /* endofbuf points to the end of the compressed data; shrink the
     * allocation to fit (optional but safe). Keep the old block if the
     * realloc fails rather than overwriting the pointer with NULL. */
    {
        uint8_t * shrunk = realloc(buffer, howmanybytes);
        if (shrunk != NULL) buffer = shrunk;
    }
    printf("Compressed %d integers down to %d bytes (comp. ratio = %f).\n",(int)N,howmanybytes,compratio);
    /* in actual applications b must be stored and retrieved: caller is responsible for that. */
    simdunpack_length((const __m128i *)buffer, N, backbuffer, b); /* will return a pointer to endofbuf */

    for (k = 0; k < N; ++k) {
        if(datain[k] != backbuffer[k]) {
            printf("bug at %lu \n",(unsigned long)k);
            free(datain);      /* previously leaked on this path */
            free(buffer);
            free(backbuffer);
            return -1;
        }
    }
    printf("Code works!\n");
    free(datain);
    free(buffer);
    free(backbuffer);
    return 0;
}
|
||||
|
||||
|
||||
|
||||
/* compresses data from datain to buffer, returns how many bytes written
|
||||
used below in simple_demo */
|
||||
size_t compress(uint32_t * datain, size_t length, uint8_t * buffer) {
|
||||
uint32_t offset;
|
||||
uint8_t * initout;
|
||||
size_t k;
|
||||
if(length/SIMDBlockSize*SIMDBlockSize != length) {
|
||||
printf("Data length should be a multiple of %i \n",SIMDBlockSize);
|
||||
}
|
||||
offset = 0;
|
||||
initout = buffer;
|
||||
for(k = 0; k < length / SIMDBlockSize; ++k) {
|
||||
uint32_t b = simdmaxbitsd1(offset,
|
||||
datain + k * SIMDBlockSize);
|
||||
*buffer++ = b;
|
||||
simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, (__m128i *) buffer,
|
||||
b);
|
||||
offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
|
||||
buffer += b * sizeof(__m128i);
|
||||
}
|
||||
return buffer - initout;
|
||||
}
|
||||
|
||||
/* Another illustration ... */
|
||||
void simple_demo() {
|
||||
size_t REPEAT = 10, gap;
|
||||
size_t N = 1000 * SIMDBlockSize;/* SIMDBlockSize is 128 */
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
size_t compsize;
|
||||
clock_t start, end;
|
||||
uint8_t * buffer = malloc(N * sizeof(uint32_t) + N / SIMDBlockSize); /* output buffer */
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
printf("== simple demo\n");
|
||||
for (gap = 1; gap <= 243; gap *= 3) {
|
||||
size_t k, repeat;
|
||||
uint32_t offset = 0;
|
||||
uint32_t bogus = 0;
|
||||
double numberofseconds;
|
||||
|
||||
printf("\n");
|
||||
printf(" gap = %lu \n", (unsigned long) gap);
|
||||
datain[0] = 0;
|
||||
for (k = 1; k < N; ++k)
|
||||
datain[k] = datain[k-1] + ( rand() % (gap + 1) );
|
||||
compsize = compress(datain,N,buffer);
|
||||
printf("compression ratio = %f \n", (N * sizeof(uint32_t))/ (compsize * 1.0 ));
|
||||
start = clock();
|
||||
for(repeat = 0; repeat < REPEAT; ++repeat) {
|
||||
uint8_t * decbuffer = buffer;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
uint8_t b = *decbuffer++;
|
||||
simdunpackd1(offset, (__m128i *) decbuffer, backbuffer, b);
|
||||
/* do something here with backbuffer */
|
||||
bogus += backbuffer[3];
|
||||
decbuffer += b * sizeof(__m128i);
|
||||
offset = backbuffer[SIMDBlockSize - 1];
|
||||
}
|
||||
}
|
||||
end = clock();
|
||||
numberofseconds = (end-start)/(double)CLOCKS_PER_SEC;
|
||||
printf("decoding speed in million of integers per second %f \n",N*REPEAT/(numberofseconds*1000.0*1000.0));
|
||||
start = clock();
|
||||
for(repeat = 0; repeat < REPEAT; ++repeat) {
|
||||
uint8_t * decbuffer = buffer;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
memcpy(backbuffer,decbuffer+k*SIMDBlockSize,SIMDBlockSize*sizeof(uint32_t));
|
||||
bogus += backbuffer[3] - backbuffer[100];
|
||||
}
|
||||
}
|
||||
end = clock();
|
||||
numberofseconds = (end-start)/(double)CLOCKS_PER_SEC;
|
||||
printf("memcpy speed in million of integers per second %f \n",N*REPEAT/(numberofseconds*1000.0*1000.0));
|
||||
printf("ignore me %i \n",bogus);
|
||||
printf("All tests are in CPU cache. Avoid out-of-cache decoding in applications.\n");
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
}
|
||||
|
||||
/* Used below in more_sophisticated_demo ... */
|
||||
size_t varying_bit_width_compress(uint32_t * datain, size_t length, uint8_t * buffer) {
|
||||
uint8_t * initout;
|
||||
size_t k;
|
||||
if(length/SIMDBlockSize*SIMDBlockSize != length) {
|
||||
printf("Data length should be a multiple of %i \n",SIMDBlockSize);
|
||||
}
|
||||
initout = buffer;
|
||||
for(k = 0; k < length / SIMDBlockSize; ++k) {
|
||||
uint32_t b = maxbits(datain);
|
||||
*buffer++ = b;
|
||||
simdpackwithoutmask(datain, (__m128i *)buffer, b);
|
||||
datain += SIMDBlockSize;
|
||||
buffer += b * sizeof(__m128i);
|
||||
}
|
||||
return buffer - initout;
|
||||
}
|
||||
|
||||
/* Here we compress the data in blocks of 128 integers with varying bit width */
|
||||
int varying_bit_width_demo() {
|
||||
size_t nn = 128 * 2;
|
||||
uint32_t * datainn = malloc(nn * sizeof(uint32_t));
|
||||
uint8_t * buffern = malloc(nn * sizeof(uint32_t) + nn / SIMDBlockSize);
|
||||
uint8_t * initbuffern = buffern;
|
||||
uint32_t * backbuffern = malloc(nn * sizeof(uint32_t));
|
||||
size_t k, compsize;
|
||||
printf("== varying bit-width demo\n");
|
||||
|
||||
for(k=0; k<nn; ++k) {
|
||||
datainn[k] = rand() % (k + 1);
|
||||
}
|
||||
|
||||
compsize = varying_bit_width_compress(datainn,nn,buffern);
|
||||
printf("encoded size: %u (original size: %u)\n", (unsigned)compsize,
|
||||
(unsigned)(nn * sizeof(uint32_t)));
|
||||
|
||||
for (k = 0; k * SIMDBlockSize < nn; ++k) {
|
||||
uint32_t b = *buffern;
|
||||
buffern++;
|
||||
simdunpack((const __m128i *)buffern, backbuffern + k * SIMDBlockSize, b);
|
||||
buffern += b * sizeof(__m128i);
|
||||
}
|
||||
|
||||
for (k = 0; k < nn; ++k) {
|
||||
if(backbuffern[k] != datainn[k]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
printf("Code works!\n");
|
||||
free(datainn);
|
||||
free(initbuffern);
|
||||
free(backbuffern);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Entry point for example.c: runs all three demos, stopping with -1 as
 * soon as one of the verifying demos reports a mismatch. */
int main() {
    if(compress_decompress_demo() != 0) return -1;
    if(varying_bit_width_demo() != 0) return -1;
    simple_demo();
    return 0;
}
|
||||
13
cpp/simdcomp/go/README.md
Normal file
13
cpp/simdcomp/go/README.md
Normal file
@@ -0,0 +1,13 @@
|
||||
Simple Go demo
|
||||
==============
|
||||
|
||||
Setup
|
||||
======
|
||||
|
||||
Start by installing the simdcomp library (make && make install).
|
||||
|
||||
Then type:
|
||||
|
||||
go run test.go
|
||||
|
||||
|
||||
71
cpp/simdcomp/go/test.go
Normal file
71
cpp/simdcomp/go/test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
/////////
|
||||
// This particular file is in the public domain.
|
||||
// Author: Daniel Lemire
|
||||
////////
|
||||
|
||||
package main
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lsimdcomp
|
||||
#include <simdcomp.h>
|
||||
*/
|
||||
import "C"
|
||||
import "fmt"
|
||||
|
||||
//////////
|
||||
// For this demo, we pack and unpack blocks of 128 integers
|
||||
/////////
|
||||
func main() {
|
||||
// I am going to use C types. Alternative might be to use unsafe.Pointer calls, see http://bit.ly/1ndw3W3
|
||||
// this is our original data
|
||||
var data [128]C.uint32_t
|
||||
for i := C.uint32_t(0); i < C.uint32_t(128); i++ {
|
||||
data[i] = i
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
////////////
|
||||
// We first pack without differential coding
|
||||
///////////
|
||||
// computing how many bits per int. is needed
|
||||
b := C.maxbits(&data[0])
|
||||
ratio := 32.0/float64(b)
|
||||
fmt.Println("Bit width ", b)
|
||||
fmt.Println(fmt.Sprintf("Compression ratio %f ", ratio))
|
||||
// we are now going to create a buffer to receive the packed data (each __m128i uses 128 bits)
|
||||
out := make([] C.__m128i,b)
|
||||
C.simdpackwithoutmask( &data[0],&out[0],b);
|
||||
var recovereddata [128]C.uint32_t
|
||||
C.simdunpack(&out[0],&recovereddata[0],b)
|
||||
for i := 0; i < 128; i++ {
|
||||
if data[i] != recovereddata[i] {
|
||||
fmt.Println("Bug ")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
///////////
|
||||
// Next, we use differential coding
|
||||
//////////
|
||||
offset := C.uint32_t(0) // if you pack data from K to K + 128, offset should be the value at K-1. When K = 0, choose a default
|
||||
b1 := C.simdmaxbitsd1(offset,&data[0])
|
||||
ratio1 := 32.0/float64(b1)
|
||||
fmt.Println("Bit width ", b1)
|
||||
fmt.Println(fmt.Sprintf("Compression ratio %f ", ratio1))
|
||||
// we are now going to create a buffer to receive the packed data (each __m128i uses 128 bits)
|
||||
out = make([] C.__m128i,b1)
|
||||
C.simdpackwithoutmaskd1(offset, &data[0],&out[0],b1);
|
||||
C.simdunpackd1(offset,&out[0],&recovereddata[0],b1)
|
||||
for i := 0; i < 128; i++ {
|
||||
if data[i] != recovereddata[i] {
|
||||
fmt.Println("Bug ")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("test succesful.")
|
||||
|
||||
}
|
||||
40
cpp/simdcomp/include/avxbitpacking.h
Normal file
40
cpp/simdcomp/include/avxbitpacking.h
Normal file
@@ -0,0 +1,40 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef INCLUDE_AVXBITPACKING_H_
|
||||
#define INCLUDE_AVXBITPACKING_H_
|
||||
|
||||
|
||||
#ifdef __AVX2__
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
|
||||
/* AVX2 is required */
|
||||
#include <immintrin.h>
|
||||
/* for memset */
|
||||
#include <string.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
|
||||
enum{ AVXBlockSize = 256};
|
||||
|
||||
/* max integer logarithm over a range of AVXBlockSize integers (256 integer) */
|
||||
uint32_t avxmaxbits(const uint32_t * begin);
|
||||
|
||||
/* reads 256 values from "in", writes "bit" 256-bit vectors to "out" */
|
||||
void avxpack(const uint32_t * in,__m256i * out, const uint32_t bit);
|
||||
|
||||
/* reads 256 values from "in", writes "bit" 256-bit vectors to "out" */
|
||||
void avxpackwithoutmask(const uint32_t * in,__m256i * out, const uint32_t bit);
|
||||
|
||||
/* reads "bit" 256-bit vectors from "in", writes 256 values to "out" */
|
||||
void avxunpack(const __m256i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* __AVX2__ */
|
||||
|
||||
#endif /* INCLUDE_AVXBITPACKING_H_ */
|
||||
81
cpp/simdcomp/include/portability.h
Normal file
81
cpp/simdcomp/include/portability.h
Normal file
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#ifndef SIMDBITCOMPAT_H_
|
||||
#define SIMDBITCOMPAT_H_
|
||||
|
||||
#include <iso646.h> /* mostly for Microsoft compilers */
|
||||
#include <string.h>
|
||||
|
||||
#if SIMDCOMP_DEBUG
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# define SIMDCOMP_PURE
|
||||
#else
|
||||
# if defined(__GNUC__)
|
||||
# if __GNUC__ >= 3
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline __attribute__((always_inline))
|
||||
# define SIMDCOMP_NEVER_INLINE __attribute__((noinline))
|
||||
# define SIMDCOMP_PURE __attribute__((pure))
|
||||
# else
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# define SIMDCOMP_PURE
|
||||
# endif
|
||||
# elif defined(_MSC_VER)
|
||||
# define SIMDCOMP_ALWAYS_INLINE __forceinline
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# define SIMDCOMP_PURE
|
||||
# else
|
||||
# if __has_attribute(always_inline)
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline __attribute__((always_inline))
|
||||
# else
|
||||
# define SIMDCOMP_ALWAYS_INLINE inline
|
||||
# endif
|
||||
# if __has_attribute(noinline)
|
||||
# define SIMDCOMP_NEVER_INLINE __attribute__((noinline))
|
||||
# else
|
||||
# define SIMDCOMP_NEVER_INLINE
|
||||
# endif
|
||||
# if __has_attribute(pure)
|
||||
# define SIMDCOMP_PURE __attribute__((pure))
|
||||
# else
|
||||
# define SIMDCOMP_PURE
|
||||
# endif
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) && _MSC_VER < 1600
|
||||
typedef unsigned int uint32_t;
|
||||
typedef unsigned char uint8_t;
|
||||
typedef signed char int8_t;
|
||||
#else
|
||||
#include <stdint.h> /* part of Visual Studio 2010 and better, others likely anyway */
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#define SIMDCOMP_ALIGNED(x) __declspec(align(x))
|
||||
#else
|
||||
#if defined(__GNUC__)
|
||||
#define SIMDCOMP_ALIGNED(x) __attribute__ ((aligned(x)))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# include <intrin.h>
|
||||
/* 64-bit needs extending */
|
||||
# define SIMDCOMP_CTZ(result, mask) do { \
|
||||
unsigned long index; \
|
||||
if (!_BitScanForward(&(index), (mask))) { \
|
||||
(result) = 32U; \
|
||||
} else { \
|
||||
(result) = (uint32_t)(index); \
|
||||
} \
|
||||
} while (0)
|
||||
#else
|
||||
# define SIMDCOMP_CTZ(result, mask) \
|
||||
result = __builtin_ctz(mask)
|
||||
#endif
|
||||
|
||||
#endif /* SIMDBITCOMPAT_H_ */
|
||||
|
||||
72
cpp/simdcomp/include/simdbitpacking.h
Normal file
72
cpp/simdcomp/include/simdbitpacking.h
Normal file
@@ -0,0 +1,72 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#ifndef SIMDBITPACKING_H_
|
||||
#define SIMDBITPACKING_H_
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
/* for memset */
|
||||
#include <string.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
|
||||
/***
|
||||
* Please see example.c for various examples on how to make good use
|
||||
* of these functions.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
|
||||
* The input values are masked so that only the least significant "bit" bits are used. */
|
||||
void simdpack(const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
|
||||
* The input values are assumed to be less than 1<<bit. */
|
||||
void simdpackwithoutmask(const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
|
||||
void simdunpack(const __m128i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
|
||||
/* how many compressed bytes are needed to compressed length integers using a bit width of bit with
|
||||
the simdpackFOR_length function. */
|
||||
int simdpack_compressedbytes(int length, const uint32_t bit);
|
||||
|
||||
/* like simdpack, but supports an undetermined number of inputs.
|
||||
* This is useful if you need to unpack an array of integers that is not divisible by 128 integers.
|
||||
* Returns a pointer to the (advanced) compressed array. Compressed data is stored in the memory location between
|
||||
the provided (out) pointer and the returned pointer. */
|
||||
__m128i * simdpack_length(const uint32_t * in, size_t length, __m128i * out, const uint32_t bit);
|
||||
|
||||
/* like simdunpack, but supports an undetermined number of inputs.
|
||||
* This is useful if you need to unpack an array of integers that is not divisible by 128 integers.
|
||||
* Returns a pointer to the (advanced) compressed array. The read compressed data is between the provided
|
||||
(in) pointer and the returned pointer. */
|
||||
const __m128i * simdunpack_length(const __m128i * in, size_t length, uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
|
||||
|
||||
/* like simdpack, but supports an undetermined small number of inputs. This is useful if you need to pack less
|
||||
than 128 integers.
|
||||
* Note that this function is much slower.
|
||||
* Returns a pointer to the (advanced) compressed array. Compressed data is stored in the memory location
|
||||
between the provided (out) pointer and the returned pointer. */
|
||||
__m128i * simdpack_shortlength(const uint32_t * in, int length, __m128i * out, const uint32_t bit);
|
||||
|
||||
/* like simdunpack, but supports an undetermined small number of inputs. This is useful if you need to unpack less
|
||||
than 128 integers.
|
||||
* Note that this function is much slower.
|
||||
* Returns a pointer to the (advanced) compressed array. The read compressed data is between the provided (in)
|
||||
pointer and the returned pointer. */
|
||||
const __m128i * simdunpack_shortlength(const __m128i * in, int length, uint32_t * out, const uint32_t bit);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value" */
|
||||
void simdfastset(__m128i * in128, uint32_t b, uint32_t value, size_t index);
|
||||
|
||||
#endif /* SIMDBITPACKING_H_ */
|
||||
22
cpp/simdcomp/include/simdcomp.h
Normal file
22
cpp/simdcomp/include/simdcomp.h
Normal file
@@ -0,0 +1,22 @@
|
||||
/**
 * This code is released under a BSD License.
 */

/* Umbrella header: including this single file pulls in the whole simdcomp
 * public API. */
#ifndef SIMDCOMP_H_
#define SIMDCOMP_H_

/* C linkage so C++ translation units can link against the C library
 * without name mangling. */
#ifdef __cplusplus
extern "C" {
#endif

#include "simdbitpacking.h"
#include "simdcomputil.h"
#include "simdfor.h"
#include "simdintegratedbitpacking.h"
#include "avxbitpacking.h" /* its contents self-guard on __AVX2__ */

#ifdef __cplusplus
} // extern "C"
#endif

#endif
|
||||
54
cpp/simdcomp/include/simdcomputil.h
Normal file
54
cpp/simdcomp/include/simdcomputil.h
Normal file
@@ -0,0 +1,54 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef SIMDCOMPUTIL_H_
|
||||
#define SIMDCOMPUTIL_H_
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
|
||||
|
||||
|
||||
|
||||
/* returns the integer logarithm of v (bit width) */
|
||||
uint32_t bits(const uint32_t v);
|
||||
|
||||
/* max integer logarithm over a range of SIMDBlockSize integers (128 integer) */
|
||||
uint32_t maxbits(const uint32_t * begin);
|
||||
|
||||
/* same as maxbits, but we specify the number of integers */
|
||||
uint32_t maxbits_length(const uint32_t * in,uint32_t length);
|
||||
|
||||
enum{ SIMDBlockSize = 128};
|
||||
|
||||
|
||||
/* computes (quickly) the minimal value of 128 values */
|
||||
uint32_t simdmin(const uint32_t * in);
|
||||
|
||||
/* computes (quickly) the minimal value of the specified number of values */
|
||||
uint32_t simdmin_length(const uint32_t * in, uint32_t length);
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
/* computes (quickly) the minimal and maximal value of the specified number of values */
|
||||
void simdmaxmin_length(const uint32_t * in, uint32_t length, uint32_t * getmin, uint32_t * getmax);
|
||||
|
||||
/* computes (quickly) the minimal and maximal value of the 128 values */
|
||||
void simdmaxmin(const uint32_t * in, uint32_t * getmin, uint32_t * getmax);
|
||||
|
||||
#endif
|
||||
|
||||
/* like maxbit over 128 integers (SIMDBlockSize) with provided initial value
|
||||
and using differential coding */
|
||||
uint32_t simdmaxbitsd1(uint32_t initvalue, const uint32_t * in);
|
||||
|
||||
/* like simdmaxbitsd1, but calculates maxbits over |length| integers
|
||||
with provided initial value. |length| can be any arbitrary value. */
|
||||
uint32_t simdmaxbitsd1_length(uint32_t initvalue, const uint32_t * in,
|
||||
uint32_t length);
|
||||
|
||||
|
||||
|
||||
#endif /* SIMDCOMPUTIL_H_ */
|
||||
72
cpp/simdcomp/include/simdfor.h
Normal file
72
cpp/simdcomp/include/simdfor.h
Normal file
@@ -0,0 +1,72 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#ifndef INCLUDE_SIMDFOR_H_
|
||||
#define INCLUDE_SIMDFOR_H_
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
#include "simdbitpacking.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out" */
|
||||
void simdpackFOR(uint32_t initvalue, const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
|
||||
void simdunpackFOR(uint32_t initvalue, const __m128i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* how many compressed bytes are needed to compressed length integers using a bit width of bit with
|
||||
the simdpackFOR_length function. */
|
||||
int simdpackFOR_compressedbytes(int length, const uint32_t bit);
|
||||
|
||||
/* like simdpackFOR, but supports an undetermined number of inputs.
|
||||
This is useful if you need to pack less than 128 integers. Note that this function is much slower.
|
||||
Compressed data is stored in the memory location between
|
||||
the provided (out) pointer and the returned pointer. */
|
||||
__m128i * simdpackFOR_length(uint32_t initvalue, const uint32_t * in, int length, __m128i * out, const uint32_t bit);
|
||||
|
||||
/* like simdunpackFOR, but supports an undetermined number of inputs.
|
||||
This is useful if you need to unpack less than 128 integers. Note that this function is much slower.
|
||||
The read compressed data is between the provided
|
||||
(in) pointer and the returned pointer. */
|
||||
const __m128i * simdunpackFOR_length(uint32_t initvalue, const __m128i * in, int length, uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* returns the value stored at the specified "slot".
|
||||
* */
|
||||
uint32_t simdselectFOR(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int slot);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value" */
|
||||
void simdfastsetFOR(uint32_t initvalue, __m128i * in, uint32_t bit, uint32_t value, size_t index);
|
||||
|
||||
|
||||
/* searches "bit" 128-bit vectors from "in" (= length<=128 encoded integers) for the first encoded uint32 value
|
||||
* which is >= |key|, and returns its position. It is assumed that the values
|
||||
* stored are in sorted order.
|
||||
* The encoded key is stored in "*presult".
|
||||
* The first length decoded integers, ignoring others. If no value is larger or equal to the key,
|
||||
* length is returned. Length should be no larger than 128.
|
||||
*
|
||||
* If no value is larger or equal to the key,
|
||||
* length is returned */
|
||||
int simdsearchwithlengthFOR(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int length, uint32_t key, uint32_t *presult);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* INCLUDE_SIMDFOR_H_ */
|
||||
98
cpp/simdcomp/include/simdintegratedbitpacking.h
Normal file
98
cpp/simdcomp/include/simdintegratedbitpacking.h
Normal file
@@ -0,0 +1,98 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#ifndef SIMD_INTEGRATED_BITPACKING_H
|
||||
#define SIMD_INTEGRATED_BITPACKING_H
|
||||
|
||||
#include "portability.h"
|
||||
|
||||
/* SSE2 is required */
|
||||
#include <emmintrin.h>
|
||||
|
||||
#include "simdcomputil.h"
|
||||
#include "simdbitpacking.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out"
|
||||
integer values should be in sorted order (for best results).
|
||||
The differences are masked so that only the least significant "bit" bits are used. */
|
||||
void simdpackd1(uint32_t initvalue, const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* reads 128 values from "in", writes "bit" 128-bit vectors to "out"
|
||||
integer values should be in sorted order (for best results).
|
||||
The difference values are assumed to be less than 1<<bit. */
|
||||
void simdpackwithoutmaskd1(uint32_t initvalue, const uint32_t * in,__m128i * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
|
||||
void simdunpackd1(uint32_t initvalue, const __m128i * in,uint32_t * out, const uint32_t bit);
|
||||
|
||||
|
||||
/* searches "bit" 128-bit vectors from "in" (= 128 encoded integers) for the first encoded uint32 value
|
||||
* which is >= |key|, and returns its position. It is assumed that the values
|
||||
* stored are in sorted order.
|
||||
* The encoded key is stored in "*presult". If no value is larger or equal to the key,
|
||||
* 128 is returned. The pointer initOffset is a pointer to the last four value decoded
|
||||
* (when starting out, this can be a zero vector or initialized with _mm_set1_epi32(init)),
|
||||
* and the vector gets updated.
|
||||
**/
|
||||
int
|
||||
simdsearchd1(__m128i * initOffset, const __m128i *in, uint32_t bit,
|
||||
uint32_t key, uint32_t *presult);
|
||||
|
||||
|
||||
/* searches "bit" 128-bit vectors from "in" (= length<=128 encoded integers) for the first encoded uint32 value
|
||||
* which is >= |key|, and returns its position. It is assumed that the values
|
||||
* stored are in sorted order.
|
||||
* The encoded key is stored in "*presult".
|
||||
* The first length decoded integers, ignoring others. If no value is larger or equal to the key,
|
||||
* length is returned. Length should be no larger than 128.
|
||||
*
|
||||
* If no value is larger or equal to the key,
|
||||
* length is returned */
|
||||
int simdsearchwithlengthd1(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int length, uint32_t key, uint32_t *presult);
|
||||
|
||||
|
||||
|
||||
/* returns the value stored at the specified "slot".
|
||||
* */
|
||||
uint32_t simdselectd1(uint32_t initvalue, const __m128i *in, uint32_t bit,
|
||||
int slot);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value",
|
||||
* you must somehow know the previous value.
|
||||
* Because of differential coding, all following values are incremented by the offset between this new
|
||||
* value and the old value...
|
||||
* This functions is useful if you want to modify the last value.
|
||||
*/
|
||||
void simdfastsetd1fromprevious( __m128i * in, uint32_t bit, uint32_t previousvalue, uint32_t value, size_t index);
|
||||
|
||||
/* given a block of 128 packed values, this function sets the value at index "index" to "value",
|
||||
* This function computes the previous value if needed.
|
||||
* Because of differential coding, all following values are incremented by the offset between this new
|
||||
* value and the old value...
|
||||
* This functions is useful if you want to modify the last value.
|
||||
*/
|
||||
void simdfastsetd1(uint32_t initvalue, __m128i * in, uint32_t bit, uint32_t value, size_t index);
|
||||
|
||||
|
||||
/*Simply scan the data
|
||||
* The pointer initOffset is a pointer to the last four value decoded
|
||||
* (when starting out, this can be a zero vector or initialized with _mm_set1_epi32(init);),
|
||||
* and the vector gets updated.
|
||||
* */
|
||||
|
||||
void
|
||||
simdscand1(__m128i * initOffset, const __m128i *in, uint32_t bit);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif
|
||||
79
cpp/simdcomp/makefile
Normal file
79
cpp/simdcomp/makefile
Normal file
@@ -0,0 +1,79 @@
|
||||
# minimalist makefile
|
||||
.SUFFIXES:
|
||||
#
|
||||
.SUFFIXES: .cpp .o .c .h
|
||||
ifeq ($(DEBUG),1)
|
||||
CFLAGS = -fPIC -std=c89 -ggdb -msse4.1 -march=native -Wall -Wextra -Wshadow -fsanitize=undefined -fno-omit-frame-pointer -fsanitize=address
|
||||
else
|
||||
CFLAGS = -fPIC -std=c89 -O3 -msse4.1 -march=native -Wall -Wextra -Wshadow
|
||||
endif # debug
|
||||
LDFLAGS = -shared
|
||||
LIBNAME=libsimdcomp.so.0.0.3
|
||||
all: unit unit_chars bitpackingbenchmark $(LIBNAME)
|
||||
test:
|
||||
./unit
|
||||
./unit_chars
|
||||
install: $(OBJECTS)
|
||||
cp $(LIBNAME) /usr/local/lib
|
||||
ln -s /usr/local/lib/$(LIBNAME) /usr/local/lib/libsimdcomp.so
|
||||
ldconfig
|
||||
cp $(HEADERS) /usr/local/include
|
||||
|
||||
|
||||
|
||||
HEADERS=./include/simdbitpacking.h ./include/simdcomputil.h ./include/simdintegratedbitpacking.h ./include/simdcomp.h ./include/simdfor.h ./include/avxbitpacking.h
|
||||
|
||||
uninstall:
|
||||
for h in $(HEADERS) ; do rm /usr/local/$$h; done
|
||||
rm /usr/local/lib/$(LIBNAME)
|
||||
rm /usr/local/lib/libsimdcomp.so
|
||||
ldconfig
|
||||
|
||||
|
||||
OBJECTS= simdbitpacking.o simdintegratedbitpacking.o simdcomputil.o \
|
||||
simdpackedsearch.o simdpackedselect.o simdfor.o avxbitpacking.o
|
||||
|
||||
$(LIBNAME): $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o $(LIBNAME) $(OBJECTS) $(LDFLAGS)
|
||||
|
||||
|
||||
avxbitpacking.o: ./src/avxbitpacking.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/avxbitpacking.c -Iinclude
|
||||
|
||||
|
||||
simdfor.o: ./src/simdfor.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/simdfor.c -Iinclude
|
||||
|
||||
|
||||
simdcomputil.o: ./src/simdcomputil.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/simdcomputil.c -Iinclude
|
||||
|
||||
simdbitpacking.o: ./src/simdbitpacking.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/simdbitpacking.c -Iinclude
|
||||
|
||||
simdintegratedbitpacking.o: ./src/simdintegratedbitpacking.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/simdintegratedbitpacking.c -Iinclude
|
||||
|
||||
simdpackedsearch.o: ./src/simdpackedsearch.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/simdpackedsearch.c -Iinclude
|
||||
|
||||
simdpackedselect.o: ./src/simdpackedselect.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/simdpackedselect.c -Iinclude
|
||||
|
||||
example: ./example.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o example ./example.c -Iinclude $(OBJECTS)
|
||||
|
||||
unit: ./tests/unit.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o unit ./tests/unit.c -Iinclude $(OBJECTS)
|
||||
|
||||
bitpackingbenchmark: ./benchmarks/bitpackingbenchmark.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o bitpackingbenchmark ./benchmarks/bitpackingbenchmark.c -Iinclude $(OBJECTS)
|
||||
benchmark: ./benchmarks/benchmark.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o benchmark ./benchmarks/benchmark.c -Iinclude $(OBJECTS)
|
||||
dynunit: ./tests/unit.c $(HEADERS) $(LIBNAME)
|
||||
$(CC) $(CFLAGS) -o dynunit ./tests/unit.c -Iinclude -lsimdcomp
|
||||
|
||||
unit_chars: ./tests/unit_chars.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o unit_chars ./tests/unit_chars.c -Iinclude $(OBJECTS)
|
||||
clean:
|
||||
rm -f unit *.o $(LIBNAME) example benchmark bitpackingbenchmark dynunit unit_chars
|
||||
104
cpp/simdcomp/makefile.vc
Normal file
104
cpp/simdcomp/makefile.vc
Normal file
@@ -0,0 +1,104 @@
|
||||
|
||||
!IFNDEF MACHINE
|
||||
!IF "$(PROCESSOR_ARCHITECTURE)"=="AMD64"
|
||||
MACHINE=x64
|
||||
!ELSE
|
||||
MACHINE=x86
|
||||
!ENDIF
|
||||
!ENDIF
|
||||
|
||||
!IFNDEF DEBUG
|
||||
DEBUG=no
|
||||
!ENDIF
|
||||
|
||||
!IFNDEF CC
|
||||
CC=cl.exe
|
||||
!ENDIF
|
||||
|
||||
!IFNDEF AR
|
||||
AR=lib.exe
|
||||
!ENDIF
|
||||
|
||||
!IFNDEF LINK
|
||||
LINK=link.exe
|
||||
!ENDIF
|
||||
|
||||
!IFNDEF PGO
|
||||
PGO=no
|
||||
!ENDIF
|
||||
|
||||
!IFNDEF PGI
|
||||
PGI=no
|
||||
!ENDIF
|
||||
|
||||
INC = /Iinclude
|
||||
|
||||
!IF "$(DEBUG)"=="yes"
|
||||
CFLAGS = /nologo /MDd /LDd /Od /Zi /D_DEBUG /RTC1 /W3 /GS /Gm
|
||||
ARFLAGS = /nologo
|
||||
LDFLAGS = /nologo /debug /nodefaultlib:msvcrt
|
||||
!ELSE
|
||||
CFLAGS = /nologo /MD /O2 /Zi /DNDEBUG /W3 /Gm- /GS /Gy /Oi /GL /MP
|
||||
ARFLAGS = /nologo /LTCG
|
||||
LDFLAGS = /nologo /LTCG /DYNAMICBASE /incremental:no /debug /opt:ref,icf
|
||||
!ENDIF
|
||||
|
||||
!IF "$(PGI)"=="yes"
|
||||
LDFLAGS = $(LDFLAGS) /ltcg:pgi
|
||||
!ENDIF
|
||||
|
||||
!IF "$(PGO)"=="yes"
|
||||
LDFLAGS = $(LDFLAGS) /ltcg:pgo
|
||||
!ENDIF
|
||||
|
||||
LIB_OBJS = simdbitpacking.obj simdintegratedbitpacking.obj simdcomputil.obj \
|
||||
simdpackedsearch.obj simdpackedselect.obj simdfor.obj
|
||||
|
||||
|
||||
all: lib dll dynunit unit_chars example benchmark
|
||||
# need some good use case scenario to train the instrumented build
|
||||
@if "$(PGI)"=="yes" echo Running PGO training
|
||||
@if "$(PGI)"=="yes" benchmark.exe >nul 2>&1
|
||||
@if "$(PGI)"=="yes" example.exe >nul 2>&1
|
||||
|
||||
|
||||
$(LIB_OBJS):
|
||||
$(CC) $(INC) $(CFLAGS) /c src/simdbitpacking.c src/simdintegratedbitpacking.c src/simdcomputil.c \
|
||||
src/simdpackedsearch.c src/simdpackedselect.c src/simdfor.c
|
||||
|
||||
lib: $(LIB_OBJS)
|
||||
$(AR) $(ARFLAGS) /OUT:simdcomp_a.lib $(LIB_OBJS)
|
||||
|
||||
dll: $(LIB_OBJS)
|
||||
$(LINK) /DLL $(LDFLAGS) /OUT:simdcomp.dll /IMPLIB:simdcomp.lib /DEF:simdcomp.def $(LIB_OBJS)
|
||||
|
||||
unit: lib
|
||||
$(CC) $(INC) $(CFLAGS) /c src/unit.c
|
||||
$(LINK) $(LDFLAGS) /OUT:unit.exe unit.obj simdcomp_a.lib
|
||||
|
||||
dynunit: dll
|
||||
$(CC) $(INC) $(CFLAGS) /c src/unit.c
|
||||
$(LINK) $(LDFLAGS) /OUT:unit.exe unit.obj simdcomp.lib
|
||||
|
||||
unit_chars: lib
|
||||
$(CC) $(INC) $(CFLAGS) /c src/unit_chars.c
|
||||
$(LINK) $(LDFLAGS) /OUT:unit_chars.exe unit_chars.obj simdcomp.lib
|
||||
|
||||
|
||||
example: lib
|
||||
$(CC) $(INC) $(CFLAGS) /c example.c
|
||||
$(LINK) $(LDFLAGS) /OUT:example.exe example.obj simdcomp.lib
|
||||
|
||||
benchmark: lib
|
||||
$(CC) $(INC) $(CFLAGS) /c src/benchmark.c
|
||||
$(LINK) $(LDFLAGS) /OUT:benchmark.exe benchmark.obj simdcomp.lib
|
||||
|
||||
clean:
|
||||
del /Q *.obj
|
||||
del /Q *.lib
|
||||
del /Q *.exe
|
||||
del /Q *.dll
|
||||
del /Q *.pgc
|
||||
del /Q *.pgd
|
||||
del /Q *.pdb
|
||||
|
||||
16
cpp/simdcomp/package.json
Normal file
16
cpp/simdcomp/package.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "simdcomp",
|
||||
"version": "0.0.3",
|
||||
"repo": "lemire/simdcomp",
|
||||
"description": "A simple C library for compressing lists of integers",
|
||||
"license": "BSD-3-Clause",
|
||||
"src": [
|
||||
"src/simdbitpacking.c",
|
||||
"src/simdcomputil.c",
|
||||
"src/simdintegratedbitpacking.c",
|
||||
"include/simdbitpacking.h",
|
||||
"include/simdcomp.h",
|
||||
"include/simdcomputil.h",
|
||||
"include/simdintegratedbitpacking.h"
|
||||
]
|
||||
}
|
||||
182
cpp/simdcomp/scripts/avxpacking.py
Executable file
182
cpp/simdcomp/scripts/avxpacking.py
Executable file
@@ -0,0 +1,182 @@
|
||||
#!/usr/bin/env python
|
||||
import sys
|
||||
def howmany(bit):
|
||||
""" how many values are we going to pack? """
|
||||
return 256
|
||||
|
||||
def howmanywords(bit):
|
||||
return (howmany(bit) * bit + 255)/256
|
||||
|
||||
def howmanybytes(bit):
|
||||
return howmanywords(bit) * 16
|
||||
|
||||
print("""
|
||||
/** code generated by avxpacking.py starts here **/
|
||||
""")
|
||||
|
||||
print("""typedef void (*avxpackblockfnc)(const uint32_t * pin, __m256i * compressed);""")
|
||||
print("""typedef void (*avxunpackblockfnc)(const __m256i * compressed, uint32_t * pout);""")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def plurial(number):
|
||||
if(number <> 1):
|
||||
return "s"
|
||||
else :
|
||||
return ""
|
||||
|
||||
print("")
|
||||
print("static void avxpackblock0(const uint32_t * pin, __m256i * compressed) {");
|
||||
print(" (void)compressed;");
|
||||
print(" (void) pin; /* we consumed {0} 32-bit integer{1} */ ".format(howmany(0),plurial(howmany(0))));
|
||||
print("}");
|
||||
print("")
|
||||
|
||||
for bit in range(1,33):
|
||||
print("")
|
||||
print("/* we are going to pack {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit),bit,howmanywords(bit),howmanybytes(bit)))
|
||||
print("static void avxpackblock{0}(const uint32_t * pin, __m256i * compressed) {{".format(bit));
|
||||
print(" const __m256i * in = (const __m256i *) pin;");
|
||||
print(" /* we are going to touch {0} 256-bit word{1} */ ".format(howmanywords(bit),plurial(howmanywords(bit))));
|
||||
if(howmanywords(bit) == 1):
|
||||
print(" __m256i w0;")
|
||||
else:
|
||||
print(" __m256i w0, w1;")
|
||||
if( (bit & (bit-1)) <> 0) : print(" __m256i tmp; /* used to store inputs at word boundary */")
|
||||
oldword = 0
|
||||
for j in range(howmany(bit)/8):
|
||||
firstword = j * bit / 32
|
||||
if(firstword > oldword):
|
||||
print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(oldword,oldword%2))
|
||||
oldword = firstword
|
||||
secondword = (j * bit + bit - 1)/32
|
||||
firstshift = (j*bit) % 32
|
||||
if( firstword == secondword):
|
||||
if(firstshift == 0):
|
||||
print(" w{0} = _mm256_lddqu_si256 (in + {1});".format(firstword%2,j))
|
||||
else:
|
||||
print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(_mm256_lddqu_si256 (in + {1}) , {2}));".format(firstword%2,j,firstshift))
|
||||
else:
|
||||
print(" tmp = _mm256_lddqu_si256 (in + {0});".format(j))
|
||||
print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(tmp , {2}));".format(firstword%2,j,firstshift))
|
||||
secondshift = 32-firstshift
|
||||
print(" w{0} = _mm256_srli_epi32(tmp,{2});".format(secondword%2,j,secondshift))
|
||||
print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(secondword,secondword%2))
|
||||
print("}");
|
||||
print("")
|
||||
|
||||
|
||||
print("")
|
||||
print("static void avxpackblockmask0(const uint32_t * pin, __m256i * compressed) {");
|
||||
print(" (void)compressed;");
|
||||
print(" (void) pin; /* we consumed {0} 32-bit integer{1} */ ".format(howmany(0),plurial(howmany(0))));
|
||||
print("}");
|
||||
print("")
|
||||
|
||||
for bit in range(1,33):
|
||||
print("")
|
||||
print("/* we are going to pack {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit),bit,howmanywords(bit),howmanybytes(bit)))
|
||||
print("static void avxpackblockmask{0}(const uint32_t * pin, __m256i * compressed) {{".format(bit));
|
||||
print(" /* we are going to touch {0} 256-bit word{1} */ ".format(howmanywords(bit),plurial(howmanywords(bit))));
|
||||
if(howmanywords(bit) == 1):
|
||||
print(" __m256i w0;")
|
||||
else:
|
||||
print(" __m256i w0, w1;")
|
||||
print(" const __m256i * in = (const __m256i *) pin;");
|
||||
if(bit < 32): print(" const __m256i mask = _mm256_set1_epi32({0});".format((1<<bit)-1));
|
||||
def maskfnc(x):
|
||||
if(bit == 32): return x
|
||||
return " _mm256_and_si256 ( mask, {0}) ".format(x)
|
||||
if( (bit & (bit-1)) <> 0) : print(" __m256i tmp; /* used to store inputs at word boundary */")
|
||||
oldword = 0
|
||||
for j in range(howmany(bit)/8):
|
||||
firstword = j * bit / 32
|
||||
if(firstword > oldword):
|
||||
print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(oldword,oldword%2))
|
||||
oldword = firstword
|
||||
secondword = (j * bit + bit - 1)/32
|
||||
firstshift = (j*bit) % 32
|
||||
loadstr = maskfnc(" _mm256_lddqu_si256 (in + {0}) ".format(j))
|
||||
if( firstword == secondword):
|
||||
if(firstshift == 0):
|
||||
print(" w{0} = {1};".format(firstword%2,loadstr))
|
||||
else:
|
||||
print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32({1} , {2}));".format(firstword%2,loadstr,firstshift))
|
||||
else:
|
||||
print(" tmp = {0};".format(loadstr))
|
||||
print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(tmp , {2}));".format(firstword%2,j,firstshift))
|
||||
secondshift = 32-firstshift
|
||||
print(" w{0} = _mm256_srli_epi32(tmp,{2});".format(secondword%2,j,secondshift))
|
||||
print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(secondword,secondword%2))
|
||||
print("}");
|
||||
print("")
|
||||
|
||||
|
||||
print("static void avxunpackblock0(const __m256i * compressed, uint32_t * pout) {");
|
||||
print(" (void) compressed;");
|
||||
print(" memset(pout,0,{0});".format(howmany(0)));
|
||||
print("}");
|
||||
print("")
|
||||
|
||||
for bit in range(1,33):
|
||||
print("")
|
||||
print("/* we packed {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit),bit,howmanywords(bit),howmanybytes(bit)))
|
||||
print("static void avxunpackblock{0}(const __m256i * compressed, uint32_t * pout) {{".format(bit));
|
||||
print(" /* we are going to access {0} 256-bit word{1} */ ".format(howmanywords(bit),plurial(howmanywords(bit))));
|
||||
if(howmanywords(bit) == 1):
|
||||
print(" __m256i w0;")
|
||||
else:
|
||||
print(" __m256i w0, w1;")
|
||||
print(" __m256i * out = (__m256i *) pout;");
|
||||
if(bit < 32): print(" const __m256i mask = _mm256_set1_epi32({0});".format((1<<bit)-1));
|
||||
maskstr = " _mm256_and_si256 ( mask, {0}) "
|
||||
if (bit == 32) : maskstr = " {0} " # no need
|
||||
oldword = 0
|
||||
print(" w0 = _mm256_lddqu_si256 (compressed);")
|
||||
for j in range(howmany(bit)/8):
|
||||
firstword = j * bit / 32
|
||||
secondword = (j * bit + bit - 1)/32
|
||||
if(secondword > oldword):
|
||||
print(" w{0} = _mm256_lddqu_si256 (compressed + {1});".format(secondword%2,secondword))
|
||||
oldword = secondword
|
||||
firstshift = (j*bit) % 32
|
||||
firstshiftstr = "_mm256_srli_epi32( w{0} , "+str(firstshift)+") "
|
||||
if(firstshift == 0):
|
||||
firstshiftstr =" w{0} " # no need
|
||||
wfirst = firstshiftstr.format(firstword%2)
|
||||
if( firstword == secondword):
|
||||
if(firstshift + bit <> 32):
|
||||
wfirst = maskstr.format(wfirst)
|
||||
print(" _mm256_storeu_si256(out + {0}, {1});".format(j,wfirst))
|
||||
else:
|
||||
secondshift = (32-firstshift)
|
||||
wsecond = "_mm256_slli_epi32( w{0} , {1} ) ".format((firstword+1)%2,secondshift)
|
||||
wfirstorsecond = " _mm256_or_si256 ({0},{1}) ".format(wfirst,wsecond)
|
||||
wfirstorsecond = maskstr.format(wfirstorsecond)
|
||||
print(" _mm256_storeu_si256(out + {0},\n {1});".format(j,wfirstorsecond))
|
||||
print("}");
|
||||
print("")
|
||||
|
||||
|
||||
print("static avxpackblockfnc avxfuncPackArr[] = {")
|
||||
for bit in range(0,32):
|
||||
print("&avxpackblock{0},".format(bit))
|
||||
print("&avxpackblock32")
|
||||
print("};")
|
||||
|
||||
print("static avxpackblockfnc avxfuncPackMaskArr[] = {")
|
||||
for bit in range(0,32):
|
||||
print("&avxpackblockmask{0},".format(bit))
|
||||
print("&avxpackblockmask32")
|
||||
print("};")
|
||||
|
||||
|
||||
print("static avxunpackblockfnc avxfuncUnpackArr[] = {")
|
||||
for bit in range(0,32):
|
||||
print("&avxunpackblock{0},".format(bit))
|
||||
print("&avxunpackblock32")
|
||||
print("};")
|
||||
print("/** code generated by avxpacking.py ends here **/")
|
||||
152
cpp/simdcomp/scripts/simdfor.py
Executable file
152
cpp/simdcomp/scripts/simdfor.py
Executable file
@@ -0,0 +1,152 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
from math import ceil
|
||||
|
||||
print("""
|
||||
/**
|
||||
* Blablabla
|
||||
*
|
||||
*/
|
||||
|
||||
""");
|
||||
|
||||
def mask(bit):
|
||||
return str((1 << bit) - 1)
|
||||
|
||||
for length in [32]:
|
||||
print("""
|
||||
static __m128i iunpackFOR0(__m128i initOffset, const __m128i * _in , uint32_t * _out) {
|
||||
__m128i *out = (__m128i*)(_out);
|
||||
int i;
|
||||
(void) _in;
|
||||
for (i = 0; i < 8; ++i) {
|
||||
_mm_store_si128(out++, initOffset);
|
||||
_mm_store_si128(out++, initOffset);
|
||||
_mm_store_si128(out++, initOffset);
|
||||
_mm_store_si128(out++, initOffset);
|
||||
}
|
||||
|
||||
return initOffset;
|
||||
}
|
||||
|
||||
""")
|
||||
print("""
|
||||
|
||||
static void ipackFOR0(__m128i initOffset , const uint32_t * _in , __m128i * out ) {
|
||||
(void) initOffset;
|
||||
(void) _in;
|
||||
(void) out;
|
||||
}
|
||||
""")
|
||||
for bit in range(1,33):
|
||||
offsetVar = " initOffset";
|
||||
print("""
|
||||
static void ipackFOR"""+str(bit)+"""(__m128i """+offsetVar+""", const uint32_t * _in, __m128i * out) {
|
||||
const __m128i *in = (const __m128i*)(_in);
|
||||
__m128i OutReg;
|
||||
|
||||
""");
|
||||
|
||||
if (bit != 32):
|
||||
print(" __m128i CurrIn = _mm_load_si128(in);");
|
||||
print(" __m128i InReg = _mm_sub_epi32(CurrIn, initOffset);");
|
||||
else:
|
||||
print(" __m128i InReg = _mm_load_si128(in);");
|
||||
print(" (void) initOffset;");
|
||||
|
||||
|
||||
inwordpointer = 0
|
||||
valuecounter = 0
|
||||
for k in range(ceil((length * bit) / 32)):
|
||||
if(valuecounter == length): break
|
||||
for x in range(inwordpointer,32,bit):
|
||||
if(x!=0) :
|
||||
print(" OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, " + str(x) + "));");
|
||||
else:
|
||||
print(" OutReg = InReg; ");
|
||||
if((x+bit>=32) ):
|
||||
while(inwordpointer<32):
|
||||
inwordpointer += bit
|
||||
print(" _mm_store_si128(out, OutReg);");
|
||||
print("");
|
||||
|
||||
if(valuecounter + 1 < length):
|
||||
print(" ++out;")
|
||||
inwordpointer -= 32;
|
||||
if(inwordpointer>0):
|
||||
print(" OutReg = _mm_srli_epi32(InReg, " + str(bit) + " - " + str(inwordpointer) + ");");
|
||||
if(valuecounter + 1 < length):
|
||||
print(" ++in;")
|
||||
|
||||
if (bit != 32):
|
||||
print(" CurrIn = _mm_load_si128(in);");
|
||||
print(" InReg = _mm_sub_epi32(CurrIn, initOffset);");
|
||||
else:
|
||||
print(" InReg = _mm_load_si128(in);");
|
||||
print("");
|
||||
valuecounter = valuecounter + 1
|
||||
if(valuecounter == length): break
|
||||
assert(valuecounter == length)
|
||||
print("\n}\n\n""")
|
||||
|
||||
for bit in range(1,32):
|
||||
offsetVar = " initOffset";
|
||||
print("""\n
|
||||
static __m128i iunpackFOR"""+str(bit)+"""(__m128i """+offsetVar+""", const __m128i* in, uint32_t * _out) {
|
||||
""");
|
||||
print(""" __m128i* out = (__m128i*)(_out);
|
||||
__m128i InReg = _mm_load_si128(in);
|
||||
__m128i OutReg;
|
||||
__m128i tmp;
|
||||
const __m128i mask = _mm_set1_epi32((1U<<"""+str(bit)+""")-1);
|
||||
|
||||
""");
|
||||
|
||||
MainText = "";
|
||||
|
||||
MainText += "\n";
|
||||
inwordpointer = 0
|
||||
valuecounter = 0
|
||||
for k in range(ceil((length * bit) / 32)):
|
||||
for x in range(inwordpointer,32,bit):
|
||||
if(valuecounter == length): break
|
||||
if (x > 0):
|
||||
MainText += " tmp = _mm_srli_epi32(InReg," + str(x) +");\n";
|
||||
else:
|
||||
MainText += " tmp = InReg;\n";
|
||||
if(x+bit<32):
|
||||
MainText += " OutReg = _mm_and_si128(tmp, mask);\n";
|
||||
else:
|
||||
MainText += " OutReg = tmp;\n";
|
||||
if((x+bit>=32) ):
|
||||
while(inwordpointer<32):
|
||||
inwordpointer += bit
|
||||
if(valuecounter + 1 < length):
|
||||
MainText += " ++in;"
|
||||
MainText += " InReg = _mm_load_si128(in);\n";
|
||||
inwordpointer -= 32;
|
||||
if(inwordpointer>0):
|
||||
MainText += " OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, " + str(bit) + "-" + str(inwordpointer) + "), mask));\n\n";
|
||||
if (bit != 32):
|
||||
MainText += " OutReg = _mm_add_epi32(OutReg, initOffset);\n";
|
||||
MainText += " _mm_store_si128(out++, OutReg);\n\n";
|
||||
MainText += "";
|
||||
valuecounter = valuecounter + 1
|
||||
if(valuecounter == length): break
|
||||
assert(valuecounter == length)
|
||||
print(MainText)
|
||||
print(" return initOffset;");
|
||||
print("\n}\n\n")
|
||||
print("""
|
||||
static __m128i iunpackFOR32(__m128i initvalue , const __m128i* in, uint32_t * _out) {
|
||||
__m128i * mout = (__m128i *)_out;
|
||||
__m128i invec;
|
||||
size_t k;
|
||||
for(k = 0; k < 128/4; ++k) {
|
||||
invec = _mm_load_si128(in++);
|
||||
_mm_store_si128(mout++, invec);
|
||||
}
|
||||
return invec;
|
||||
}
|
||||
""")
|
||||
40
cpp/simdcomp/simdcomp.def
Normal file
40
cpp/simdcomp/simdcomp.def
Normal file
@@ -0,0 +1,40 @@
|
||||
EXPORTS
|
||||
simdpack
|
||||
simdpackwithoutmask
|
||||
simdunpack
|
||||
bits
|
||||
maxbits
|
||||
maxbits_length
|
||||
simdmin
|
||||
simdmin_length
|
||||
simdmaxmin
|
||||
simdmaxmin_length
|
||||
simdmaxbitsd1
|
||||
simdmaxbitsd1_length
|
||||
simdpackd1
|
||||
simdpackwithoutmaskd1
|
||||
simdunpackd1
|
||||
simdsearchd1
|
||||
simdsearchwithlengthd1
|
||||
simdselectd1
|
||||
simdpackFOR
|
||||
simdselectFOR
|
||||
simdsearchwithlengthFOR
|
||||
simdunpackFOR
|
||||
simdmin_length
|
||||
simdmaxmin
|
||||
simdmaxmin_length
|
||||
simdpack_length
|
||||
simdpackFOR_length
|
||||
simdunpackFOR_length
|
||||
simdpack_shortlength
|
||||
simdfastsetFOR
|
||||
simdfastset
|
||||
simdfastsetd1
|
||||
simdunpack_length
|
||||
simdunpack_shortlength
|
||||
simdsearchwithlengthFOR
|
||||
simdscand1
|
||||
simdfastsetd1fromprevious
|
||||
simdfastsetd1
|
||||
|
||||
7795
cpp/simdcomp/src/avxbitpacking.c
Normal file
7795
cpp/simdcomp/src/avxbitpacking.c
Normal file
File diff suppressed because it is too large
Load Diff
14183
cpp/simdcomp/src/simdbitpacking.c
Normal file
14183
cpp/simdcomp/src/simdbitpacking.c
Normal file
File diff suppressed because it is too large
Load Diff
234
cpp/simdcomp/src/simdcomputil.c
Normal file
234
cpp/simdcomp/src/simdcomputil.c
Normal file
@@ -0,0 +1,234 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
|
||||
#include "simdcomputil.h"
|
||||
#ifdef __SSE4_1__
|
||||
#include <smmintrin.h>
|
||||
#endif
|
||||
#include <assert.h>
|
||||
|
||||
#define Delta(curr, prev) \
|
||||
_mm_sub_epi32(curr, \
|
||||
_mm_or_si128(_mm_slli_si128(curr, 4), _mm_srli_si128(prev, 12)))
|
||||
|
||||
/* returns the integer logarithm of v (bit width) */
|
||||
uint32_t bits(const uint32_t v) {
|
||||
#ifdef _MSC_VER
|
||||
unsigned long answer;
|
||||
if (v == 0) {
|
||||
return 0;
|
||||
}
|
||||
_BitScanReverse(&answer, v);
|
||||
return answer + 1;
|
||||
#else
|
||||
return v == 0 ? 0 : 32 - __builtin_clz(v); /* assume GCC-like compiler if not microsoft */
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
static uint32_t maxbitas32int(const __m128i accumulator) {
|
||||
const __m128i _tmp1 = _mm_or_si128(_mm_srli_si128(accumulator, 8), accumulator); /* (A,B,C,D) xor (0,0,A,B) = (A,B,C xor A,D xor B)*/
|
||||
const __m128i _tmp2 = _mm_or_si128(_mm_srli_si128(_tmp1, 4), _tmp1); /* (A,B,C xor A,D xor B) xor (0,0,0,C xor A)*/
|
||||
uint32_t ans = _mm_cvtsi128_si32(_tmp2);
|
||||
return bits(ans);
|
||||
}
|
||||
|
||||
SIMDCOMP_PURE uint32_t maxbits(const uint32_t * begin) {
|
||||
const __m128i* pin = (const __m128i*)(begin);
|
||||
__m128i accumulator = _mm_loadu_si128(pin);
|
||||
uint32_t k = 1;
|
||||
for(; 4*k < SIMDBlockSize; ++k) {
|
||||
__m128i newvec = _mm_loadu_si128(pin+k);
|
||||
accumulator = _mm_or_si128(accumulator,newvec);
|
||||
}
|
||||
return maxbitas32int(accumulator);
|
||||
}
|
||||
static uint32_t orasint(const __m128i accumulator) {
|
||||
const __m128i _tmp1 = _mm_or_si128(_mm_srli_si128(accumulator, 8), accumulator); /* (A,B,C,D) xor (0,0,A,B) = (A,B,C xor A,D xor B)*/
|
||||
const __m128i _tmp2 = _mm_or_si128(_mm_srli_si128(_tmp1, 4), _tmp1); /* (A,B,C xor A,D xor B) xor (0,0,0,C xor A)*/
|
||||
return _mm_cvtsi128_si32(_tmp2);
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
|
||||
static uint32_t minasint(const __m128i accumulator) {
|
||||
const __m128i _tmp1 = _mm_min_epu32(_mm_srli_si128(accumulator, 8), accumulator); /* (A,B,C,D) xor (0,0,A,B) = (A,B,C xor A,D xor B)*/
|
||||
const __m128i _tmp2 = _mm_min_epu32(_mm_srli_si128(_tmp1, 4), _tmp1); /* (A,B,C xor A,D xor B) xor (0,0,0,C xor A)*/
|
||||
return _mm_cvtsi128_si32(_tmp2);
|
||||
}
|
||||
|
||||
static uint32_t maxasint(const __m128i accumulator) {
|
||||
const __m128i _tmp1 = _mm_max_epu32(_mm_srli_si128(accumulator, 8), accumulator); /* (A,B,C,D) xor (0,0,A,B) = (A,B,C xor A,D xor B)*/
|
||||
const __m128i _tmp2 = _mm_max_epu32(_mm_srli_si128(_tmp1, 4), _tmp1); /* (A,B,C xor A,D xor B) xor (0,0,0,C xor A)*/
|
||||
return _mm_cvtsi128_si32(_tmp2);
|
||||
}
|
||||
|
||||
uint32_t simdmin(const uint32_t * in) {
|
||||
const __m128i* pin = (const __m128i*)(in);
|
||||
__m128i accumulator = _mm_loadu_si128(pin);
|
||||
uint32_t k = 1;
|
||||
for(; 4*k < SIMDBlockSize; ++k) {
|
||||
__m128i newvec = _mm_loadu_si128(pin+k);
|
||||
accumulator = _mm_min_epu32(accumulator,newvec);
|
||||
}
|
||||
return minasint(accumulator);
|
||||
}
|
||||
|
||||
void simdmaxmin(const uint32_t * in, uint32_t * getmin, uint32_t * getmax) {
|
||||
const __m128i* pin = (const __m128i*)(in);
|
||||
__m128i minaccumulator = _mm_loadu_si128(pin);
|
||||
__m128i maxaccumulator = minaccumulator;
|
||||
uint32_t k = 1;
|
||||
for(; 4*k < SIMDBlockSize; ++k) {
|
||||
__m128i newvec = _mm_loadu_si128(pin+k);
|
||||
minaccumulator = _mm_min_epu32(minaccumulator,newvec);
|
||||
maxaccumulator = _mm_max_epu32(maxaccumulator,newvec);
|
||||
}
|
||||
*getmin = minasint(minaccumulator);
|
||||
*getmax = maxasint(maxaccumulator);
|
||||
}
|
||||
|
||||
|
||||
uint32_t simdmin_length(const uint32_t * in, uint32_t length) {
|
||||
uint32_t currentmin = 0xFFFFFFFF;
|
||||
uint32_t lengthdividedby4 = length / 4;
|
||||
uint32_t offset = lengthdividedby4 * 4;
|
||||
uint32_t k;
|
||||
if (lengthdividedby4 > 0) {
|
||||
const __m128i* pin = (const __m128i*)(in);
|
||||
__m128i accumulator = _mm_loadu_si128(pin);
|
||||
k = 1;
|
||||
for(; 4*k < lengthdividedby4 * 4; ++k) {
|
||||
__m128i newvec = _mm_loadu_si128(pin+k);
|
||||
accumulator = _mm_min_epu32(accumulator,newvec);
|
||||
}
|
||||
currentmin = minasint(accumulator);
|
||||
}
|
||||
for (k = offset; k < length; ++k)
|
||||
if (in[k] < currentmin)
|
||||
currentmin = in[k];
|
||||
return currentmin;
|
||||
}
|
||||
|
||||
void simdmaxmin_length(const uint32_t * in, uint32_t length, uint32_t * getmin, uint32_t * getmax) {
|
||||
uint32_t lengthdividedby4 = length / 4;
|
||||
uint32_t offset = lengthdividedby4 * 4;
|
||||
uint32_t k;
|
||||
*getmin = 0xFFFFFFFF;
|
||||
*getmax = 0;
|
||||
if (lengthdividedby4 > 0) {
|
||||
const __m128i* pin = (const __m128i*)(in);
|
||||
__m128i minaccumulator = _mm_loadu_si128(pin);
|
||||
__m128i maxaccumulator = minaccumulator;
|
||||
k = 1;
|
||||
for(; 4*k < lengthdividedby4 * 4; ++k) {
|
||||
__m128i newvec = _mm_loadu_si128(pin+k);
|
||||
minaccumulator = _mm_min_epu32(minaccumulator,newvec);
|
||||
maxaccumulator = _mm_max_epu32(maxaccumulator,newvec);
|
||||
}
|
||||
*getmin = minasint(minaccumulator);
|
||||
*getmax = maxasint(maxaccumulator);
|
||||
}
|
||||
for (k = offset; k < length; ++k) {
|
||||
if (in[k] < *getmin)
|
||||
*getmin = in[k];
|
||||
if (in[k] > *getmax)
|
||||
*getmax = in[k];
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
SIMDCOMP_PURE uint32_t maxbits_length(const uint32_t * in,uint32_t length) {
|
||||
uint32_t k;
|
||||
uint32_t lengthdividedby4 = length / 4;
|
||||
uint32_t offset = lengthdividedby4 * 4;
|
||||
uint32_t bigxor = 0;
|
||||
if(lengthdividedby4 > 0) {
|
||||
const __m128i* pin = (const __m128i*)(in);
|
||||
__m128i accumulator = _mm_loadu_si128(pin);
|
||||
k = 1;
|
||||
for(; 4*k < 4*lengthdividedby4; ++k) {
|
||||
__m128i newvec = _mm_loadu_si128(pin+k);
|
||||
accumulator = _mm_or_si128(accumulator,newvec);
|
||||
}
|
||||
bigxor = orasint(accumulator);
|
||||
}
|
||||
for(k = offset; k < length; ++k)
|
||||
bigxor |= in[k];
|
||||
return bits(bigxor);
|
||||
}
|
||||
|
||||
|
||||
/* maxbit over 128 integers (SIMDBlockSize) with provided initial value */
|
||||
uint32_t simdmaxbitsd1(uint32_t initvalue, const uint32_t * in) {
|
||||
__m128i initoffset = _mm_set1_epi32 (initvalue);
|
||||
const __m128i* pin = (const __m128i*)(in);
|
||||
__m128i newvec = _mm_loadu_si128(pin);
|
||||
__m128i accumulator = Delta(newvec , initoffset);
|
||||
__m128i oldvec = newvec;
|
||||
uint32_t k = 1;
|
||||
for(; 4*k < SIMDBlockSize; ++k) {
|
||||
newvec = _mm_loadu_si128(pin+k);
|
||||
accumulator = _mm_or_si128(accumulator,Delta(newvec , oldvec));
|
||||
oldvec = newvec;
|
||||
}
|
||||
initoffset = oldvec;
|
||||
return maxbitas32int(accumulator);
|
||||
}
|
||||
|
||||
|
||||
/* maxbit over |length| integers with provided initial value */
|
||||
uint32_t simdmaxbitsd1_length(uint32_t initvalue, const uint32_t * in,
|
||||
uint32_t length) {
|
||||
__m128i newvec;
|
||||
__m128i oldvec;
|
||||
__m128i initoffset;
|
||||
__m128i accumulator;
|
||||
const __m128i *pin;
|
||||
uint32_t tmparray[4];
|
||||
uint32_t k = 1;
|
||||
uint32_t acc;
|
||||
|
||||
assert(length > 0);
|
||||
|
||||
pin = (const __m128i *)(in);
|
||||
initoffset = _mm_set1_epi32(initvalue);
|
||||
switch (length) {
|
||||
case 1:
|
||||
newvec = _mm_set1_epi32(in[0]);
|
||||
break;
|
||||
case 2:
|
||||
newvec = _mm_setr_epi32(in[0], in[1], in[1], in[1]);
|
||||
break;
|
||||
case 3:
|
||||
newvec = _mm_setr_epi32(in[0], in[1], in[2], in[2]);
|
||||
break;
|
||||
default:
|
||||
newvec = _mm_loadu_si128(pin);
|
||||
break;
|
||||
}
|
||||
accumulator = Delta(newvec, initoffset);
|
||||
oldvec = newvec;
|
||||
|
||||
/* process 4 integers and build an accumulator */
|
||||
while (k * 4 + 4 <= length) {
|
||||
newvec = _mm_loadu_si128(pin + k);
|
||||
accumulator = _mm_or_si128(accumulator, Delta(newvec, oldvec));
|
||||
oldvec = newvec;
|
||||
k++;
|
||||
}
|
||||
|
||||
/* extract the accumulator as an integer */
|
||||
_mm_storeu_si128((__m128i *)(tmparray), accumulator);
|
||||
acc = tmparray[0] | tmparray[1] | tmparray[2] | tmparray[3];
|
||||
|
||||
/* now process the remaining integers */
|
||||
for (k *= 4; k < length; k++)
|
||||
acc |= in[k] - (k == 0 ? initvalue : in[k - 1]);
|
||||
|
||||
/* return the number of bits */
|
||||
return bits(acc);
|
||||
}
|
||||
14501
cpp/simdcomp/src/simdfor.c
Normal file
14501
cpp/simdcomp/src/simdfor.c
Normal file
File diff suppressed because it is too large
Load Diff
24882
cpp/simdcomp/src/simdintegratedbitpacking.c
Normal file
24882
cpp/simdcomp/src/simdintegratedbitpacking.c
Normal file
File diff suppressed because it is too large
Load Diff
15866
cpp/simdcomp/src/simdpackedsearch.c
Normal file
15866
cpp/simdcomp/src/simdpackedsearch.c
Normal file
File diff suppressed because it is too large
Load Diff
15490
cpp/simdcomp/src/simdpackedselect.c
Normal file
15490
cpp/simdcomp/src/simdpackedselect.c
Normal file
File diff suppressed because it is too large
Load Diff
900
cpp/simdcomp/tests/unit.c
Normal file
900
cpp/simdcomp/tests/unit.c
Normal file
@@ -0,0 +1,900 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "simdcomp.h"
|
||||
|
||||
|
||||
|
||||
int testshortpack() {
|
||||
int bit;
|
||||
size_t i;
|
||||
size_t length;
|
||||
__m128i * bb;
|
||||
srand(0);
|
||||
printf("testshortpack\n");
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((1 << bit) - 1);
|
||||
}
|
||||
for (length = 0; length <= N; ++length) {
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
bb = simdpack_shortlength(data, length, (__m128i *) buffer,
|
||||
bit);
|
||||
if((bb - (__m128i *) buffer) * sizeof(__m128i) != (unsigned) simdpack_compressedbytes(length,bit)) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
simdunpack_shortlength((__m128i *) buffer, length,
|
||||
backdata, bit);
|
||||
for (i = 0; i < length; ++i) {
|
||||
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int testlongpack() {
|
||||
int bit;
|
||||
size_t i;
|
||||
size_t length;
|
||||
__m128i * bb;
|
||||
srand(0);
|
||||
printf("testlongpack\n");
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
const size_t N = 2048;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((1 << bit) - 1);
|
||||
}
|
||||
for (length = 0; length <= N; ++length) {
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
bb = simdpack_length(data, length, (__m128i *) buffer,
|
||||
bit);
|
||||
if((bb - (__m128i *) buffer) * sizeof(__m128i) != (unsigned) simdpack_compressedbytes(length,bit)) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
simdunpack_length((__m128i *) buffer, length,
|
||||
backdata, bit);
|
||||
for (i = 0; i < length; ++i) {
|
||||
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int testset() {
|
||||
int bit;
|
||||
size_t i;
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
srand(0);
|
||||
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf("simple set %d \n",bit);
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((1 << bit) - 1);
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
simdpack(data, (__m128i *) buffer, bit);
|
||||
simdunpack((__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
for(i = N ; i > 0; i--) {
|
||||
simdfastset((__m128i *) buffer, bit, data[N - i], i - 1);
|
||||
}
|
||||
simdunpack((__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[N - i - 1]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
simdpack(data, (__m128i *) buffer, bit);
|
||||
for(i = 1 ; i <= N; i++) {
|
||||
simdfastset((__m128i *) buffer, bit, data[i - 1], i - 1);
|
||||
}
|
||||
simdunpack((__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
|
||||
int testsetd1() {
|
||||
int bit;
|
||||
size_t i;
|
||||
uint32_t newvalue;
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * datazeroes = malloc(N * sizeof(uint32_t));
|
||||
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
srand(0);
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf("simple set d1 %d \n",bit);
|
||||
data[0] = rand() & ((1 << bit) - 1);
|
||||
datazeroes[0] = 0;
|
||||
|
||||
for (i = 1; i < N; ++i) {
|
||||
data[i] = data[i - 1] + (rand() & ((1 << bit) - 1));
|
||||
datazeroes[i] = 0;
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
simdpackd1(0,datazeroes, (__m128i *) buffer, bit);
|
||||
for(i = 1 ; i <= N; i++) {
|
||||
simdfastsetd1(0,(__m128i *) buffer, bit, data[i - 1], i - 1);
|
||||
newvalue = simdselectd1(0, (const __m128i *) buffer, bit,i - 1);
|
||||
if( newvalue != data[i-1] ) {
|
||||
printf("bad set-select\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
simdunpackd1(0,(__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i])
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
free(datazeroes);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int testsetFOR() {
|
||||
int bit;
|
||||
size_t i;
|
||||
uint32_t newvalue;
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * datazeroes = malloc(N * sizeof(uint32_t));
|
||||
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
srand(0);
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf("simple set FOR %d \n",bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = (rand() & ((1 << bit) - 1));
|
||||
datazeroes[i] = 0;
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
simdpackFOR(0,datazeroes, (__m128i *) buffer, bit);
|
||||
for(i = 1 ; i <= N; i++) {
|
||||
simdfastsetFOR(0,(__m128i *) buffer, bit, data[i - 1], i - 1);
|
||||
newvalue = simdselectFOR(0, (const __m128i *) buffer, bit,i - 1);
|
||||
if( newvalue != data[i-1] ) {
|
||||
printf("bad set-select\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
simdunpackFOR(0,(__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i])
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
free(datazeroes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int testshortFORpack() {
|
||||
int bit;
|
||||
size_t i;
|
||||
__m128i * rb;
|
||||
size_t length;
|
||||
uint32_t offset = 7;
|
||||
srand(0);
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = (rand() & ((1 << bit) - 1)) + offset;
|
||||
}
|
||||
for (length = 0; length <= N; ++length) {
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
rb = simdpackFOR_length(offset,data, length, (__m128i *) buffer,
|
||||
bit);
|
||||
if(((rb - (__m128i *) buffer)*sizeof(__m128i)) != (unsigned) simdpackFOR_compressedbytes(length,bit)) {
|
||||
return -1;
|
||||
}
|
||||
simdunpackFOR_length(offset,(__m128i *) buffer, length,
|
||||
backdata, bit);
|
||||
for (i = 0; i < length; ++i) {
|
||||
|
||||
if (data[i] != backdata[i])
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#ifdef __AVX2__
|
||||
|
||||
int testbabyavx() {
|
||||
int bit;
|
||||
int trial;
|
||||
unsigned int i,j;
|
||||
const size_t N = AVXBlockSize;
|
||||
srand(0);
|
||||
printf("testbabyavx\n");
|
||||
printf("bit = ");
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf(" %d ",bit);
|
||||
fflush(stdout);
|
||||
for(trial = 0; trial < 100; ++trial) {
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t)+ 64 * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t) + 64 * sizeof(uint32_t) );
|
||||
__m256i * buffer = malloc((2 * N + 1024) * sizeof(uint32_t) + 32);
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((uint32_t)(1 << bit) - 1);
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
if(avxmaxbits(data) != maxbits_length(data,N)) {
|
||||
printf("avxmaxbits is buggy\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
avxpackwithoutmask(data, buffer, bit);
|
||||
avxunpack(buffer, backdata, bit);
|
||||
for (i = 0; i < AVXBlockSize; ++i) {
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
for (j = 0; j < N; ++j) {
|
||||
if (data[j] != backdata[j]) {
|
||||
printf("data[%d]=%d v.s. backdata[%d]=%d\n",j,data[j],j,backdata[j]);
|
||||
} else {
|
||||
printf("data[%d]=%d\n",j,data[j]);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int testavx2() {
|
||||
int N = 5000 * AVXBlockSize, gap;
|
||||
__m256i * buffer = malloc(AVXBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(AVXBlockSize * sizeof(uint32_t));
|
||||
for (gap = 1; gap <= 387420489; gap *= 3) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
for (k = 0; k < N; ++k)
|
||||
datain[k] = k * gap;
|
||||
for (k = 0; k * AVXBlockSize < N; ++k) {
|
||||
/*
|
||||
First part works for general arrays (sorted or unsorted)
|
||||
*/
|
||||
int j;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b = avxmaxbits(datain + k * AVXBlockSize);
|
||||
if(avxmaxbits(datain + k * AVXBlockSize) != maxbits_length(datain + k * AVXBlockSize,AVXBlockSize)) {
|
||||
printf("avxmaxbits is buggy %d %d \n",
|
||||
avxmaxbits(datain + k * AVXBlockSize),
|
||||
maxbits_length(datain + k * AVXBlockSize,AVXBlockSize));
|
||||
return -1;
|
||||
}
|
||||
printf("bit width = %d\n",b);
|
||||
|
||||
|
||||
/* we read 256 integers at "datain + k * AVXBlockSize" and
|
||||
write b 256-bit vectors at "buffer" */
|
||||
avxpackwithoutmask(datain + k * AVXBlockSize, buffer, b);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
avxunpack(buffer, backbuffer, b);/* uncompressed */
|
||||
for (j = 0; j < AVXBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * AVXBlockSize + j]) {
|
||||
int i;
|
||||
printf("bug in avxpack\n");
|
||||
for(i = 0; i < AVXBlockSize; ++i) {
|
||||
printf("data[%d]=%d got back %d %s\n",i,
|
||||
datain[k * AVXBlockSize + i],backbuffer[i],
|
||||
datain[k * AVXBlockSize + i]!=backbuffer[i]?"bug":"");
|
||||
}
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
#endif /* avx2 */
|
||||
|
||||
int test() {
|
||||
int N = 5000 * SIMDBlockSize, gap;
|
||||
__m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
for (gap = 1; gap <= 387420489; gap *= 3) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
for (k = 0; k < N; ++k)
|
||||
datain[k] = k * gap;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
/*
|
||||
First part works for general arrays (sorted or unsorted)
|
||||
*/
|
||||
int j;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b = maxbits(datain + k * SIMDBlockSize);
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b 128-bit vectors at "buffer" */
|
||||
simdpackwithoutmask(datain + k * SIMDBlockSize, buffer, b);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpack(buffer, backbuffer, b);/* uncompressed */
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdpack\n");
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
/*
|
||||
next part assumes that the data is sorted (uses differential coding)
|
||||
*/
|
||||
uint32_t offset = 0;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b1 = simdmaxbitsd1(offset,
|
||||
datain + k * SIMDBlockSize);
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b1 128-bit vectors at "buffer" */
|
||||
simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, buffer,
|
||||
b1);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpackd1(offset, buffer, backbuffer, b1);
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdpack d1\n");
|
||||
return -3;
|
||||
}
|
||||
}
|
||||
offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
int testFOR() {
|
||||
int N = 5000 * SIMDBlockSize, gap;
|
||||
__m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t tmax, tmin, tb;
|
||||
for (gap = 1; gap <= 387420489; gap *= 2) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
for (k = 0; k < N; ++k)
|
||||
datain[k] = k * gap;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
int j;
|
||||
simdmaxmin_length(datain + k * SIMDBlockSize,SIMDBlockSize,&tmin,&tmax);
|
||||
/* we compute the bit width */
|
||||
tb = bits(tmax - tmin);
|
||||
|
||||
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b 128-bit vectors at "buffer" */
|
||||
simdpackFOR(tmin,datain + k * SIMDBlockSize, buffer, tb);
|
||||
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
uint32_t selectedvalue = simdselectFOR(tmin,buffer,tb,j);
|
||||
if (selectedvalue != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdselectFOR\n");
|
||||
return -3;
|
||||
}
|
||||
}
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpackFOR(tmin,buffer, backbuffer, tb);/* uncompressed */
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdpackFOR\n");
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define MAX 300
|
||||
int test_simdmaxbitsd1_length() {
|
||||
uint32_t result, buffer[MAX + 1];
|
||||
int i, j;
|
||||
|
||||
memset(&buffer[0], 0xff, sizeof(buffer));
|
||||
|
||||
/* this test creates buffers of different length; each buffer is
|
||||
* initialized to result in the following deltas:
|
||||
* length 1: 2
|
||||
* length 2: 1 2
|
||||
* length 3: 1 1 2
|
||||
* length 4: 1 1 1 2
|
||||
* length 5: 1 1 1 1 2
|
||||
* etc. Each sequence's "maxbits" is 2. */
|
||||
for (i = 0; i < MAX; i++) {
|
||||
for (j = 0; j < i; j++)
|
||||
buffer[j] = j + 1;
|
||||
buffer[i] = i + 2;
|
||||
|
||||
result = simdmaxbitsd1_length(0, &buffer[0], i + 1);
|
||||
if (result != 2) {
|
||||
printf("simdmaxbitsd1_length: unexpected result %u in loop %d\n",
|
||||
result, i);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
printf("simdmaxbitsd1_length: ok\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int uint32_cmp(const void *a, const void *b)
|
||||
{
|
||||
const uint32_t *ia = (const uint32_t *)a;
|
||||
const uint32_t *ib = (const uint32_t *)b;
|
||||
if(*ia < *ib)
|
||||
return -1;
|
||||
else if (*ia > *ib)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
int test_simdpackedsearch() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t result = 0;
|
||||
int b, i;
|
||||
uint32_t init = 0;
|
||||
__m128i initial = _mm_set1_epi32(init);
|
||||
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++)
|
||||
buffer[i] = (uint32_t)(i + 1);
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 1; b <= 32; b++) {
|
||||
uint32_t out[128];
|
||||
/* delta-encode to 'i' bits */
|
||||
simdpackwithoutmaskd1(init, buffer, (__m128i *)out, b);
|
||||
initial = _mm_setzero_si128();
|
||||
printf("simdsearchd1: %d bits\n", b);
|
||||
|
||||
/* now perform the searches */
|
||||
initial = _mm_set1_epi32(init);
|
||||
assert(simdsearchd1(&initial, (__m128i *)out, b, 0, &result) == 0);
|
||||
assert(result > 0);
|
||||
|
||||
for (i = 1; i <= 128; i++) {
|
||||
initial = _mm_set1_epi32(init);
|
||||
assert(simdsearchd1(&initial, (__m128i *)out, b,
|
||||
(uint32_t)i, &result) == i - 1);
|
||||
assert(result == (unsigned)i);
|
||||
}
|
||||
initial = _mm_set1_epi32(init);
|
||||
assert(simdsearchd1(&initial, (__m128i *)out, b, 200, &result)
|
||||
== 128);
|
||||
assert(result > 200);
|
||||
}
|
||||
printf("simdsearchd1: ok\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int test_simdpackedsearchFOR() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t result = 0;
|
||||
int b;
|
||||
uint32_t i;
|
||||
uint32_t maxv, tmin, tmax, tb;
|
||||
uint32_t out[128];
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 1; b <= 32; b++) {
|
||||
/* initialize the buffer */
|
||||
maxv = (b == 32)
|
||||
? 0xFFFFFFFF
|
||||
: ((1U<<b) - 1);
|
||||
for (i = 0; i < 128; i++)
|
||||
buffer[i] = maxv * (i + 1) / 128;
|
||||
simdmaxmin_length(buffer,SIMDBlockSize,&tmin,&tmax);
|
||||
/* we compute the bit width */
|
||||
tb = bits(tmax - tmin);
|
||||
/* delta-encode to 'i' bits */
|
||||
simdpackFOR(tmin, buffer, (__m128i *)out, tb);
|
||||
printf("simdsearchd1: %d bits\n", b);
|
||||
|
||||
/* now perform the searches */
|
||||
for (i = 0; i < 128; i++) {
|
||||
assert(buffer[i] == simdselectFOR(tmin, (__m128i *)out, tb,i));
|
||||
}
|
||||
for (i = 0; i < 128; i++) {
|
||||
int x = simdsearchwithlengthFOR(tmin, (__m128i *)out, tb,
|
||||
128,buffer[i], &result) ;
|
||||
assert(simdselectFOR(tmin, (__m128i *)out, tb,x) == buffer[x]);
|
||||
assert(simdselectFOR(tmin, (__m128i *)out, tb,x) == result);
|
||||
assert(buffer[x] == result);
|
||||
assert(result == buffer[i]);
|
||||
assert(buffer[x] == buffer[i]);
|
||||
}
|
||||
}
|
||||
printf("simdsearchFOR: ok\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int test_simdpackedsearch_advanced() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t backbuffer[128];
|
||||
uint32_t out[128];
|
||||
uint32_t result = 0;
|
||||
uint32_t b, i;
|
||||
uint32_t init = 0;
|
||||
__m128i initial = _mm_set1_epi32(init);
|
||||
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 0; b <= 32; b++) {
|
||||
uint32_t prev = init;
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = ((uint32_t)(1431655765 * i + 0xFFFFFFFF)) ;
|
||||
if(b < 32) buffer[i] %= (1<<b);
|
||||
}
|
||||
|
||||
qsort(buffer,128, sizeof(uint32_t), uint32_cmp);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = buffer[i] + prev;
|
||||
prev = buffer[i];
|
||||
}
|
||||
for (i = 1; i < 128; i++) {
|
||||
if(buffer[i] < buffer[i-1] )
|
||||
buffer[i] = buffer[i-1];
|
||||
}
|
||||
assert(simdmaxbitsd1(init, buffer)<=b);
|
||||
for (i = 0; i < 128; i++) {
|
||||
out[i] = 0; /* memset would do too */
|
||||
}
|
||||
|
||||
/* delta-encode to 'i' bits */
|
||||
simdpackwithoutmaskd1(init, buffer, (__m128i *)out, b);
|
||||
simdunpackd1(init, (__m128i *)out, backbuffer, b);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
assert(buffer[i] == backbuffer[i]);
|
||||
}
|
||||
|
||||
printf("advanced simdsearchd1: %d bits\n", b);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
int pos;
|
||||
initial = _mm_set1_epi32(init);
|
||||
pos = simdsearchd1(&initial, (__m128i *)out, b,
|
||||
buffer[i], &result);
|
||||
assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
|
||||
buffer[i], &result));
|
||||
assert(buffer[pos] == buffer[i]);
|
||||
if(pos > 0)
|
||||
assert(buffer[pos - 1] < buffer[i]);
|
||||
assert(result == buffer[i]);
|
||||
}
|
||||
for (i = 0; i < 128; i++) {
|
||||
int pos;
|
||||
if(buffer[i] == 0) continue;
|
||||
initial = _mm_set1_epi32(init);
|
||||
pos = simdsearchd1(&initial, (__m128i *)out, b,
|
||||
buffer[i] - 1, &result);
|
||||
assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
|
||||
buffer[i] - 1, &result));
|
||||
assert(buffer[pos] >= buffer[i] - 1);
|
||||
if(pos > 0)
|
||||
assert(buffer[pos - 1] < buffer[i] - 1);
|
||||
assert(result == buffer[pos]);
|
||||
}
|
||||
for (i = 0; i < 128; i++) {
|
||||
int pos;
|
||||
if (buffer[i] + 1 == 0)
|
||||
continue;
|
||||
initial = _mm_set1_epi32(init);
|
||||
pos = simdsearchd1(&initial, (__m128i *) out, b,
|
||||
buffer[i] + 1, &result);
|
||||
assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
|
||||
buffer[i] + 1, &result));
|
||||
if(pos == 128) {
|
||||
assert(buffer[i] == buffer[127]);
|
||||
} else {
|
||||
assert(buffer[pos] >= buffer[i] + 1);
|
||||
if (pos > 0)
|
||||
assert(buffer[pos - 1] < buffer[i] + 1);
|
||||
assert(result == buffer[pos]);
|
||||
}
|
||||
}
|
||||
}
|
||||
printf("advanced simdsearchd1: ok\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int test_simdpackedselect() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t initial = 33;
|
||||
int b, i;
|
||||
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++)
|
||||
buffer[i] = (uint32_t)(initial + i);
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 1; b <= 32; b++) {
|
||||
uint32_t out[128];
|
||||
/* delta-encode to 'i' bits */
|
||||
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
|
||||
|
||||
printf("simdselectd1: %d bits\n", b);
|
||||
|
||||
/* now perform the searches */
|
||||
for (i = 0; i < 128; i++) {
|
||||
assert(simdselectd1(initial, (__m128i *)out, b, (uint32_t)i)
|
||||
== initial + i);
|
||||
}
|
||||
}
|
||||
printf("simdselectd1: ok\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int test_simdpackedselect_advanced() {
|
||||
uint32_t buffer[128];
|
||||
uint32_t initial = 33;
|
||||
uint32_t b;
|
||||
int i;
|
||||
|
||||
/* this test creates delta encoded buffers with different bits, then
|
||||
* performs lower bound searches for each key */
|
||||
for (b = 0; b <= 32; b++) {
|
||||
uint32_t prev = initial;
|
||||
uint32_t out[128];
|
||||
/* initialize the buffer */
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = ((uint32_t)(165576 * i)) ;
|
||||
if(b < 32) buffer[i] %= (1<<b);
|
||||
}
|
||||
for (i = 0; i < 128; i++) {
|
||||
buffer[i] = buffer[i] + prev;
|
||||
prev = buffer[i];
|
||||
}
|
||||
|
||||
for (i = 1; i < 128; i++) {
|
||||
if(buffer[i] < buffer[i-1] )
|
||||
buffer[i] = buffer[i-1];
|
||||
}
|
||||
assert(simdmaxbitsd1(initial, buffer)<=b);
|
||||
|
||||
for (i = 0; i < 128; i++) {
|
||||
out[i] = 0; /* memset would do too */
|
||||
}
|
||||
|
||||
/* delta-encode to 'i' bits */
|
||||
simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
|
||||
|
||||
printf("simdselectd1: %d bits\n", b);
|
||||
|
||||
/* now perform the searches */
|
||||
for (i = 0; i < 128; i++) {
|
||||
uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i);
|
||||
assert(valretrieved == buffer[i]);
|
||||
}
|
||||
}
|
||||
printf("advanced simdselectd1: ok\n");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
int main() {
|
||||
int r;
|
||||
r = testsetFOR();
|
||||
if (r) {
|
||||
printf("test failure 1\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
r = testsetd1();
|
||||
if (r) {
|
||||
printf("test failure 2\n");
|
||||
return r;
|
||||
}
|
||||
#endif
|
||||
r = testset();
|
||||
if (r) {
|
||||
printf("test failure 3\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = testshortFORpack();
|
||||
if (r) {
|
||||
printf("test failure 4\n");
|
||||
return r;
|
||||
}
|
||||
r = testshortpack();
|
||||
if (r) {
|
||||
printf("test failure 5\n");
|
||||
return r;
|
||||
}
|
||||
r = testlongpack();
|
||||
if (r) {
|
||||
printf("test failure 6\n");
|
||||
return r;
|
||||
}
|
||||
#ifdef __SSE4_1__
|
||||
r = test_simdpackedsearchFOR();
|
||||
if (r) {
|
||||
printf("test failure 7\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = testFOR();
|
||||
if (r) {
|
||||
printf("test failure 8\n");
|
||||
return r;
|
||||
}
|
||||
#endif
|
||||
#ifdef __AVX2__
|
||||
r= testbabyavx();
|
||||
if (r) {
|
||||
printf("test failure baby avx\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = testavx2();
|
||||
if (r) {
|
||||
printf("test failure 9 avx\n");
|
||||
return r;
|
||||
}
|
||||
#endif
|
||||
r = test();
|
||||
if (r) {
|
||||
printf("test failure 9\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = test_simdmaxbitsd1_length();
|
||||
if (r) {
|
||||
printf("test failure 10\n");
|
||||
return r;
|
||||
}
|
||||
#ifdef __SSE4_1__
|
||||
r = test_simdpackedsearch();
|
||||
if (r) {
|
||||
printf("test failure 11\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = test_simdpackedsearch_advanced();
|
||||
if (r) {
|
||||
printf("test failure 12\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = test_simdpackedselect();
|
||||
if (r) {
|
||||
printf("test failure 13\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = test_simdpackedselect_advanced();
|
||||
if (r) {
|
||||
printf("test failure 14\n");
|
||||
return r;
|
||||
}
|
||||
#endif
|
||||
printf("All tests OK!\n");
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
102
cpp/simdcomp/tests/unit_chars.c
Normal file
102
cpp/simdcomp/tests/unit_chars.c
Normal file
@@ -0,0 +1,102 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include "simdcomp.h"
|
||||
|
||||
|
||||
#define get_random_char() (uint8_t)(rand() % 256);
|
||||
|
||||
|
||||
int main() {
|
||||
int N = 5000 * SIMDBlockSize, gap;
|
||||
__m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
|
||||
srand(time(NULL));
|
||||
|
||||
for (gap = 1; gap <= 387420489; gap *= 3) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
|
||||
/* simulate some random character string, don't care about endiannes */
|
||||
for (k = 0; k < N; ++k) {
|
||||
uint8_t _tmp[4];
|
||||
|
||||
_tmp[0] = get_random_char();
|
||||
_tmp[1] = get_random_char();
|
||||
_tmp[2] = get_random_char();
|
||||
_tmp[3] = get_random_char();
|
||||
|
||||
memmove(&datain[k], _tmp, 4);
|
||||
}
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
/*
|
||||
First part works for general arrays (sorted or unsorted)
|
||||
*/
|
||||
int j;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b = maxbits(datain + k * SIMDBlockSize);
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b 128-bit vectors at "buffer" */
|
||||
simdpackwithoutmask(datain + k * SIMDBlockSize, buffer, b);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpack(buffer, backbuffer, b);/* uncompressed */
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
uint8_t chars_back[4];
|
||||
uint8_t chars_in[4];
|
||||
|
||||
memmove(chars_back, &backbuffer[j], 4);
|
||||
memmove(chars_in, &datain[k * SIMDBlockSize + j], 4);
|
||||
|
||||
if (chars_in[0] != chars_back[0]
|
||||
|| chars_in[1] != chars_back[1]
|
||||
|| chars_in[2] != chars_back[2]
|
||||
|| chars_in[3] != chars_back[3]) {
|
||||
printf("bug in simdpack\n");
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
/*
|
||||
next part assumes that the data is sorted (uses differential coding)
|
||||
*/
|
||||
uint32_t offset = 0;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b1 = simdmaxbitsd1(offset,
|
||||
datain + k * SIMDBlockSize);
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b1 128-bit vectors at "buffer" */
|
||||
simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, buffer,
|
||||
b1);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpackd1(offset, buffer, backbuffer, b1);
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
uint8_t chars_back[4];
|
||||
uint8_t chars_in[4];
|
||||
|
||||
memmove(chars_back, &backbuffer[j], 4);
|
||||
memmove(chars_in, &datain[k * SIMDBlockSize + j], 4);
|
||||
|
||||
if (chars_in[0] != chars_back[0]
|
||||
|| chars_in[1] != chars_back[1]
|
||||
|| chars_in[2] != chars_back[2]
|
||||
|| chars_in[3] != chars_back[3]) {
|
||||
printf("bug in simdpack\n");
|
||||
return -3;
|
||||
}
|
||||
}
|
||||
offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
42
cpp/simdcomp_wrapper.c
vendored
Normal file
42
cpp/simdcomp_wrapper.c
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
#include "simdcomp.h"
|
||||
#include "simdcomputil.h"
|
||||
|
||||
// assumes datain has a size of 128 uint32
|
||||
// and that buffer is large enough to host the data.
|
||||
size_t compress_sorted(
|
||||
const uint32_t* datain,
|
||||
uint8_t* output,
|
||||
const uint32_t offset) {
|
||||
const uint32_t b = simdmaxbitsd1(offset, datain);
|
||||
*output++ = b;
|
||||
simdpackwithoutmaskd1(offset, datain, (__m128i *) output, b);
|
||||
return 1 + b * sizeof(__m128i);
|
||||
}
|
||||
|
||||
// assumes datain has a size of 128 uint32
|
||||
// and that buffer is large enough to host the data.
|
||||
size_t uncompress_sorted(
|
||||
const uint8_t* compressed_data,
|
||||
uint32_t* output,
|
||||
uint32_t offset) {
|
||||
const uint32_t b = *compressed_data++;
|
||||
simdunpackd1(offset, (__m128i *)compressed_data, output, b);
|
||||
return 1 + b * sizeof(__m128i);
|
||||
}
|
||||
|
||||
size_t compress_unsorted(
|
||||
const uint32_t* datain,
|
||||
uint8_t* output) {
|
||||
const uint32_t b = maxbits(datain);
|
||||
*output++ = b;
|
||||
simdpackwithoutmask(datain, (__m128i *) output, b);
|
||||
return 1 + b * sizeof(__m128i);
|
||||
}
|
||||
|
||||
size_t uncompress_unsorted(
|
||||
const uint8_t* compressed_data,
|
||||
uint32_t* output) {
|
||||
const uint32_t b = *compressed_data++;
|
||||
simdunpack((__m128i *)compressed_data, output, b);
|
||||
return 1 + b * sizeof(__m128i);
|
||||
}
|
||||
48
cpp/simdcomp_wrapper.cpp
vendored
48
cpp/simdcomp_wrapper.cpp
vendored
@@ -1,48 +0,0 @@
|
||||
#include <stdio.h>
|
||||
#include <time.h>
|
||||
#include <stdlib.h>
|
||||
#include "simdcomp.h"
|
||||
#include "simdcomputil.h"
|
||||
|
||||
extern "C" {
|
||||
|
||||
// assumes datain has a size of 128 uint32
|
||||
// and that buffer is large enough to host the data.
|
||||
size_t compress_sorted_cpp(
|
||||
const uint32_t* datain,
|
||||
uint8_t* output,
|
||||
const uint32_t offset) {
|
||||
const uint32_t b = simdmaxbitsd1(offset, datain);
|
||||
*output++ = b;
|
||||
simdpackwithoutmaskd1(offset, datain, (__m128i *) output, b);
|
||||
return 1 + b * sizeof(__m128i);;
|
||||
}
|
||||
|
||||
// assumes datain has a size of 128 uint32
|
||||
// and that buffer is large enough to host the data.
|
||||
size_t uncompress_sorted_cpp(
|
||||
const uint8_t* compressed_data,
|
||||
uint32_t* output,
|
||||
uint32_t offset) {
|
||||
const uint32_t b = *compressed_data++;
|
||||
simdunpackd1(offset, (__m128i *)compressed_data, output, b);
|
||||
return 1 + b * sizeof(__m128i);
|
||||
}
|
||||
|
||||
size_t compress_unsorted_cpp(
|
||||
const uint32_t* datain,
|
||||
uint8_t* output) {
|
||||
const uint32_t b = maxbits(datain);
|
||||
*output++ = b;
|
||||
simdpackwithoutmask(datain, (__m128i *) output, b);
|
||||
return 1 + b * sizeof(__m128i);;
|
||||
}
|
||||
|
||||
size_t uncompress_unsorted_cpp(
|
||||
const uint8_t* compressed_data,
|
||||
uint32_t* output) {
|
||||
const uint32_t b = *compressed_data++;
|
||||
simdunpack((__m128i *)compressed_data, output, b);
|
||||
return 1 + b * sizeof(__m128i);
|
||||
}
|
||||
}
|
||||
32
cpp/streamvbyte/.gitignore
vendored
Normal file
32
cpp/streamvbyte/.gitignore
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# Object files
|
||||
*.o
|
||||
*.ko
|
||||
*.obj
|
||||
*.elf
|
||||
|
||||
# Precompiled Headers
|
||||
*.gch
|
||||
*.pch
|
||||
|
||||
# Libraries
|
||||
*.lib
|
||||
*.a
|
||||
*.la
|
||||
*.lo
|
||||
|
||||
# Shared objects (inc. Windows DLLs)
|
||||
*.dll
|
||||
*.so
|
||||
*.so.*
|
||||
*.dylib
|
||||
|
||||
# Executables
|
||||
*.exe
|
||||
*.out
|
||||
*.app
|
||||
*.i*86
|
||||
*.x86_64
|
||||
*.hex
|
||||
|
||||
# Debug files
|
||||
*.dSYM/
|
||||
7
cpp/streamvbyte/.travis.yml
Normal file
7
cpp/streamvbyte/.travis.yml
Normal file
@@ -0,0 +1,7 @@
|
||||
language: c
|
||||
sudo: false
|
||||
compiler:
|
||||
- gcc
|
||||
- clang
|
||||
|
||||
script: make && ./unit
|
||||
202
cpp/streamvbyte/LICENSE
Normal file
202
cpp/streamvbyte/LICENSE
Normal file
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
60
cpp/streamvbyte/README.md
Normal file
60
cpp/streamvbyte/README.md
Normal file
@@ -0,0 +1,60 @@
|
||||
streamvbyte
|
||||
===========
|
||||
[](https://travis-ci.org/lemire/streamvbyte)
|
||||
|
||||
StreamVByte is a new integer compression technique that applies SIMD instructions (vectorization) to
|
||||
Google's Group Varint approach. The net result is faster than other byte-oriented compression
|
||||
techniques.
|
||||
|
||||
The approach is patent-free, the code is available under the Apache License.
|
||||
|
||||
|
||||
It includes fast differential coding.
|
||||
|
||||
It assumes a recent Intel processor (e.g., haswell or better) .
|
||||
|
||||
The code should build using most standard-compliant C99 compilers. The provided makefile
|
||||
expects a Linux-like system.
|
||||
|
||||
|
||||
Usage:
|
||||
|
||||
make
|
||||
./unit
|
||||
|
||||
See example.c for an example.
|
||||
|
||||
Short code sample:
|
||||
```C
|
||||
// suppose that datain is an array of uint32_t integers
|
||||
size_t compsize = streamvbyte_encode(datain, N, compressedbuffer); // encoding
|
||||
// here the result is stored in compressedbuffer using compsize bytes
|
||||
streamvbyte_decode(compressedbuffer, recovdata, N); // decoding (fast)
|
||||
```
|
||||
|
||||
If the values are sorted, then it might be preferable to use differential coding:
|
||||
```C
|
||||
// suppose that datain is an array of uint32_t integers
|
||||
size_t compsize = streamvbyte_delta_encode(datain, N, compressedbuffer,0); // encoding
|
||||
// here the result is stored in compressedbuffer using compsize bytes
|
||||
streamvbyte_delta_decode(compressedbuffer, recovdata, N,0); // decoding (fast)
|
||||
```
|
||||
You have to know how many integers were coded when you decompress. You can store this
|
||||
information along with the compressed stream.
|
||||
|
||||
See also
|
||||
--------
|
||||
* SIMDCompressionAndIntersection: A C++ library to compress and intersect sorted lists of integers using SIMD instructions https://github.com/lemire/SIMDCompressionAndIntersect
|
||||
* The FastPFOR C++ library : Fast integer compression https://github.com/lemire/FastPFor
|
||||
* High-performance dictionary coding https://github.com/lemire/dictionary
|
||||
* LittleIntPacker: C library to pack and unpack short arrays of integers as fast as possible https://github.com/lemire/LittleIntPacker
|
||||
* The SIMDComp library: A simple C library for compressing lists of integers using binary packing https://github.com/lemire/simdcomp
|
||||
* MaskedVByte: Fast decoder for VByte-compressed integers https://github.com/lemire/MaskedVByte
|
||||
* CSharpFastPFOR: A C# integer compression library https://github.com/Genbox/CSharpFastPFOR
|
||||
* JavaFastPFOR: A java integer compression library https://github.com/lemire/JavaFastPFOR
|
||||
* Encoding: Integer Compression Libraries for Go https://github.com/zhenjl/encoding
|
||||
* FrameOfReference is a C++ library dedicated to frame-of-reference (FOR) compression: https://github.com/lemire/FrameOfReference
|
||||
* libvbyte: A fast implementation for varbyte 32bit/64bit integer compression https://github.com/cruppstahl/libvbyte
|
||||
* TurboPFor is a C library that offers lots of interesting optimizations. Well worth checking! (GPL license) https://github.com/powturbo/TurboPFor
|
||||
* Oroch is a C++ library that offers a usable API (MIT license) https://github.com/ademakov/Oroch
|
||||
|
||||
24
cpp/streamvbyte/example.c
Normal file
24
cpp/streamvbyte/example.c
Normal file
@@ -0,0 +1,24 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include "streamvbyte.h"
|
||||
|
||||
int main() {
|
||||
int N = 5000;
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint8_t * compressedbuffer = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * recovdata = malloc(N * sizeof(uint32_t));
|
||||
for (int k = 0; k < N; ++k)
|
||||
datain[k] = 120;
|
||||
size_t compsize = streamvbyte_encode(datain, N, compressedbuffer); // encoding
|
||||
// here the result is stored in compressedbuffer using compsize bytes
|
||||
size_t compsize2 = streamvbyte_decode(compressedbuffer, recovdata,
|
||||
N); // decoding (fast)
|
||||
assert(compsize == compsize2);
|
||||
free(datain);
|
||||
free(compressedbuffer);
|
||||
free(recovdata);
|
||||
printf("Compressed %d integers down to %d bytes.\n",N,(int) compsize);
|
||||
return 0;
|
||||
}
|
||||
19
cpp/streamvbyte/include/streamvbyte.h
Normal file
19
cpp/streamvbyte/include/streamvbyte.h
Normal file
@@ -0,0 +1,19 @@
|
||||
|
||||
#ifndef VARINTDECODE_H_
|
||||
#define VARINTDECODE_H_
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
#include <stdint.h>// please use a C99-compatible compiler
|
||||
#include <stddef.h>
|
||||
|
||||
|
||||
// Encode an array of a given length read from in to bout in varint format.
|
||||
// Returns the number of bytes written.
|
||||
size_t streamvbyte_encode(const uint32_t *in, uint32_t length, uint8_t *out);
|
||||
|
||||
// Read "length" 32-bit integers in varint format from in, storing the result in out.
|
||||
// Returns the number of bytes read.
|
||||
size_t streamvbyte_decode(const uint8_t* in, uint32_t* out, uint32_t length);
|
||||
|
||||
|
||||
#endif /* VARINTDECODE_H_ */
|
||||
24
cpp/streamvbyte/include/streamvbytedelta.h
Normal file
24
cpp/streamvbyte/include/streamvbytedelta.h
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
* streamvbytedelta.h
|
||||
*
|
||||
* Created on: Apr 14, 2016
|
||||
* Author: lemire
|
||||
*/
|
||||
|
||||
#ifndef INCLUDE_STREAMVBYTEDELTA_H_
|
||||
#define INCLUDE_STREAMVBYTEDELTA_H_
|
||||
|
||||
|
||||
// Encode an array of a given length read from in to bout in StreamVByte format.
|
||||
// Returns the number of bytes written.
|
||||
// this version uses differential coding (coding differences between values) starting at prev (you can often set prev to zero)
|
||||
size_t streamvbyte_delta_encode(const uint32_t *in, uint32_t length, uint8_t *out, uint32_t prev);
|
||||
|
||||
// Read "length" 32-bit integers in StreamVByte format from in, storing the result in out.
|
||||
// Returns the number of bytes read.
|
||||
// this version uses differential coding (coding differences between values) starting at prev (you can often set prev to zero)
|
||||
size_t streamvbyte_delta_decode(const uint8_t* in, uint32_t* out, uint32_t length, uint32_t prev);
|
||||
|
||||
|
||||
|
||||
#endif /* INCLUDE_STREAMVBYTEDELTA_H_ */
|
||||
58
cpp/streamvbyte/makefile
Normal file
58
cpp/streamvbyte/makefile
Normal file
@@ -0,0 +1,58 @@
|
||||
# minimalist makefile
|
||||
.SUFFIXES:
|
||||
#
|
||||
.SUFFIXES: .cpp .o .c .h
|
||||
|
||||
CFLAGS = -fPIC -march=native -std=c99 -O3 -Wall -Wextra -pedantic -Wshadow
|
||||
LDFLAGS = -shared
|
||||
LIBNAME=libstreamvbyte.so.0.0.1
|
||||
all: unit $(LIBNAME)
|
||||
test:
|
||||
./unit
|
||||
install: $(OBJECTS)
|
||||
cp $(LIBNAME) /usr/local/lib
|
||||
ln -s /usr/local/lib/$(LIBNAME) /usr/local/lib/libstreamvbyte.so
|
||||
ldconfig
|
||||
cp $(HEADERS) /usr/local/include
|
||||
|
||||
|
||||
|
||||
HEADERS=./include/streamvbyte.h ./include/streamvbytedelta.h
|
||||
|
||||
uninstall:
|
||||
for h in $(HEADERS) ; do rm /usr/local/$$h; done
|
||||
rm /usr/local/lib/$(LIBNAME)
|
||||
rm /usr/local/lib/libstreamvbyte.so
|
||||
ldconfig
|
||||
|
||||
|
||||
OBJECTS= streamvbyte.o streamvbytedelta.o
|
||||
|
||||
|
||||
|
||||
streamvbytedelta.o: ./src/streamvbytedelta.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/streamvbytedelta.c -Iinclude
|
||||
|
||||
|
||||
streamvbyte.o: ./src/streamvbyte.c $(HEADERS)
|
||||
$(CC) $(CFLAGS) -c ./src/streamvbyte.c -Iinclude
|
||||
|
||||
|
||||
|
||||
$(LIBNAME): $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o $(LIBNAME) $(OBJECTS) $(LDFLAGS)
|
||||
|
||||
|
||||
|
||||
|
||||
example: ./example.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o example ./example.c -Iinclude $(OBJECTS)
|
||||
|
||||
unit: ./tests/unit.c $(HEADERS) $(OBJECTS)
|
||||
$(CC) $(CFLAGS) -o unit ./tests/unit.c -Iinclude $(OBJECTS)
|
||||
|
||||
dynunit: ./tests/unit.c $(HEADERS) $(LIBNAME)
|
||||
$(CC) $(CFLAGS) -o dynunit ./tests/unit.c -Iinclude -lstreamvbyte
|
||||
|
||||
clean:
|
||||
rm -f unit *.o $(LIBNAME) example
|
||||
495
cpp/streamvbyte/src/streamvbyte.c
Normal file
495
cpp/streamvbyte/src/streamvbyte.c
Normal file
@@ -0,0 +1,495 @@
|
||||
#include "streamvbyte.h"
|
||||
#if defined(_MSC_VER)
|
||||
/* Microsoft C/C++-compatible compiler */
|
||||
#include <intrin.h>
|
||||
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
|
||||
/* GCC-compatible compiler, targeting x86/x86-64 */
|
||||
#include <x86intrin.h>
|
||||
#elif defined(__GNUC__) && defined(__ARM_NEON__)
|
||||
/* GCC-compatible compiler, targeting ARM with NEON */
|
||||
#include <arm_neon.h>
|
||||
#elif defined(__GNUC__) && defined(__IWMMXT__)
|
||||
/* GCC-compatible compiler, targeting ARM with WMMX */
|
||||
#include <mmintrin.h>
|
||||
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
|
||||
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
|
||||
#include <altivec.h>
|
||||
#elif defined(__GNUC__) && defined(__SPE__)
|
||||
/* GCC-compatible compiler, targeting PowerPC with SPE */
|
||||
#include <spe.h>
|
||||
#endif
|
||||
|
||||
// lengthTable[key] = total number of encoded data bytes consumed by one
// 4-integer group whose key byte is `key`.  Each 2-bit code c in the key
// means "c + 1 bytes", so the value is 4 + (sum of the four codes),
// ranging from 4 (four 1-byte values) to 16 (four 4-byte values).
static uint8_t lengthTable[256] = { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9,
    10, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7, 8,
    9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10,
    11, 12, 10, 11, 12, 13, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
    11, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10,
    8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11,
    12, 10, 11, 12, 13, 11, 12, 13, 14, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
    11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12,
    13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10,
    11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 7, 8, 9, 10, 8,
    9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11, 12,
    10, 11, 12, 13, 11, 12, 13, 14, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12,
    13, 14, 12, 13, 14, 15, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
    13, 14, 15, 16 };
|
||||
|
||||
// PSHUFB masks, one 16-byte row per key byte.  shuffleTable[key] rearranges
// the packed 1..4-byte values of a 4-integer group into four 32-bit
// little-endian lanes.  The -1 entries wrap to 0xFF in uint8_t;
// _mm_shuffle_epi8 zeroes any destination byte whose mask byte has its high
// bit set, which zero-extends each value to 32 bits.  The row comment gives
// the four byte-lengths, first integer first ("2311" = 2, 3, 1, 1 bytes).
static uint8_t shuffleTable[256][16] = { { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1,
    -1, -1, 3, -1, -1, -1 }, // 1111
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 2111
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 3111
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 4111
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 1211
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 2211
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 3211
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 4211
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 1311
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 2311
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 3311
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, -1, -1, -1 }, // 4311
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, -1, -1, -1 }, // 1411
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, -1, -1, -1 }, // 2411
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, -1, -1, -1 }, // 3411
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, -1, -1, -1 }, // 4411
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1 }, // 1121
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 2121
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 3121
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 4121
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 1221
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 2221
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 3221
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 4221
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 1321
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 2321
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 3321
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, -1, -1, -1 }, // 4321
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, -1, -1, -1 }, // 1421
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, -1, -1, -1 }, // 2421
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, -1, -1, -1 }, // 3421
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, -1, -1, -1 }, // 4421
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1 }, // 1131
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 2131
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 3131
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 4131
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 1231
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 2231
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 3231
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 4231
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 1331
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 2331
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 3331
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, -1, -1, -1 }, // 4331
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, -1, -1 }, // 1431
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, -1, -1, -1 }, // 2431
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, -1, -1, -1 }, // 3431
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, -1, -1, -1 }, // 4431
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1 }, // 1141
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 2141
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 3141
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 4141
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 1241
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 2241
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 3241
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 4241
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 1341
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 2341
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 3341
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, -1, -1, -1 }, // 4341
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1 }, // 1441
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, -1 }, // 2441
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1 }, // 3441
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1 }, // 4441
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1 }, // 1112
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 2112
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 3112
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 4112
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 1212
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 2212
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 3212
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 4212
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 1312
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 2312
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 3312
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, -1, -1 }, // 4312
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, -1, -1 }, // 1412
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, -1, -1 }, // 2412
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, -1, -1 }, // 3412
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, -1, -1 }, // 4412
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1 }, // 1122
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 2122
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 3122
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 4122
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 1222
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 2222
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 3222
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 4222
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 1322
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 2322
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 3322
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, -1, -1 }, // 4322
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, -1, -1 }, // 1422
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, -1, -1 }, // 2422
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, -1, -1 }, // 3422
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, -1, -1 }, // 4422
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1 }, // 1132
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 2132
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 3132
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 4132
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 1232
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 2232
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 3232
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 4232
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 1332
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 2332
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 3332
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, -1, -1 }, // 4332
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, -1, -1 }, // 1432
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, -1, -1 }, // 2432
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, -1, -1 }, // 3432
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, -1, -1 }, // 4432
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1 }, // 1142
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 2142
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 3142
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 4142
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 1242
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 2242
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 3242
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 4242
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 1342
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 2342
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 3342
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, -1, -1 }, // 4342
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1 }, // 1442
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1 }, // 2442
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1 }, // 3442
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1, -1 }, // 4442
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1 }, // 1113
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 2113
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 3113
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 4113
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 1213
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 2213
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 3213
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 4213
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 1313
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 2313
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 3313
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, -1 }, // 4313
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, -1 }, // 1413
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, -1 }, // 2413
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, -1 }, // 3413
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, -1 }, // 4413
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1 }, // 1123
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 2123
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 3123
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 4123
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 1223
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 2223
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 3223
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 4223
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 1323
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 2323
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 3323
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, -1 }, // 4323
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, -1 }, // 1423
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, -1 }, // 2423
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, -1 }, // 3423
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, -1 }, // 4423
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1 }, // 1133
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 2133
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 3133
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 4133
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 1233
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 2233
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 3233
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 4233
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 1333
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 2333
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 3333
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, -1 }, // 4333
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, -1 }, // 1433
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, -1 }, // 2433
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, -1 }, // 3433
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, -1 }, // 4433
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1 }, // 1143
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 2143
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 3143
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 4143
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 1243
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 2243
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 3243
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 4243
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 1343
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 2343
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 3343
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, -1 }, // 4343
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1 }, // 1443
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1 }, // 2443
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 }, // 3443
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1 }, // 4443
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6 }, // 1114
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 2114
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 3114
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 4114
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 1214
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 2214
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 3214
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 4214
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 1314
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 2314
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 3314
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, 11 }, // 4314
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, 9 }, // 1414
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, 10 }, // 2414
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, 11 }, // 3414
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, 12 }, // 4414
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7 }, // 1124
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 2124
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 3124
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 4124
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 1224
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 2224
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 3224
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 4224
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 1324
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 2324
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 3324
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, 12 }, // 4324
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, 10 }, // 1424
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11 }, // 2424
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, 12 }, // 3424
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, 13 }, // 4424
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8 }, // 1134
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 2134
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 3134
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 4134
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 1234
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 2234
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 3234
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 4234
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 1334
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 2334
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 3334
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, 13 }, // 4334
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, 11 }, // 1434
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, 12 }, // 2434
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, 13 }, // 3434
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14 }, // 4434
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9 }, // 1144
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 2144
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 3144
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 4144
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 1244
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 2244
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 3244
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 4244
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 1344
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 2344
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 3344
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, 14 }, // 4344
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }, // 1444
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, // 2444
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }, // 3444
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } // 4444
};
|
||||
|
||||
// Append `val` to the data stream in the smallest number of bytes that can
// hold it (1..4) and return the 2-bit size code (byte count - 1).
// *dataPtrPtr is advanced past the bytes written.
static uint8_t _encode_data(uint32_t val, uint8_t *__restrict__ *dataPtrPtr) {
    uint8_t *out = *dataPtrPtr;
    uint8_t code;

    if (val < (1 << 8)) { // fits in 1 byte
        out[0] = (uint8_t)(val);
        code = 0;
    } else if (val < (1 << 16)) { // fits in 2 bytes
        *(uint16_t *) out = (uint16_t)(val);
        code = 1;
    } else if (val < (1 << 24)) { // fits in 3 bytes
        *(uint16_t *) out = (uint16_t)(val);
        out[2] = (uint8_t)(val >> 16);
        code = 2;
    } else { // needs all 4 bytes
        *(uint32_t *) out = val;
        code = 3;
    }

    *dataPtrPtr = out + code + 1; // code + 1 == number of bytes written
    return code;
}
|
||||
|
||||
// Scalar streamvbyte encoder: packs one 2-bit size code per integer into the
// key area at `keyPtr` and the variable-length value bytes at `dataPtr`.
// The final (possibly partial) key byte is written without advancing keyPtr.
// Returns a pointer to the first unused data byte.
static uint8_t *svb_encode_scalar(const uint32_t *in,
        uint8_t *__restrict__ keyPtr, uint8_t *__restrict__ dataPtr,
        uint32_t count) {
    if (count == 0)
        return dataPtr; // nothing to encode: no key byte is written either

    const uint32_t *end = in + count;
    uint8_t key = 0;
    uint32_t packed = 0; // how many 2-bit codes are already in `key`

    for (const uint32_t *cur = in; cur != end; ++cur) {
        if (packed == 4) { // key byte full: flush it and start a new one
            *keyPtr++ = key;
            key = 0;
            packed = 0;
        }
        uint8_t code = _encode_data(*cur, &dataPtr);
        key = (uint8_t)(key | (code << (2 * packed)));
        ++packed;
    }

    *keyPtr = key; // last key byte (no increment needed)
    return dataPtr; // first unused data byte
}
|
||||
|
||||
// Encode `count` 32-bit integers from `in` into `out` in streamvbyte format:
// all key bytes first (one per 4 integers, rounded up), then the
// variable-length data bytes.  Returns the number of bytes written.
size_t streamvbyte_encode(const uint32_t *in, uint32_t count, uint8_t *out) {
    uint32_t keyLen = (count + 3) >> 2; // one key byte per 4 ints, rounded up
    uint8_t *dataEnd = svb_encode_scalar(in, out, out + keyLen, count);
    return (size_t)(dataEnd - out);
}
|
||||
|
||||
static inline __m128i _decode_avx(uint32_t key,
|
||||
const uint8_t *__restrict__ *dataPtrPtr) {
|
||||
uint8_t len = lengthTable[key];
|
||||
__m128i Data = _mm_loadu_si128((__m128i *) *dataPtrPtr);
|
||||
__m128i Shuf = *(__m128i *) &shuffleTable[key];
|
||||
|
||||
Data = _mm_shuffle_epi8(Data, Shuf);
|
||||
*dataPtrPtr += len;
|
||||
return Data;
|
||||
}
|
||||
|
||||
// Store one decoded group of four uint32 values to `out`; the unaligned
// store means `out` needs no particular alignment.
static inline void _write_avx(uint32_t *out, __m128i Vec) {
    __m128i *dst = (__m128i *) out;
    _mm_storeu_si128(dst, Vec);
}
|
||||
|
||||
// Read one value of (code + 1) bytes from *dataPtrPtr, zero-extended to 32
// bits, and advance *dataPtrPtr past it.  `code` is the 2-bit size code
// extracted from the key byte.
static inline uint32_t _decode_data(const uint8_t **dataPtrPtr, uint8_t code) {
    const uint8_t *p = *dataPtrPtr;
    uint32_t val;

    switch (code) {
    case 0: // 1 byte
        val = (uint32_t) p[0];
        break;
    case 1: // 2 bytes
        val = (uint32_t) * (uint16_t *) p;
        break;
    case 2: // 3 bytes: 16-bit load plus the high byte
        val = (uint32_t) * (uint16_t *) p;
        val |= (uint32_t) p[2] << 16;
        break;
    default: // code == 3, 4 bytes
        val = *(uint32_t *) p;
        break;
    }

    *dataPtrPtr = p + code + 1; // code + 1 == number of bytes consumed
    return val;
}
|
||||
// Scalar streamvbyte decoder: walks the key bytes at `keyPtr` (four 2-bit
// codes each, lowest bits first) and the data bytes at `dataPtr`, writing
// `count` decoded integers to `outPtr`.  Returns a pointer to the first
// unused data byte.
static const uint8_t *svb_decode_scalar(uint32_t *outPtr, const uint8_t *keyPtr,
        const uint8_t *dataPtr, uint32_t count) {
    if (count == 0)
        return dataPtr; // no reads or writes at all

    uint32_t keyByte = *keyPtr++;
    uint32_t consumed = 0; // codes already taken from keyByte

    for (uint32_t c = 0; c < count; c++) {
        if (consumed == 4) { // current key byte exhausted: fetch the next
            keyByte = *keyPtr++;
            consumed = 0;
        }
        uint8_t code = (uint8_t)(keyByte >> (2 * consumed)) & 0x3;
        *outPtr++ = _decode_data(&dataPtr, code);
        ++consumed;
    }

    return dataPtr; // first unused data byte
}
|
||||
|
||||
// Vectorized streamvbyte decoder.  Consumes whole 8-key-byte chunks (32
// integers each) with SSE shuffles, then finishes the remaining
// `count & 31` integers with the scalar decoder.  Returns a pointer just
// past the last data byte consumed.
//
// NOTE(review): _decode_avx loads 16 bytes per 4-integer group even when
// the group is shorter, so the input appears to need slack past the final
// data byte — confirm callers guarantee that.
const uint8_t *svb_decode_avx_simple(uint32_t *out,
        const uint8_t *__restrict__ keyPtr, const uint8_t *__restrict__ dataPtr,
        uint64_t count) {

    uint64_t keybytes = count / 4; // number of key bytes
    __m128i Data;
    if (keybytes >= 8) {

        // Bias the key pointer so the loop index runs from a negative
        // offset up to 0.  The last chunk (Offset == 0) is handled by the
        // block after the loop, which lets every loop iteration prefetch
        // `nextkeys` one chunk ahead without reading past the key area.
        int64_t Offset = -(int64_t) keybytes / 8 + 1;

        const uint64_t *keyPtr64 = (const uint64_t *) keyPtr - Offset;
        uint64_t nextkeys = keyPtr64[Offset];
        for (; Offset != 0; ++Offset) {
            uint64_t keys = nextkeys;
            nextkeys = keyPtr64[Offset + 1]; // prefetch the next 8 key bytes

            // 8 key bytes -> 8 groups of 4 integers (32 outputs), peeling
            // two key bytes off `keys` per pair of groups.
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 4, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 8, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 12, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 16, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 20, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 24, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 28, Data);

            out += 32;
        }
        {
            // Final 8-key-byte chunk: same unrolled body, but there is no
            // further chunk to prefetch.
            uint64_t keys = nextkeys;

            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 4, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 8, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 12, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 16, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 20, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 24, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 28, Data);

            out += 32;
        }
    }
    // Key bytes consumed by the vector path above (a multiple of 8); the
    // scalar decoder resumes at the next key byte.  Since the vector path
    // decoded floor(count/32)*32 integers, `count & 31` are left over.
    uint64_t consumedkeys = keybytes - (keybytes & 7);
    return svb_decode_scalar(out, keyPtr + consumedkeys, dataPtr, count & 31);
}
|
||||
|
||||
// Read count 32-bit integers in maskedvbyte format from in, storing the result in out. Returns the number of bytes read.
|
||||
size_t streamvbyte_decode(const uint8_t* in, uint32_t* out, uint32_t count) {
|
||||
if (count == 0)
|
||||
return 0;
|
||||
const uint8_t *keyPtr = in; // full list of keys is next
|
||||
uint32_t keyLen = ((count + 3) / 4); // 2-bits per key (rounded up)
|
||||
const uint8_t *dataPtr = keyPtr + keyLen; // data starts at end of keys
|
||||
return svb_decode_avx_simple(out, keyPtr, dataPtr, count) - in;
|
||||
|
||||
}
|
||||
575
cpp/streamvbyte/src/streamvbytedelta.c
Normal file
575
cpp/streamvbyte/src/streamvbytedelta.c
Normal file
@@ -0,0 +1,575 @@
|
||||
#include "streamvbyte.h"
|
||||
#if defined(_MSC_VER)
|
||||
/* Microsoft C/C++-compatible compiler */
|
||||
#include <intrin.h>
|
||||
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
|
||||
/* GCC-compatible compiler, targeting x86/x86-64 */
|
||||
#include <x86intrin.h>
|
||||
#elif defined(__GNUC__) && defined(__ARM_NEON__)
|
||||
/* GCC-compatible compiler, targeting ARM with NEON */
|
||||
#include <arm_neon.h>
|
||||
#elif defined(__GNUC__) && defined(__IWMMXT__)
|
||||
/* GCC-compatible compiler, targeting ARM with WMMX */
|
||||
#include <mmintrin.h>
|
||||
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
|
||||
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
|
||||
#include <altivec.h>
|
||||
#elif defined(__GNUC__) && defined(__SPE__)
|
||||
/* GCC-compatible compiler, targeting PowerPC with SPE */
|
||||
#include <spe.h>
|
||||
#endif
|
||||
|
||||
// lengthTable[key] = total number of encoded data bytes consumed by one
// 4-integer group whose key byte is `key`.  Each 2-bit code c in the key
// means "c + 1 bytes", so the value is 4 + (sum of the four codes),
// ranging from 4 (four 1-byte values) to 16 (four 4-byte values).
static uint8_t lengthTable[256] = { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9,
    10, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7, 8,
    9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10,
    11, 12, 10, 11, 12, 13, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
    11, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10,
    8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11,
    12, 10, 11, 12, 13, 11, 12, 13, 14, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
    11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12,
    13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10,
    11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 7, 8, 9, 10, 8,
    9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11, 12,
    10, 11, 12, 13, 11, 12, 13, 14, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12,
    13, 14, 12, 13, 14, 15, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
    13, 14, 15, 16 };
|
||||
|
||||
static uint8_t shuffleTable[256][16] = { { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1,
|
||||
-1, -1, 3, -1, -1, -1 }, // 1111
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 2111
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 3111
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 4111
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 1211
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 2211
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 3211
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 4211
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 1311
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 2311
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 3311
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, -1, -1, -1 }, // 4311
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, -1, -1, -1 }, // 1411
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, -1, -1, -1 }, // 2411
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, -1, -1, -1 }, // 3411
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, -1, -1, -1 }, // 4411
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1 }, // 1121
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 2121
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 3121
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 4121
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 1221
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 2221
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 3221
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 4221
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 1321
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 2321
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 3321
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, -1, -1, -1 }, // 4321
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, -1, -1, -1 }, // 1421
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, -1, -1, -1 }, // 2421
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, -1, -1, -1 }, // 3421
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, -1, -1, -1 }, // 4421
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1 }, // 1131
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 2131
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 3131
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 4131
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 1231
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 2231
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 3231
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 4231
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 1331
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 2331
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 3331
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, -1, -1, -1 }, // 4331
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, -1, -1 }, // 1431
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, -1, -1, -1 }, // 2431
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, -1, -1, -1 }, // 3431
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, -1, -1, -1 }, // 4431
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1 }, // 1141
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 2141
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 3141
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 4141
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 1241
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 2241
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 3241
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 4241
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 1341
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 2341
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 3341
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, -1, -1, -1 }, // 4341
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1 }, // 1441
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, -1 }, // 2441
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1 }, // 3441
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1 }, // 4441
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1 }, // 1112
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 2112
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 3112
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 4112
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 1212
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 2212
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 3212
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 4212
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 1312
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 2312
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 3312
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, -1, -1 }, // 4312
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, -1, -1 }, // 1412
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, -1, -1 }, // 2412
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, -1, -1 }, // 3412
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, -1, -1 }, // 4412
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1 }, // 1122
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 2122
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 3122
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 4122
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 1222
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 2222
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 3222
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 4222
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 1322
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 2322
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 3322
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, -1, -1 }, // 4322
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, -1, -1 }, // 1422
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, -1, -1 }, // 2422
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, -1, -1 }, // 3422
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, -1, -1 }, // 4422
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1 }, // 1132
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 2132
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 3132
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 4132
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 1232
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 2232
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 3232
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 4232
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 1332
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 2332
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 3332
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, -1, -1 }, // 4332
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, -1, -1 }, // 1432
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, -1, -1 }, // 2432
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, -1, -1 }, // 3432
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, -1, -1 }, // 4432
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1 }, // 1142
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 2142
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 3142
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 4142
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 1242
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 2242
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 3242
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 4242
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 1342
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 2342
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 3342
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, -1, -1 }, // 4342
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1 }, // 1442
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1 }, // 2442
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1 }, // 3442
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1, -1 }, // 4442
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1 }, // 1113
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 2113
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 3113
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 4113
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 1213
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 2213
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 3213
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 4213
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 1313
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 2313
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 3313
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, -1 }, // 4313
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, -1 }, // 1413
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, -1 }, // 2413
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, -1 }, // 3413
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, -1 }, // 4413
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1 }, // 1123
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 2123
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 3123
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 4123
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 1223
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 2223
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 3223
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 4223
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 1323
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 2323
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 3323
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, -1 }, // 4323
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, -1 }, // 1423
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, -1 }, // 2423
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, -1 }, // 3423
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, -1 }, // 4423
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1 }, // 1133
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 2133
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 3133
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 4133
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 1233
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 2233
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 3233
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 4233
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 1333
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 2333
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 3333
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, -1 }, // 4333
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, -1 }, // 1433
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, -1 }, // 2433
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, -1 }, // 3433
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, -1 }, // 4433
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1 }, // 1143
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 2143
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 3143
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 4143
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 1243
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 2243
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 3243
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 4243
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 1343
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 2343
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 3343
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, -1 }, // 4343
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1 }, // 1443
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1 }, // 2443
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 }, // 3443
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1 }, // 4443
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6 }, // 1114
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 2114
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 3114
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 4114
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 1214
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 2214
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 3214
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 4214
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 1314
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 2314
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 3314
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, 11 }, // 4314
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, 9 }, // 1414
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, 10 }, // 2414
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, 11 }, // 3414
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, 12 }, // 4414
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7 }, // 1124
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 2124
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 3124
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 4124
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 1224
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 2224
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 3224
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 4224
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 1324
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 2324
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 3324
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, 12 }, // 4324
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, 10 }, // 1424
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11 }, // 2424
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, 12 }, // 3424
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, 13 }, // 4424
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8 }, // 1134
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 2134
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 3134
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 4134
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 1234
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 2234
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 3234
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 4234
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 1334
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 2334
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 3334
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, 13 }, // 4334
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, 11 }, // 1434
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, 12 }, // 2434
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, 13 }, // 3434
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14 }, // 4434
|
||||
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9 }, // 1144
|
||||
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 2144
|
||||
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 3144
|
||||
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 4144
|
||||
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 1244
|
||||
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 2244
|
||||
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 3244
|
||||
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 4244
|
||||
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 1344
|
||||
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 2344
|
||||
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 3344
|
||||
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, 14 }, // 4344
|
||||
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }, // 1444
|
||||
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, // 2444
|
||||
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }, // 3444
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } // 4444
|
||||
};
|
||||
|
||||
static uint8_t _encode_data(uint32_t val, uint8_t *__restrict__ *dataPtrPtr) {
|
||||
uint8_t *dataPtr = *dataPtrPtr;
|
||||
uint8_t code;
|
||||
|
||||
if (val < (1 << 8)) { // 1 byte
|
||||
*dataPtr = (uint8_t)(val);
|
||||
*dataPtrPtr += 1;
|
||||
code = 0;
|
||||
} else if (val < (1 << 16)) { // 2 bytes
|
||||
*(uint16_t *) dataPtr = (uint16_t)(val);
|
||||
*dataPtrPtr += 2;
|
||||
code = 1;
|
||||
} else if (val < (1 << 24)) { // 3 bytes
|
||||
*(uint16_t *) dataPtr = (uint16_t)(val);
|
||||
*(dataPtr + 2) = (uint8_t)(val >> 16);
|
||||
*dataPtrPtr += 3;
|
||||
code = 2;
|
||||
} else { // 4 bytes
|
||||
*(uint32_t *) dataPtr = val;
|
||||
*dataPtrPtr += 4;
|
||||
code = 3;
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static uint8_t *svb_encode_scalar_d1_init(const uint32_t *in,
|
||||
uint8_t *__restrict__ keyPtr, uint8_t *__restrict__ dataPtr,
|
||||
uint32_t count, uint32_t prev) {
|
||||
if (count == 0)
|
||||
return dataPtr; // exit immediately if no data
|
||||
|
||||
uint8_t shift = 0; // cycles 0, 2, 4, 6, 0, 2, 4, 6, ...
|
||||
uint8_t key = 0;
|
||||
for (uint32_t c = 0; c < count; c++) {
|
||||
if (shift == 8) {
|
||||
shift = 0;
|
||||
*keyPtr++ = key;
|
||||
key = 0;
|
||||
}
|
||||
uint32_t val = in[c] - prev;
|
||||
prev = in[c];
|
||||
uint8_t code = _encode_data(val, &dataPtr);
|
||||
key |= code << shift;
|
||||
shift += 2;
|
||||
}
|
||||
|
||||
*keyPtr = key; // write last key (no increment needed)
|
||||
return dataPtr; // pointer to first unused data byte
|
||||
}
|
||||
|
||||
size_t streamvbyte_delta_encode(const uint32_t *in, uint32_t count, uint8_t *out,
|
||||
uint32_t prev) {
|
||||
uint8_t *keyPtr = out; // keys come immediately after 32-bit count
|
||||
uint32_t keyLen = (count + 3) / 4; // 2-bits rounded to full byte
|
||||
uint8_t *dataPtr = keyPtr + keyLen; // variable byte data after all keys
|
||||
|
||||
return svb_encode_scalar_d1_init(in, keyPtr, dataPtr, count, prev) - out;
|
||||
|
||||
}
|
||||
|
||||
static inline __m128i _decode_avx(uint32_t key, const uint8_t *__restrict__ *dataPtrPtr) {
|
||||
uint8_t len = lengthTable[key];
|
||||
__m128i Data = _mm_loadu_si128((__m128i *) *dataPtrPtr);
|
||||
__m128i Shuf = *(__m128i *) &shuffleTable[key];
|
||||
|
||||
Data = _mm_shuffle_epi8(Data, Shuf);
|
||||
*dataPtrPtr += len;
|
||||
|
||||
return Data;
|
||||
}
|
||||
#define BroadcastLastXMM 0xFF // bits 0-7 all set to choose highest element
|
||||
|
||||
|
||||
|
||||
static inline void _write_avx(uint32_t *out, __m128i Vec) {
|
||||
_mm_storeu_si128((__m128i *) out, Vec);
|
||||
}
|
||||
|
||||
static __m128i _write_avx_d1(uint32_t *out, __m128i Vec, __m128i Prev) {
|
||||
__m128i Add = _mm_slli_si128(Vec, 4); // Cycle 1: [- A B C] (already done)
|
||||
Prev = _mm_shuffle_epi32(Prev, BroadcastLastXMM); // Cycle 2: [P P P P]
|
||||
Vec = _mm_add_epi32(Vec, Add); // Cycle 2: [A AB BC CD]
|
||||
Add = _mm_slli_si128(Vec, 8); // Cycle 3: [- - A AB]
|
||||
Vec = _mm_add_epi32(Vec, Prev); // Cycle 3: [PA PAB PBC PCD]
|
||||
Vec = _mm_add_epi32(Vec, Add); // Cycle 4: [PA PAB PABC PABCD]
|
||||
|
||||
_write_avx(out, Vec);
|
||||
return Vec;
|
||||
}
|
||||
|
||||
#ifndef _MSC_VER
|
||||
static __m128i High16To32 = {0xFFFF0B0AFFFF0908, 0xFFFF0F0EFFFF0D0C};
|
||||
#else
|
||||
static __m128i High16To32 = {8, 9, -1, -1, 10, 11, -1, -1,
|
||||
12, 13, -1, -1, 14, 15, -1, -1};
|
||||
#endif
|
||||
|
||||
static inline __m128i _write_16bit_avx_d1(uint32_t *out, __m128i Vec, __m128i Prev) {
|
||||
// vec == [A B C D E F G H] (16 bit values)
|
||||
__m128i Add = _mm_slli_si128(Vec, 2); // [- A B C D E F G]
|
||||
Prev = _mm_shuffle_epi32(Prev, BroadcastLastXMM); // [P P P P] (32-bit)
|
||||
Vec = _mm_add_epi32(Vec, Add); // [A AB BC CD DE FG GH]
|
||||
Add = _mm_slli_si128(Vec, 4); // [- - A AB BC CD DE EF]
|
||||
Vec = _mm_add_epi32(Vec, Add); // [A AB ABC ABCD BCDE CDEF DEFG EFGH]
|
||||
__m128i V1 = _mm_cvtepu16_epi32(Vec); // [A AB ABC ABCD] (32-bit)
|
||||
V1 = _mm_add_epi32(V1, Prev); // [PA PAB PABC PABCD] (32-bit)
|
||||
__m128i V2 =
|
||||
_mm_shuffle_epi8(Vec, High16To32); // [BCDE CDEF DEFG EFGH] (32-bit)
|
||||
V2 = _mm_add_epi32(V1, V2); // [PABCDE PABCDEF PABCDEFG PABCDEFGH] (32-bit)
|
||||
_write_avx(out, V1);
|
||||
_write_avx(out + 4, V2);
|
||||
return V2;
|
||||
}
|
||||
|
||||
static inline uint32_t _decode_data(const uint8_t **dataPtrPtr, uint8_t code) {
|
||||
const uint8_t *dataPtr = *dataPtrPtr;
|
||||
uint32_t val;
|
||||
|
||||
if (code == 0) { // 1 byte
|
||||
val = (uint32_t) * dataPtr;
|
||||
dataPtr += 1;
|
||||
} else if (code == 1) { // 2 bytes
|
||||
val = (uint32_t) * (uint16_t *) dataPtr;
|
||||
dataPtr += 2;
|
||||
} else if (code == 2) { // 3 bytes
|
||||
val = (uint32_t) * (uint16_t *) dataPtr;
|
||||
val |= *(dataPtr + 2) << 16;
|
||||
dataPtr += 3;
|
||||
} else { // code == 3
|
||||
val = *(uint32_t *) dataPtr; // 4 bytes
|
||||
dataPtr += 4;
|
||||
}
|
||||
|
||||
*dataPtrPtr = dataPtr;
|
||||
return val;
|
||||
}
|
||||
|
||||
const uint8_t *svb_decode_scalar_d1_init(uint32_t *outPtr, const uint8_t *keyPtr,
|
||||
const uint8_t *dataPtr, uint32_t count,
|
||||
uint32_t prev) {
|
||||
if (count == 0)
|
||||
return dataPtr; // no reads or writes if no data
|
||||
|
||||
uint8_t shift = 0;
|
||||
uint32_t key = *keyPtr++;
|
||||
|
||||
for (uint32_t c = 0; c < count; c++) {
|
||||
if (shift == 8) {
|
||||
shift = 0;
|
||||
key = *keyPtr++;
|
||||
}
|
||||
uint32_t val = _decode_data(&dataPtr, (key >> shift) & 0x3);
|
||||
val += prev;
|
||||
*outPtr++ = val;
|
||||
prev = val;
|
||||
shift += 2;
|
||||
}
|
||||
|
||||
return dataPtr; // pointer to first unused byte after end
|
||||
}
|
||||
|
||||
const uint8_t *svb_decode_avx_d1_init(uint32_t *out, const uint8_t *__restrict__ keyPtr,
|
||||
const uint8_t *__restrict__ dataPtr, uint64_t count, uint32_t prev) {
|
||||
uint64_t keybytes = count / 4; // number of key bytes
|
||||
if (keybytes >= 8) {
|
||||
__m128i Prev = _mm_set1_epi32(prev);
|
||||
__m128i Data;
|
||||
|
||||
int64_t Offset = -(int64_t) keybytes / 8 + 1;
|
||||
|
||||
const uint64_t *keyPtr64 = (const uint64_t *) keyPtr - Offset;
|
||||
uint64_t nextkeys = keyPtr64[Offset];
|
||||
for (; Offset != 0; ++Offset) {
|
||||
uint64_t keys = nextkeys;
|
||||
nextkeys = keyPtr64[Offset + 1];
|
||||
// faster 16-bit delta since we only have 8-bit values
|
||||
if (!keys) { // 32 1-byte ints in a row
|
||||
|
||||
Data = _mm_cvtepu8_epi16(_mm_lddqu_si128((__m128i *) (dataPtr)));
|
||||
Prev = _write_16bit_avx_d1(out, Data, Prev);
|
||||
Data = _mm_cvtepu8_epi16(
|
||||
_mm_lddqu_si128((__m128i *) (dataPtr + 8)));
|
||||
Prev = _write_16bit_avx_d1(out + 8, Data, Prev);
|
||||
Data = _mm_cvtepu8_epi16(
|
||||
_mm_lddqu_si128((__m128i *) (dataPtr + 16)));
|
||||
Prev = _write_16bit_avx_d1(out + 16, Data, Prev);
|
||||
Data = _mm_cvtepu8_epi16(
|
||||
_mm_lddqu_si128((__m128i *) (dataPtr + 24)));
|
||||
Prev = _write_16bit_avx_d1(out + 24, Data, Prev);
|
||||
out += 32;
|
||||
dataPtr += 32;
|
||||
continue;
|
||||
}
|
||||
|
||||
Data = _decode_avx(keys & 0x00FF, &dataPtr);
|
||||
Prev = _write_avx_d1(out, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 4, Data, Prev);
|
||||
|
||||
keys >>= 16;
|
||||
Data = _decode_avx((keys & 0x00FF), &dataPtr);
|
||||
Prev = _write_avx_d1(out + 8, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 12, Data, Prev);
|
||||
|
||||
keys >>= 16;
|
||||
Data = _decode_avx((keys & 0x00FF), &dataPtr);
|
||||
Prev = _write_avx_d1(out + 16, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 20, Data, Prev);
|
||||
|
||||
keys >>= 16;
|
||||
Data = _decode_avx((keys & 0x00FF), &dataPtr);
|
||||
Prev = _write_avx_d1(out + 24, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 28, Data, Prev);
|
||||
|
||||
out += 32;
|
||||
}
|
||||
{
|
||||
uint64_t keys = nextkeys;
|
||||
// faster 16-bit delta since we only have 8-bit values
|
||||
if (!keys) { // 32 1-byte ints in a row
|
||||
Data = _mm_cvtepu8_epi16(_mm_lddqu_si128((__m128i *) (dataPtr)));
|
||||
Prev = _write_16bit_avx_d1(out, Data, Prev);
|
||||
Data = _mm_cvtepu8_epi16(
|
||||
_mm_lddqu_si128((__m128i *) (dataPtr + 8)));
|
||||
Prev = _write_16bit_avx_d1(out + 8, Data, Prev);
|
||||
Data = _mm_cvtepu8_epi16(
|
||||
_mm_lddqu_si128((__m128i *) (dataPtr + 16)));
|
||||
Prev = _write_16bit_avx_d1(out + 16, Data, Prev);
|
||||
Data = _mm_cvtepu8_epi16(
|
||||
_mm_loadl_epi64((__m128i *) (dataPtr + 24)));
|
||||
Prev = _write_16bit_avx_d1(out + 24, Data, Prev);
|
||||
out += 32;
|
||||
dataPtr += 32;
|
||||
|
||||
} else {
|
||||
|
||||
Data = _decode_avx(keys & 0x00FF, &dataPtr);
|
||||
Prev = _write_avx_d1(out, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 4, Data, Prev);
|
||||
|
||||
keys >>= 16;
|
||||
Data = _decode_avx((keys & 0x00FF), &dataPtr);
|
||||
Prev = _write_avx_d1(out + 8, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 12, Data, Prev);
|
||||
|
||||
keys >>= 16;
|
||||
Data = _decode_avx((keys & 0x00FF), &dataPtr);
|
||||
Prev = _write_avx_d1(out + 16, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 20, Data, Prev);
|
||||
|
||||
keys >>= 16;
|
||||
Data = _decode_avx((keys & 0x00FF), &dataPtr);
|
||||
Prev = _write_avx_d1(out + 24, Data, Prev);
|
||||
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
|
||||
Prev = _write_avx_d1(out + 28, Data, Prev);
|
||||
|
||||
out += 32;
|
||||
}
|
||||
}
|
||||
prev = out[-1];
|
||||
}
|
||||
uint64_t consumedkeys = keybytes - (keybytes & 7);
|
||||
return svb_decode_scalar_d1_init(out, keyPtr + consumedkeys, dataPtr,
|
||||
count & 31, prev);
|
||||
}
|
||||
|
||||
size_t streamvbyte_delta_decode(const uint8_t* in, uint32_t* out,
|
||||
uint32_t count, uint32_t prev) {
|
||||
uint32_t keyLen = ((count + 3) / 4); // 2-bits per key (rounded up)
|
||||
const uint8_t *keyPtr = in;
|
||||
const uint8_t *dataPtr = keyPtr + keyLen; // data starts at end of keys
|
||||
return svb_decode_avx_d1_init(out, keyPtr, dataPtr, count, prev) - in;
|
||||
}
|
||||
73
cpp/streamvbyte/tests/unit.c
Normal file
73
cpp/streamvbyte/tests/unit.c
Normal file
@@ -0,0 +1,73 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "streamvbyte.h"
|
||||
#include "streamvbytedelta.h"
|
||||
|
||||
int main() {
|
||||
int N = 4096;
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint8_t * compressedbuffer = malloc(2 * N * sizeof(uint32_t));
|
||||
uint32_t * recovdata = malloc(N * sizeof(uint32_t));
|
||||
|
||||
for (int length = 0; length <= N;) {
|
||||
printf("length = %d \n", length);
|
||||
for (uint32_t gap = 1; gap <= 387420489; gap *= 3) {
|
||||
for (int k = 0; k < length; ++k)
|
||||
datain[k] = gap;
|
||||
size_t compsize = streamvbyte_encode(datain, length,
|
||||
compressedbuffer);
|
||||
size_t usedbytes = streamvbyte_decode(compressedbuffer, recovdata,
|
||||
length);
|
||||
if (compsize != usedbytes) {
|
||||
printf(
|
||||
"[streamvbyte_decode] code is buggy gap = %d, size mismatch %d %d \n",
|
||||
(int) gap, (int) compsize, (int) usedbytes);
|
||||
return -1;
|
||||
}
|
||||
for (int k = 0; k < length; ++k) {
|
||||
if (recovdata[k] != datain[k]) {
|
||||
printf("[streamvbyte_decode] code is buggy gap = %d\n",
|
||||
(int) gap);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
printf("Delta \n");
|
||||
for (size_t gap = 1; gap <= 531441; gap *= 3) {
|
||||
for (int k = 0; k < length; ++k)
|
||||
datain[k] = gap * k;
|
||||
size_t compsize = streamvbyte_delta_encode(datain, length,
|
||||
compressedbuffer, 0);
|
||||
size_t usedbytes = streamvbyte_delta_decode(compressedbuffer,
|
||||
recovdata, length, 0);
|
||||
if (compsize != usedbytes) {
|
||||
printf(
|
||||
"[streamvbyte_delta_decode] code is buggy gap = %d, size mismatch %d %d \n",
|
||||
(int) gap, (int) compsize, (int) usedbytes);
|
||||
return -1;
|
||||
}
|
||||
for (int k = 0; k < length; ++k) {
|
||||
if (recovdata[k] != datain[k]) {
|
||||
printf(
|
||||
"[streamvbyte_delta_decode] code is buggy gap = %d\n",
|
||||
(int) gap);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (length < 128)
|
||||
++length;
|
||||
else {
|
||||
length *= 2;
|
||||
}
|
||||
}
|
||||
free(datain);
|
||||
free(compressedbuffer);
|
||||
free(recovdata);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
@@ -30,10 +30,12 @@
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre><span class="hljs-keyword">extern</span> <span class="hljs-keyword">crate</span> rustc_serialize;
|
||||
<span class="hljs-keyword">extern</span> <span class="hljs-keyword">crate</span> tantivy;
|
||||
<div class="content"><div class='highlight'><pre><span class="hljs-keyword">extern</span> <span class="hljs-keyword">crate</span> tantivy;
|
||||
<span class="hljs-keyword">extern</span> <span class="hljs-keyword">crate</span> tempdir;
|
||||
|
||||
<span class="hljs-meta">#[macro_use]</span>
|
||||
<span class="hljs-keyword">extern</span> <span class="hljs-keyword">crate</span> serde_json;
|
||||
|
||||
<span class="hljs-keyword">use</span> std::path::Path;
|
||||
<span class="hljs-keyword">use</span> tempdir::TempDir;
|
||||
<span class="hljs-keyword">use</span> tantivy::Index;
|
||||
@@ -52,7 +54,7 @@
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-2">¶</a>
|
||||
</div>
|
||||
<p>Let’s create a temporary directory for the
|
||||
<p>Let’s create a temporary directory for the
|
||||
sake of this example</p>
|
||||
|
||||
</div>
|
||||
@@ -60,7 +62,7 @@ sake of this example</p>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">if</span> <span class="hljs-keyword">let</span> <span class="hljs-literal">Ok</span>(dir) = TempDir::new(<span class="hljs-string">"tantivy_example_dir"</span>) {
|
||||
run_example(dir.path()).unwrap();
|
||||
dir.close().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -78,7 +80,7 @@ sake of this example</p>
|
||||
<h1 id="defining-the-schema">Defining the schema</h1>
|
||||
<p>The Tantivy index requires a very strict schema.
|
||||
The schema declares which fields are in the index,
|
||||
and for each field, its type and “the way it should
|
||||
and for each field, its type and “the way it should
|
||||
be indexed”.</p>
|
||||
|
||||
</div>
|
||||
@@ -108,15 +110,15 @@ be indexed”.</p>
|
||||
<a class="pilcrow" href="#section-5">¶</a>
|
||||
</div>
|
||||
<p>Our first field is title.
|
||||
We want full-text search for it, and we want to be able
|
||||
to retrieve the document after the search.</p>
|
||||
We want full-text search for it, and we also want
|
||||
to be able to retrieve the document after the search.</p>
|
||||
<p>TEXT | STORED is some syntactic sugar to describe
|
||||
that. </p>
|
||||
that.</p>
|
||||
<p><code>TEXT</code> means the field should be tokenized and indexed,
|
||||
along with its term frequency and term positions.</p>
|
||||
<p><code>STORED</code> means that the field will also be saved
|
||||
in a compressed, row-oriented key-value store.
|
||||
This store is useful to reconstruct the
|
||||
This store is useful to reconstruct the
|
||||
documents that were selected during the search phase.</p>
|
||||
|
||||
</div>
|
||||
@@ -132,14 +134,17 @@ documents that were selected during the search phase.</p>
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-6">¶</a>
|
||||
</div>
|
||||
<p>Our first field is body.
|
||||
We want full-text search for it, and we want to be able
|
||||
to retrieve the body after the search.</p>
|
||||
<p>Our second field is body.
|
||||
We want full-text search for it, but we do not
|
||||
need to be able to be able to retrieve it
|
||||
for our application. </p>
|
||||
<p>We can make our index lighter and
|
||||
by omitting <code>STORED</code> flag.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> schema_builder.add_text_field(<span class="hljs-string">"body"</span>, TEXT);
|
||||
|
||||
|
||||
<span class="hljs-keyword">let</span> schema = schema_builder.build();</pre></div></div>
|
||||
|
||||
</li>
|
||||
@@ -158,7 +163,7 @@ with our schema in the directory.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> index = <span class="hljs-built_in">try!</span>(Index::create(index_path, schema.clone()));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> index = Index::create(index_path, schema.clone())?;</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -173,14 +178,12 @@ with our schema in the directory.</p>
|
||||
There must be only one writer at a time.
|
||||
This single <code>IndexWriter</code> is already
|
||||
multithreaded.</p>
|
||||
<p>Here we use a buffer of 1 GB. Using a bigger
|
||||
heap for the indexer can increase its throughput.
|
||||
This buffer will be split between the indexing
|
||||
threads.</p>
|
||||
<p>Here we use a buffer of 50MB per thread. Using a bigger
|
||||
heap for the indexer can increase its throughput.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> <span class="hljs-keyword">mut</span> index_writer = <span class="hljs-built_in">try!</span>(index.writer(<span class="hljs-number">1_000_000_000</span>));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> <span class="hljs-keyword">mut</span> index_writer = index.writer(<span class="hljs-number">50_000_000</span>)?;</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -213,10 +216,14 @@ one by one in a Document object.</p>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> title = schema.get_field(<span class="hljs-string">"title"</span>).unwrap();
|
||||
<span class="hljs-keyword">let</span> body = schema.get_field(<span class="hljs-string">"body"</span>).unwrap();
|
||||
|
||||
|
||||
<span class="hljs-keyword">let</span> <span class="hljs-keyword">mut</span> old_man_doc = Document::<span class="hljs-keyword">default</span>();
|
||||
old_man_doc.add_text(title, <span class="hljs-string">"The Old Man and the Sea"</span>);
|
||||
old_man_doc.add_text(body, <span class="hljs-string">"He was an old man who fished alone in a skiff in the Gulf Stream and he had gone eighty-four days now without taking a fish."</span>);</pre></div></div>
|
||||
old_man_doc.add_text(
|
||||
body,
|
||||
<span class="hljs-string">"He was an old man who fished alone in a skiff in the Gulf Stream and \
|
||||
he had gone eighty-four days now without taking a fish."</span>,
|
||||
);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -231,7 +238,7 @@ one by one in a Document object.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-built_in">try!</span>(index_writer.add_document(old_man_doc));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> index_writer.add_document(old_man_doc);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -243,18 +250,27 @@ one by one in a Document object.</p>
|
||||
<a class="pilcrow" href="#section-12">¶</a>
|
||||
</div>
|
||||
<h3 id="create-a-document-directly-from-json-">Create a document directly from json.</h3>
|
||||
<p>Alternatively, we can use our schema to parse
|
||||
a document object directly from json.</p>
|
||||
<p>Alternatively, we can use our schema to parse a
|
||||
document object directly from json.
|
||||
The document is a string, but we use the <code>json</code> macro
|
||||
from <code>serde_json</code> for the convenience of multi-line support.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre>
|
||||
<span class="hljs-keyword">let</span> mice_and_men_doc = <span class="hljs-built_in">try!</span>(schema.parse_document(r#<span class="hljs-string">"{
|
||||
"</span>title<span class="hljs-string">": "</span>Of Mice and Men<span class="hljs-string">",
|
||||
"</span>body<span class="hljs-string">": "</span>few miles south of Soledad, the Salinas River drops <span class="hljs-keyword">in</span> close to the hillside bank and runs deep and green. The water is warm too, <span class="hljs-keyword">for</span> it has slipped twinkling over the yellow sands <span class="hljs-keyword">in</span> the sunlight before reaching the narrow pool. On one side of the river the golden foothill slopes curve up to the strong and rocky Gabilan Mountains, but on the valley side the water is lined with trees—willows fresh and green with every spring, carrying <span class="hljs-keyword">in</span> their lower leaf junctures the debris of the winter’s flooding; and sycamores with mottled, white,recumbent limbs and branches that arch over the pool<span class="hljs-string">"
|
||||
}"</span>#));
|
||||
|
||||
<span class="hljs-built_in">try!</span>(index_writer.add_document(mice_and_men_doc));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> json = json!({
|
||||
<span class="hljs-string">"title"</span>: <span class="hljs-string">"Of Mice and Men"</span>,
|
||||
<span class="hljs-string">"body"</span>: <span class="hljs-string">"A few miles south of Soledad, the Salinas River drops in close to the hillside \
|
||||
bank and runs deep and green. The water is warm too, for it has slipped twinkling \
|
||||
over the yellow sands in the sunlight before reaching the narrow pool. On one \
|
||||
side of the river the golden foothill slopes curve up to the strong and rocky \
|
||||
Gabilan Mountains, but on the valley side the water is lined with trees—willows \
|
||||
fresh and green with every spring, carrying in their lower leaf junctures the \
|
||||
debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
|
||||
limbs and branches that arch over the pool"</span>
|
||||
});
|
||||
<span class="hljs-keyword">let</span> mice_and_men_doc = schema.parse_document(&json.to_string())?;
|
||||
|
||||
index_writer.add_document(mice_and_men_doc);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -271,11 +287,16 @@ The following document has two titles.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> frankenstein_doc = <span class="hljs-built_in">try!</span>(schema.parse_document(r#<span class="hljs-string">"{
|
||||
"</span>title<span class="hljs-string">": ["</span>Frankenstein<span class="hljs-string">", "</span>The Modern Promotheus<span class="hljs-string">"],
|
||||
"</span>body<span class="hljs-string">": "</span>You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence <span class="hljs-keyword">in</span> the success of my undertaking.<span class="hljs-string">"
|
||||
}"</span>#));
|
||||
<span class="hljs-built_in">try!</span>(index_writer.add_document(frankenstein_doc));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> json = json!({
|
||||
<span class="hljs-string">"title"</span>: [<span class="hljs-string">"Frankenstein"</span>, <span class="hljs-string">"The Modern Prometheus"</span>],
|
||||
<span class="hljs-string">"body"</span>: <span class="hljs-string">"You will rejoice to hear that no disaster has accompanied the commencement of an \
|
||||
enterprise which you have regarded with such evil forebodings. I arrived here \
|
||||
yesterday, and my first task is to assure my dear sister of my welfare and \
|
||||
increasing confidence in the success of my undertaking."</span>
|
||||
});
|
||||
<span class="hljs-keyword">let</span> frankenstein_doc = schema.parse_document(&json.to_string())?;
|
||||
|
||||
index_writer.add_document(frankenstein_doc);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -288,7 +309,7 @@ The following document has two titles.</p>
|
||||
</div>
|
||||
<p>This is an example, so we will only index 3 documents
|
||||
here. You can check out tantivy’s tutorial to index
|
||||
the English wikipedia. Tantivy’s indexing is rather fast.
|
||||
the English wikipedia. Tantivy’s indexing is rather fast.
|
||||
Indexing 5 million articles of the English wikipedia takes
|
||||
around 4 minutes on my computer!</p>
|
||||
|
||||
@@ -313,7 +334,7 @@ the existence of new documents.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-built_in">try!</span>(index_writer.commit());</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> index_writer.commit()?;</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -343,15 +364,13 @@ commit.</p>
|
||||
<a class="pilcrow" href="#section-17">¶</a>
|
||||
</div>
|
||||
<h1 id="searching">Searching</h1>
|
||||
<p>Let’s search our index. We start
|
||||
by creating a searcher. There can be more
|
||||
than one searcher at a time.</p>
|
||||
<p>You should create a searcher
|
||||
every time you start a “search query”.</p>
|
||||
<p>Let’s search our index. Start by reloading
|
||||
searchers in the index. This should be done
|
||||
after every commit().</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> searcher = index.searcher();</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> index.load_searchers()?;</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -362,14 +381,13 @@ every time you start a “search query”.</p>
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-18">¶</a>
|
||||
</div>
|
||||
<p>The query parser can interpret human queries.
|
||||
Here, if the user does not specify which
|
||||
field they want to search, tantivy will search
|
||||
in both title and body.</p>
|
||||
<p>Afterwards create one (or more) searchers.</p>
|
||||
<p>You should create a searcher
|
||||
every time you start a “search query”.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> query_parser = QueryParser::new(index.schema(), <span class="hljs-built_in">vec!</span>(title, body));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> searcher = index.searcher();</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -380,13 +398,14 @@ in both title and body.</p>
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-19">¶</a>
|
||||
</div>
|
||||
<p>QueryParser may fail if the query is not in the right
|
||||
format. For user facing applications, this can be a problem.
|
||||
A ticket has been opened regarding this problem.</p>
|
||||
<p>The query parser can interpret human queries.
|
||||
Here, if the user does not specify which
|
||||
field they want to search, tantivy will search
|
||||
in both title and body.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> query = <span class="hljs-built_in">try!</span>(query_parser.parse_query(<span class="hljs-string">"sea whale"</span>));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> <span class="hljs-keyword">mut</span> query_parser = QueryParser::for_index(index, <span class="hljs-built_in">vec!</span>[title, body]);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -397,6 +416,23 @@ A ticket has been opened regarding this problem.</p>
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-20">¶</a>
|
||||
</div>
|
||||
<p>QueryParser may fail if the query is not in the right
|
||||
format. For user facing applications, this can be a problem.
|
||||
A ticket has been opened regarding this problem.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> query = query_parser.parse_query(<span class="hljs-string">"sea whale"</span>)?;</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
|
||||
<li id="section-21">
|
||||
<div class="annotation">
|
||||
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-21">¶</a>
|
||||
</div>
|
||||
<p>A query defines a set of documents, as
|
||||
well as the way they should be scored.</p>
|
||||
<p>A query created by the query parser is scored according
|
||||
@@ -408,36 +444,20 @@ any document matching at least one of our terms.</p>
|
||||
</li>
|
||||
|
||||
|
||||
<li id="section-21">
|
||||
<div class="annotation">
|
||||
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-21">¶</a>
|
||||
</div>
|
||||
<h3 id="collectors">Collectors</h3>
|
||||
<p>We are not interested in all of the documents but
|
||||
only in the top 10. Keeping track of our top 10 best documents
|
||||
is the role of the TopCollector.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre>
|
||||
<span class="hljs-keyword">let</span> <span class="hljs-keyword">mut</span> top_collector = TopCollector::with_limit(<span class="hljs-number">10</span>);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
|
||||
<li id="section-22">
|
||||
<div class="annotation">
|
||||
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-22">¶</a>
|
||||
</div>
|
||||
<p>We can now perform our query.</p>
|
||||
<h3 id="collectors">Collectors</h3>
|
||||
<p>We are not interested in all of the documents but
|
||||
only in the top 10. Keeping track of our top 10 best documents
|
||||
is the role of the TopCollector.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-built_in">try!</span>(searcher.search(&query, &<span class="hljs-keyword">mut</span> top_collector)));</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> <span class="hljs-keyword">mut</span> top_collector = TopCollector::with_limit(<span class="hljs-number">10</span>);</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -448,12 +468,11 @@ is the role of the TopCollector.</p>
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-23">¶</a>
|
||||
</div>
|
||||
<p>Our top collector now contains the 10
|
||||
most relevant doc ids…</p>
|
||||
<p>We can now perform our query.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> doc_addresses = top_collector.docs();</pre></div></div>
|
||||
<div class="content"><div class='highlight'><pre> searcher.search(&*query, &<span class="hljs-keyword">mut</span> top_collector)?;</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
@@ -464,7 +483,23 @@ most relevant doc ids…</p>
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-24">¶</a>
|
||||
</div>
|
||||
<p>The actual documents still need to be
|
||||
<p>Our top collector now contains the 10
|
||||
most relevant doc ids…</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> <span class="hljs-keyword">let</span> doc_addresses = top_collector.docs();</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
|
||||
<li id="section-25">
|
||||
<div class="annotation">
|
||||
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-25">¶</a>
|
||||
</div>
|
||||
<p>The actual documents still need to be
|
||||
retrieved from Tantivy’s store.</p>
|
||||
<p>Since the body field was not configured as stored,
|
||||
the document returned will only contain
|
||||
@@ -472,11 +507,29 @@ a title.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre>
|
||||
<div class="content"><div class='highlight'><pre>
|
||||
<span class="hljs-keyword">for</span> doc_address <span class="hljs-keyword">in</span> doc_addresses {
|
||||
<span class="hljs-keyword">let</span> retrieved_doc = <span class="hljs-built_in">try!</span>(searcher.doc(&doc_address));
|
||||
<span class="hljs-built_in">println!</span>(<span class="hljs-string">"{}"</span>, schema.to_json(&retrieved_doc));
|
||||
}
|
||||
<span class="hljs-keyword">let</span> retrieved_doc = searcher.doc(&doc_address)?;
|
||||
<span class="hljs-built_in">println!</span>(<span class="hljs-string">"{}"</span>, schema.to_json(&retrieved_doc));
|
||||
}</pre></div></div>
|
||||
|
||||
</li>
|
||||
|
||||
|
||||
<li id="section-26">
|
||||
<div class="annotation">
|
||||
|
||||
<div class="pilwrap ">
|
||||
<a class="pilcrow" href="#section-26">¶</a>
|
||||
</div>
|
||||
<p>Wait for indexing and merging threads to shut down.
|
||||
Usually this isn’t needed, but in <code>main</code> we try to
|
||||
delete the temporary directory and that fails on
|
||||
Windows if the files are still open.</p>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="content"><div class='highlight'><pre> index_writer.wait_merging_threads()?;
|
||||
|
||||
<span class="hljs-literal">Ok</span>(())
|
||||
}</pre></div></div>
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
extern crate rustc_serialize;
|
||||
extern crate tantivy;
|
||||
extern crate tempdir;
|
||||
|
||||
#[macro_use]
|
||||
extern crate serde_json;
|
||||
|
||||
use std::path::Path;
|
||||
use tempdir::TempDir;
|
||||
use tantivy::Index;
|
||||
@@ -10,149 +12,161 @@ use tantivy::collector::TopCollector;
|
||||
use tantivy::query::QueryParser;
|
||||
|
||||
fn main() {
|
||||
// Let's create a temporary directory for the
|
||||
// Let's create a temporary directory for the
|
||||
// sake of this example
|
||||
if let Ok(dir) = TempDir::new("tantivy_example_dir") {
|
||||
run_example(dir.path()).unwrap();
|
||||
dir.close().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn run_example(index_path: &Path) -> tantivy::Result<()> {
|
||||
|
||||
|
||||
// # Defining the schema
|
||||
//
|
||||
// The Tantivy index requires a very strict schema.
|
||||
// The schema declares which fields are in the index,
|
||||
// and for each field, its type and "the way it should
|
||||
// and for each field, its type and "the way it should
|
||||
// be indexed".
|
||||
|
||||
|
||||
|
||||
// first we need to define a schema ...
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
|
||||
|
||||
// Our first field is title.
|
||||
// We want full-text search for it, and we want to be able
|
||||
// to retrieve the document after the search.
|
||||
// We want full-text search for it, and we also want
|
||||
// to be able to retrieve the document after the search.
|
||||
//
|
||||
// TEXT | STORED is some syntactic sugar to describe
|
||||
// that.
|
||||
//
|
||||
// that.
|
||||
//
|
||||
// `TEXT` means the field should be tokenized and indexed,
|
||||
// along with its term frequency and term positions.
|
||||
//
|
||||
// `STORED` means that the field will also be saved
|
||||
// in a compressed, row-oriented key-value store.
|
||||
// This store is useful to reconstruct the
|
||||
// This store is useful to reconstruct the
|
||||
// documents that were selected during the search phase.
|
||||
schema_builder.add_text_field("title", TEXT | STORED);
|
||||
|
||||
// Our first field is body.
|
||||
// We want full-text search for it, and we want to be able
|
||||
// to retrieve the body after the search.
|
||||
|
||||
// Our second field is body.
|
||||
// We want full-text search for it, but we do not
|
||||
// need to be able to be able to retrieve it
|
||||
// for our application.
|
||||
//
|
||||
// We can make our index lighter and
|
||||
// by omitting `STORED` flag.
|
||||
schema_builder.add_text_field("body", TEXT);
|
||||
|
||||
let schema = schema_builder.build();
|
||||
|
||||
|
||||
let schema = schema_builder.build();
|
||||
|
||||
// # Indexing documents
|
||||
//
|
||||
// Let's create a brand new index.
|
||||
//
|
||||
//
|
||||
// This will actually just save a meta.json
|
||||
// with our schema in the directory.
|
||||
let index = try!(Index::create(index_path, schema.clone()));
|
||||
let index = Index::create(index_path, schema.clone())?;
|
||||
|
||||
|
||||
|
||||
// To insert document we need an index writer.
|
||||
// There must be only one writer at a time.
|
||||
// This single `IndexWriter` is already
|
||||
// multithreaded.
|
||||
//
|
||||
// Here we use a buffer of 1 GB. Using a bigger
|
||||
// Here we use a buffer of 50MB per thread. Using a bigger
|
||||
// heap for the indexer can increase its throughput.
|
||||
// This buffer will be split between the indexing
|
||||
// threads.
|
||||
let mut index_writer = try!(index.writer(1_000_000_000));
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
|
||||
// Let's index our documents!
|
||||
// We first need a handle on the title and the body field.
|
||||
|
||||
|
||||
|
||||
// ### Create a document "manually".
|
||||
//
|
||||
// We can create a document manually, by setting the fields
|
||||
// one by one in a Document object.
|
||||
let title = schema.get_field("title").unwrap();
|
||||
let body = schema.get_field("body").unwrap();
|
||||
|
||||
|
||||
let mut old_man_doc = Document::default();
|
||||
old_man_doc.add_text(title, "The Old Man and the Sea");
|
||||
old_man_doc.add_text(body, "He was an old man who fished alone in a skiff in the Gulf Stream and he had gone eighty-four days now without taking a fish.");
|
||||
|
||||
old_man_doc.add_text(
|
||||
body,
|
||||
"He was an old man who fished alone in a skiff in the Gulf Stream and \
|
||||
he had gone eighty-four days now without taking a fish.",
|
||||
);
|
||||
|
||||
// ... and add it to the `IndexWriter`.
|
||||
try!(index_writer.add_document(old_man_doc));
|
||||
|
||||
index_writer.add_document(old_man_doc);
|
||||
|
||||
// ### Create a document directly from json.
|
||||
//
|
||||
// Alternatively, we can use our schema to parse
|
||||
// a document object directly from json.
|
||||
|
||||
let mice_and_men_doc = try!(schema.parse_document(r#"{
|
||||
// Alternatively, we can use our schema to parse a
|
||||
// document object directly from json.
|
||||
// The document is a string, but we use the `json` macro
|
||||
// from `serde_json` for the convenience of multi-line support.
|
||||
let json = json!({
|
||||
"title": "Of Mice and Men",
|
||||
"body": "few miles south of Soledad, the Salinas River drops in close to the hillside bank and runs deep and green. The water is warm too, for it has slipped twinkling over the yellow sands in the sunlight before reaching the narrow pool. On one side of the river the golden foothill slopes curve up to the strong and rocky Gabilan Mountains, but on the valley side the water is lined with trees—willows fresh and green with every spring, carrying in their lower leaf junctures the debris of the winter’s flooding; and sycamores with mottled, white,recumbent limbs and branches that arch over the pool"
|
||||
}"#));
|
||||
|
||||
try!(index_writer.add_document(mice_and_men_doc));
|
||||
|
||||
"body": "A few miles south of Soledad, the Salinas River drops in close to the hillside \
|
||||
bank and runs deep and green. The water is warm too, for it has slipped twinkling \
|
||||
over the yellow sands in the sunlight before reaching the narrow pool. On one \
|
||||
side of the river the golden foothill slopes curve up to the strong and rocky \
|
||||
Gabilan Mountains, but on the valley side the water is lined with trees—willows \
|
||||
fresh and green with every spring, carrying in their lower leaf junctures the \
|
||||
debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
|
||||
limbs and branches that arch over the pool"
|
||||
});
|
||||
let mice_and_men_doc = schema.parse_document(&json.to_string())?;
|
||||
|
||||
index_writer.add_document(mice_and_men_doc);
|
||||
|
||||
// Multi-valued field are allowed, they are
|
||||
// expressed in JSON by an array.
|
||||
// The following document has two titles.
|
||||
let frankenstein_doc = try!(schema.parse_document(r#"{
|
||||
"title": ["Frankenstein", "The Modern Promotheus"],
|
||||
"body": "You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence in the success of my undertaking."
|
||||
}"#));
|
||||
try!(index_writer.add_document(frankenstein_doc));
|
||||
|
||||
let json = json!({
|
||||
"title": ["Frankenstein", "The Modern Prometheus"],
|
||||
"body": "You will rejoice to hear that no disaster has accompanied the commencement of an \
|
||||
enterprise which you have regarded with such evil forebodings. I arrived here \
|
||||
yesterday, and my first task is to assure my dear sister of my welfare and \
|
||||
increasing confidence in the success of my undertaking."
|
||||
});
|
||||
let frankenstein_doc = schema.parse_document(&json.to_string())?;
|
||||
|
||||
index_writer.add_document(frankenstein_doc);
|
||||
|
||||
// This is an example, so we will only index 3 documents
|
||||
// here. You can check out tantivy's tutorial to index
|
||||
// the English wikipedia. Tantivy's indexing is rather fast.
|
||||
// the English wikipedia. Tantivy's indexing is rather fast.
|
||||
// Indexing 5 million articles of the English wikipedia takes
|
||||
// around 4 minutes on my computer!
|
||||
|
||||
|
||||
|
||||
// ### Committing
|
||||
//
|
||||
//
|
||||
// At this point our documents are not searchable.
|
||||
//
|
||||
//
|
||||
//
|
||||
// We need to call .commit() explicitly to force the
|
||||
// index_writer to finish processing the documents in the queue,
|
||||
// flush the current index to the disk, and advertise
|
||||
// the existence of new documents.
|
||||
//
|
||||
// This call is blocking.
|
||||
try!(index_writer.commit());
|
||||
|
||||
index_writer.commit()?;
|
||||
|
||||
// If `.commit()` returns correctly, then all of the
|
||||
// documents that have been added are guaranteed to be
|
||||
// persistently indexed.
|
||||
//
|
||||
//
|
||||
// In the scenario of a crash or a power failure,
|
||||
// tantivy behaves as if has rolled back to its last
|
||||
// commit.
|
||||
|
||||
|
||||
|
||||
// # Searching
|
||||
//
|
||||
// Let's search our index. We start
|
||||
// by creating a searcher. There can be more
|
||||
// than one searcher at a time.
|
||||
//
|
||||
// Let's search our index. Start by reloading
|
||||
// searchers in the index. This should be done
|
||||
// after every commit().
|
||||
index.load_searchers()?;
|
||||
|
||||
// Afterwards create one (or more) searchers.
|
||||
//
|
||||
// You should create a searcher
|
||||
// every time you start a "search query".
|
||||
let searcher = index.searcher();
|
||||
@@ -161,47 +175,51 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
|
||||
// Here, if the user does not specify which
|
||||
// field they want to search, tantivy will search
|
||||
// in both title and body.
|
||||
let query_parser = QueryParser::new(index.schema(), vec!(title, body));
|
||||
|
||||
let query_parser = QueryParser::for_index(&index, vec![title, body]);
|
||||
|
||||
// QueryParser may fail if the query is not in the right
|
||||
// format. For user facing applications, this can be a problem.
|
||||
// A ticket has been opened regarding this problem.
|
||||
let query = try!(query_parser.parse_query("sea whale"));
|
||||
|
||||
|
||||
let query = query_parser.parse_query("sea whale")?;
|
||||
|
||||
// A query defines a set of documents, as
|
||||
// well as the way they should be scored.
|
||||
//
|
||||
//
|
||||
// A query created by the query parser is scored according
|
||||
// to a metric called Tf-Idf, and will consider
|
||||
// any document matching at least one of our terms.
|
||||
|
||||
// ### Collectors
|
||||
|
||||
// ### Collectors
|
||||
//
|
||||
// We are not interested in all of the documents but
|
||||
// We are not interested in all of the documents but
|
||||
// only in the top 10. Keeping track of our top 10 best documents
|
||||
// is the role of the TopCollector.
|
||||
|
||||
let mut top_collector = TopCollector::with_limit(10);
|
||||
|
||||
// We can now perform our query.
|
||||
try!(searcher.search(&*query, &mut top_collector));
|
||||
|
||||
// Our top collector now contains the 10
|
||||
// We can now perform our query.
|
||||
searcher.search(&*query, &mut top_collector)?;
|
||||
|
||||
// Our top collector now contains the 10
|
||||
// most relevant doc ids...
|
||||
let doc_addresses = top_collector.docs();
|
||||
|
||||
// The actual documents still need to be
|
||||
// The actual documents still need to be
|
||||
// retrieved from Tantivy's store.
|
||||
//
|
||||
//
|
||||
// Since the body field was not configured as stored,
|
||||
// the document returned will only contain
|
||||
// a title.
|
||||
|
||||
|
||||
for doc_address in doc_addresses {
|
||||
let retrieved_doc = try!(searcher.doc(&doc_address));
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc = searcher.doc(&doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
}
|
||||
|
||||
// Wait for indexing and merging threads to shut down.
|
||||
// Usually this isn't needed, but in `main` we try to
|
||||
// delete the temporary directory and that fails on
|
||||
// Windows if the files are still open.
|
||||
index_writer.wait_merging_threads()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
extern crate regex;
|
||||
|
||||
use std::str::Chars;
|
||||
use std::ascii::AsciiExt;
|
||||
|
||||
pub struct TokenIter<'a> {
|
||||
chars: Chars<'a>,
|
||||
term_buffer: String,
|
||||
}
|
||||
|
||||
fn append_char_lowercase(c: char, term_buffer: &mut String) {
|
||||
term_buffer.push(c.to_ascii_lowercase());
|
||||
}
|
||||
|
||||
pub trait StreamingIterator<'a, T> {
|
||||
fn next(&'a mut self) -> Option<T>;
|
||||
}
|
||||
|
||||
impl<'a, 'b> TokenIter<'b> {
|
||||
fn consume_token(&'a mut self) -> Option<&'a str> {
|
||||
for c in &mut self.chars {
|
||||
if c.is_alphanumeric() {
|
||||
append_char_lowercase(c, &mut self.term_buffer);
|
||||
}
|
||||
else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Some(&self.term_buffer)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<'a, 'b> StreamingIterator<'a, &'a str> for TokenIter<'b> {
|
||||
|
||||
#[inline]
|
||||
fn next(&'a mut self,) -> Option<&'a str> {
|
||||
self.term_buffer.clear();
|
||||
// skipping non-letter characters.
|
||||
loop {
|
||||
match self.chars.next() {
|
||||
Some(c) => {
|
||||
if c.is_alphanumeric() {
|
||||
append_char_lowercase(c, &mut self.term_buffer);
|
||||
return self.consume_token();
|
||||
}
|
||||
}
|
||||
None => { return None; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
pub struct SimpleTokenizer;
|
||||
|
||||
|
||||
impl SimpleTokenizer {
|
||||
|
||||
pub fn tokenize<'a>(&self, text: &'a str) -> TokenIter<'a> {
|
||||
TokenIter {
|
||||
term_buffer: String::new(),
|
||||
chars: text.chars(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_tokenizer() {
|
||||
let simple_tokenizer = SimpleTokenizer;
|
||||
let mut term_reader = simple_tokenizer.tokenize("hello, happy tax payer!");
|
||||
assert_eq!(term_reader.next().unwrap(), "hello");
|
||||
assert_eq!(term_reader.next().unwrap(), "happy");
|
||||
assert_eq!(term_reader.next().unwrap(), "tax");
|
||||
assert_eq!(term_reader.next().unwrap(), "payer");
|
||||
assert_eq!(term_reader.next(), None);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_tokenizer_empty() {
|
||||
let simple_tokenizer = SimpleTokenizer;
|
||||
let mut term_reader = simple_tokenizer.tokenize("");
|
||||
assert_eq!(term_reader.next(), None);
|
||||
}
|
||||
@@ -1,18 +1,17 @@
|
||||
use Result;
|
||||
use collector::Collector;
|
||||
use SegmentLocalId;
|
||||
use SegmentReader;
|
||||
use std::io;
|
||||
use DocId;
|
||||
use Score;
|
||||
|
||||
|
||||
/// Collector that does nothing.
|
||||
/// This is used in the chain Collector and will hopefully
|
||||
/// be optimized away by the compiler.
|
||||
/// This is used in the chain Collector and will hopefully
|
||||
/// be optimized away by the compiler.
|
||||
pub struct DoNothingCollector;
|
||||
impl Collector for DoNothingCollector {
|
||||
#[inline]
|
||||
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
#[inline]
|
||||
@@ -24,10 +23,10 @@ impl Collector for DoNothingCollector {
|
||||
/// are known at compile time.
|
||||
pub struct ChainedCollector<Left: Collector, Right: Collector> {
|
||||
left: Left,
|
||||
right: Right
|
||||
right: Right,
|
||||
}
|
||||
|
||||
impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
|
||||
impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
|
||||
/// Adds a collector
|
||||
pub fn push<C: Collector>(self, new_collector: &mut C) -> ChainedCollector<Self, &mut C> {
|
||||
ChainedCollector {
|
||||
@@ -38,7 +37,11 @@ impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
|
||||
}
|
||||
|
||||
impl<Left: Collector, Right: Collector> Collector for ChainedCollector<Left, Right> {
|
||||
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(
|
||||
&mut self,
|
||||
segment_local_id: SegmentLocalId,
|
||||
segment: &SegmentReader,
|
||||
) -> Result<()> {
|
||||
try!(self.left.set_segment(segment_local_id, segment));
|
||||
try!(self.right.set_segment(segment_local_id, segment));
|
||||
Ok(())
|
||||
@@ -58,7 +61,6 @@ pub fn chain() -> ChainedCollector<DoNothingCollector, DoNothingCollector> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -70,9 +72,7 @@ mod tests {
|
||||
let mut top_collector = TopCollector::with_limit(2);
|
||||
let mut count_collector = CountCollector::default();
|
||||
{
|
||||
let mut collectors = chain()
|
||||
.push(&mut top_collector)
|
||||
.push(&mut count_collector);
|
||||
let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
|
||||
collectors.collect(1, 0.2);
|
||||
collectors.collect(2, 0.1);
|
||||
collectors.collect(3, 0.5);
|
||||
@@ -80,4 +80,4 @@ mod tests {
|
||||
assert_eq!(count_collector.count(), 3);
|
||||
assert!(top_collector.at_capacity());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use std::io;
|
||||
use super::Collector;
|
||||
use DocId;
|
||||
use Score;
|
||||
use Result;
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
|
||||
/// `CountCollector` collector only counts how many
|
||||
/// documents match the query.
|
||||
/// documents match the query.
|
||||
pub struct CountCollector {
|
||||
count: usize,
|
||||
}
|
||||
@@ -14,21 +14,19 @@ pub struct CountCollector {
|
||||
impl CountCollector {
|
||||
/// Returns the count of documents that were
|
||||
/// collected.
|
||||
pub fn count(&self,) -> usize {
|
||||
pub fn count(&self) -> usize {
|
||||
self.count
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CountCollector {
|
||||
fn default() -> CountCollector {
|
||||
CountCollector {count: 0,
|
||||
}
|
||||
CountCollector { count: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
impl Collector for CountCollector {
|
||||
|
||||
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
113
src/collector/facet_collector.rs
Normal file
113
src/collector/facet_collector.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
use std::cmp::Eq;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::Hash;
|
||||
|
||||
use collector::Collector;
|
||||
use fastfield::FastFieldReader;
|
||||
use schema::Field;
|
||||
|
||||
use DocId;
|
||||
use Result;
|
||||
use Score;
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
|
||||
/// Facet collector for i64/u64 fast field
|
||||
pub struct FacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
counters: HashMap<T::ValueType, u64>,
|
||||
field: Field,
|
||||
ff_reader: Option<T>,
|
||||
}
|
||||
|
||||
impl<T> FacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
/// Creates a new facet collector for aggregating a given field.
|
||||
pub fn new(field: Field) -> FacetCollector<T> {
|
||||
FacetCollector {
|
||||
counters: HashMap::new(),
|
||||
field: field,
|
||||
ff_reader: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Collector for FacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
|
||||
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect(&mut self, doc: DocId, _: Score) {
|
||||
let val = self.ff_reader
|
||||
.as_ref()
|
||||
.expect("collect() was called before set_segment. This should never happen.")
|
||||
.get(doc);
|
||||
*(self.counters.entry(val).or_insert(0)) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use collector::{chain, FacetCollector};
|
||||
use query::QueryParser;
|
||||
use fastfield::{I64FastFieldReader, U64FastFieldReader};
|
||||
use schema::{self, FAST, STRING};
|
||||
use Index;
|
||||
|
||||
#[test]
|
||||
// create 10 documents, set num field value to 0 or 1 for even/odd ones
|
||||
// make sure we have facet counters correctly filled
|
||||
fn test_facet_collector_results() {
|
||||
let mut schema_builder = schema::SchemaBuilder::new();
|
||||
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
|
||||
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
|
||||
let text_field = schema_builder.add_text_field("text", STRING);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
{
|
||||
for i in 0u64..10u64 {
|
||||
index_writer.add_document(doc!(
|
||||
num_field_i64 => ((i as i64) % 3i64) as i64,
|
||||
num_field_u64 => (i % 2u64) as u64,
|
||||
text_field => "text"
|
||||
));
|
||||
}
|
||||
}
|
||||
assert_eq!(index_writer.commit().unwrap(), 10u64);
|
||||
}
|
||||
|
||||
index.load_searchers().unwrap();
|
||||
let searcher = index.searcher();
|
||||
let mut ffvf_i64: FacetCollector<I64FastFieldReader> = FacetCollector::new(num_field_i64);
|
||||
let mut ffvf_u64: FacetCollector<U64FastFieldReader> = FacetCollector::new(num_field_u64);
|
||||
|
||||
{
|
||||
// perform the query
|
||||
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64);
|
||||
let query_parser = QueryParser::for_index(&index, vec![text_field]);
|
||||
let query = query_parser.parse_query("text:text").unwrap();
|
||||
query.search(&searcher, &mut facet_collectors).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(ffvf_u64.counters[&0], 5);
|
||||
assert_eq!(ffvf_u64.counters[&1], 5);
|
||||
assert_eq!(ffvf_i64.counters[&0], 4);
|
||||
assert_eq!(ffvf_i64.counters[&1], 3);
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,12 @@
|
||||
/*!
|
||||
Defines how the documents matching a search query should be processed.
|
||||
*/
|
||||
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
use DocId;
|
||||
use Score;
|
||||
use std::io;
|
||||
use Result;
|
||||
|
||||
mod count_collector;
|
||||
pub use self::count_collector::CountCollector;
|
||||
@@ -13,14 +17,17 @@ pub use self::multi_collector::MultiCollector;
|
||||
mod top_collector;
|
||||
pub use self::top_collector::TopCollector;
|
||||
|
||||
mod facet_collector;
|
||||
pub use self::facet_collector::FacetCollector;
|
||||
|
||||
mod chained_collector;
|
||||
pub use self::chained_collector::chain;
|
||||
|
||||
/// Collectors are in charge of collecting and retaining relevant
|
||||
/// Collectors are in charge of collecting and retaining relevant
|
||||
/// information from the document found and scored by the query.
|
||||
///
|
||||
///
|
||||
/// For instance,
|
||||
/// For instance,
|
||||
///
|
||||
/// - keeping track of the top 10 best documents
|
||||
/// - computing a breakdown over a fast field
|
||||
@@ -29,7 +36,7 @@ pub use self::chained_collector::chain;
|
||||
/// Queries are in charge of pushing the `DocSet` to the collector.
|
||||
///
|
||||
/// As they work on multiple segments, they first inform
|
||||
/// the collector of a change in a segment and then
|
||||
/// the collector of a change in a segment and then
|
||||
/// call the `collect` method to push the document to the collector.
|
||||
///
|
||||
/// Temporally, our collector will receive calls
|
||||
@@ -46,16 +53,23 @@ pub use self::chained_collector::chain;
|
||||
///
|
||||
/// Segments are not guaranteed to be visited in any specific order.
|
||||
pub trait Collector {
|
||||
/// `set_segment` is called before beginning to enumerate
|
||||
/// `set_segment` is called before beginning to enumerate
|
||||
/// on this segment.
|
||||
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> io::Result<()>;
|
||||
fn set_segment(
|
||||
&mut self,
|
||||
segment_local_id: SegmentLocalId,
|
||||
segment: &SegmentReader,
|
||||
) -> Result<()>;
|
||||
/// The query pushes the scored document to the collector via this method.
|
||||
fn collect(&mut self, doc: DocId, score: Score);
|
||||
}
|
||||
|
||||
|
||||
impl<'a, C: Collector> Collector for &'a mut C {
|
||||
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(
|
||||
&mut self,
|
||||
segment_local_id: SegmentLocalId,
|
||||
segment: &SegmentReader,
|
||||
) -> Result<()> {
|
||||
(*self).set_segment(segment_local_id, segment)
|
||||
}
|
||||
/// The query pushes the scored document to the collector via this method.
|
||||
@@ -64,7 +78,6 @@ impl<'a, C: Collector> Collector for &'a mut C {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
@@ -73,11 +86,11 @@ pub mod tests {
|
||||
use DocId;
|
||||
use Score;
|
||||
use core::SegmentReader;
|
||||
use std::io;
|
||||
use SegmentLocalId;
|
||||
use fastfield::U32FastFieldReader;
|
||||
use fastfield::U64FastFieldReader;
|
||||
use fastfield::FastFieldReader;
|
||||
use schema::Field;
|
||||
|
||||
|
||||
/// Stores all of the doc ids.
|
||||
/// This collector is only used for tests.
|
||||
/// It is unusable in practise, as it does not store
|
||||
@@ -90,7 +103,7 @@ pub mod tests {
|
||||
|
||||
impl TestCollector {
|
||||
/// Return the exhalist of documents.
|
||||
pub fn docs(self,) -> Vec<DocId> {
|
||||
pub fn docs(self) -> Vec<DocId> {
|
||||
self.docs
|
||||
}
|
||||
}
|
||||
@@ -106,8 +119,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
impl Collector for TestCollector {
|
||||
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
|
||||
self.offset += self.segment_max_doc;
|
||||
self.segment_max_doc = reader.max_doc();
|
||||
Ok(())
|
||||
@@ -117,18 +129,15 @@ pub mod tests {
|
||||
self.docs.push(doc + self.offset);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/// Collects in order all of the fast fields for all of the
|
||||
/// doc in the `DocSet`
|
||||
///
|
||||
/// This collector is mainly useful for tests.
|
||||
pub struct FastFieldTestCollector {
|
||||
vals: Vec<u32>,
|
||||
vals: Vec<u64>,
|
||||
field: Field,
|
||||
ff_reader: Option<U32FastFieldReader>,
|
||||
ff_reader: Option<U64FastFieldReader>,
|
||||
}
|
||||
|
||||
impl FastFieldTestCollector {
|
||||
@@ -140,14 +149,14 @@ pub mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn vals(&self,) -> &Vec<u32> {
|
||||
&self.vals
|
||||
pub fn vals(self) -> Vec<u64> {
|
||||
self.vals
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Collector for FastFieldTestCollector {
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> io::Result<()> {
|
||||
self.ff_reader = Some(try!(reader.get_fast_field_reader(self.field)));
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
|
||||
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -157,7 +166,6 @@ pub mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[bench]
|
||||
fn build_collector(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
use std::io;
|
||||
use super::Collector;
|
||||
use DocId;
|
||||
use Score;
|
||||
use Result;
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
|
||||
|
||||
/// Multicollector makes it possible to collect on more than one collector.
|
||||
/// It should only be used for use cases where the Collector types is unknown
|
||||
/// It should only be used for use cases where the Collector types is unknown
|
||||
/// at compile time.
|
||||
/// If the type of the collectors is known, you should prefer to use `ChainedCollector`.
|
||||
pub struct MultiCollector<'a> {
|
||||
@@ -23,9 +22,12 @@ impl<'a> MultiCollector<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<'a> Collector for MultiCollector<'a> {
|
||||
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(
|
||||
&mut self,
|
||||
segment_local_id: SegmentLocalId,
|
||||
segment: &SegmentReader,
|
||||
) -> Result<()> {
|
||||
for collector in &mut self.collectors {
|
||||
try!(collector.set_segment(segment_local_id, segment));
|
||||
}
|
||||
@@ -39,8 +41,6 @@ impl<'a> Collector for MultiCollector<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -52,7 +52,8 @@ mod tests {
|
||||
let mut top_collector = TopCollector::with_limit(2);
|
||||
let mut count_collector = CountCollector::default();
|
||||
{
|
||||
let mut collectors = MultiCollector::from(vec!(&mut top_collector, &mut count_collector));
|
||||
let mut collectors =
|
||||
MultiCollector::from(vec![&mut top_collector, &mut count_collector]);
|
||||
collectors.collect(1, 0.2);
|
||||
collectors.collect(2, 0.1);
|
||||
collectors.collect(3, 0.5);
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use std::io;
|
||||
use super::Collector;
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
use DocAddress;
|
||||
use Result;
|
||||
use std::collections::BinaryHeap;
|
||||
use std::cmp::Ordering;
|
||||
use DocId;
|
||||
@@ -12,8 +12,7 @@ use Score;
|
||||
#[derive(Clone, Copy)]
|
||||
struct GlobalScoredDoc {
|
||||
score: Score,
|
||||
doc_address: DocAddress
|
||||
|
||||
doc_address: DocAddress,
|
||||
}
|
||||
|
||||
impl PartialOrd for GlobalScoredDoc {
|
||||
@@ -25,10 +24,10 @@ impl PartialOrd for GlobalScoredDoc {
|
||||
impl Ord for GlobalScoredDoc {
|
||||
#[inline]
|
||||
fn cmp(&self, other: &GlobalScoredDoc) -> Ordering {
|
||||
other.score.partial_cmp(&self.score)
|
||||
.unwrap_or(
|
||||
other.doc_address.cmp(&self.doc_address)
|
||||
)
|
||||
other
|
||||
.score
|
||||
.partial_cmp(&self.score)
|
||||
.unwrap_or_else(|| other.doc_address.cmp(&self.doc_address))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,7 +39,6 @@ impl PartialEq for GlobalScoredDoc {
|
||||
|
||||
impl Eq for GlobalScoredDoc {}
|
||||
|
||||
|
||||
/// The Top Collector keeps track of the K documents
|
||||
/// with the best scores.
|
||||
///
|
||||
@@ -53,7 +51,6 @@ pub struct TopCollector {
|
||||
}
|
||||
|
||||
impl TopCollector {
|
||||
|
||||
/// Creates a top collector, with a number of documents equal to "limit".
|
||||
///
|
||||
/// # Panics
|
||||
@@ -68,9 +65,9 @@ impl TopCollector {
|
||||
segment_id: 0,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Returns K best documents sorted in decreasing order.
|
||||
///
|
||||
///
|
||||
/// Calling this method triggers the sort.
|
||||
/// The result of the sort is not cached.
|
||||
pub fn docs(&self) -> Vec<DocAddress> {
|
||||
@@ -81,31 +78,28 @@ impl TopCollector {
|
||||
}
|
||||
|
||||
/// Returns K best ScoredDocument sorted in decreasing order.
|
||||
///
|
||||
///
|
||||
/// Calling this method triggers the sort.
|
||||
/// The result of the sort is not cached.
|
||||
pub fn score_docs(&self) -> Vec<(Score, DocAddress)> {
|
||||
let mut scored_docs: Vec<GlobalScoredDoc> = self.heap
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
let mut scored_docs: Vec<GlobalScoredDoc> = self.heap.iter().cloned().collect();
|
||||
scored_docs.sort();
|
||||
scored_docs.into_iter()
|
||||
.map(|GlobalScoredDoc {score, doc_address}| (score, doc_address))
|
||||
scored_docs
|
||||
.into_iter()
|
||||
.map(|GlobalScoredDoc { score, doc_address }| (score, doc_address))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Return true iff at least K documents have gone through
|
||||
/// the collector.
|
||||
#[inline]
|
||||
pub fn at_capacity(&self, ) -> bool {
|
||||
pub fn at_capacity(&self) -> bool {
|
||||
self.heap.len() >= self.limit
|
||||
}
|
||||
}
|
||||
|
||||
impl Collector for TopCollector {
|
||||
|
||||
fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> io::Result<()> {
|
||||
fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> Result<()> {
|
||||
self.segment_id = segment_id;
|
||||
Ok(())
|
||||
}
|
||||
@@ -113,25 +107,26 @@ impl Collector for TopCollector {
|
||||
fn collect(&mut self, doc: DocId, score: Score) {
|
||||
if self.at_capacity() {
|
||||
// It's ok to unwrap as long as a limit of 0 is forbidden.
|
||||
let limit_doc: GlobalScoredDoc = *self.heap.peek().expect("Top collector with size 0 is forbidden");
|
||||
let limit_doc: GlobalScoredDoc = *self.heap
|
||||
.peek()
|
||||
.expect("Top collector with size 0 is forbidden");
|
||||
if limit_doc.score < score {
|
||||
let mut mut_head = self.heap.peek_mut().expect("Top collector with size 0 is forbidden");
|
||||
let mut mut_head = self.heap
|
||||
.peek_mut()
|
||||
.expect("Top collector with size 0 is forbidden");
|
||||
mut_head.score = score;
|
||||
mut_head.doc_address = DocAddress(self.segment_id, doc);
|
||||
mut_head.doc_address = DocAddress(self.segment_id, doc);
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
let wrapped_doc = GlobalScoredDoc {
|
||||
score: score,
|
||||
doc_address: DocAddress(self.segment_id, doc)
|
||||
doc_address: DocAddress(self.segment_id, doc),
|
||||
};
|
||||
self.heap.push(wrapped_doc);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -147,13 +142,12 @@ mod tests {
|
||||
top_collector.collect(3, 0.2);
|
||||
top_collector.collect(5, 0.3);
|
||||
assert!(!top_collector.at_capacity());
|
||||
let score_docs: Vec<(Score, DocId)> = top_collector.score_docs()
|
||||
let score_docs: Vec<(Score, DocId)> = top_collector
|
||||
.score_docs()
|
||||
.into_iter()
|
||||
.map(|(score, doc_address)| (score, doc_address.doc()))
|
||||
.collect();
|
||||
assert_eq!(score_docs, vec!(
|
||||
(0.8, 1), (0.3, 5), (0.2, 3),
|
||||
));
|
||||
assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -171,9 +165,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.map(|(score, doc_address)| (score, doc_address.doc()))
|
||||
.collect();
|
||||
assert_eq!(score_docs, vec!(
|
||||
(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)
|
||||
));
|
||||
assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]);
|
||||
}
|
||||
{
|
||||
let docs: Vec<DocId> = top_collector
|
||||
@@ -181,10 +173,8 @@ mod tests {
|
||||
.into_iter()
|
||||
.map(|doc_address| doc_address.doc())
|
||||
.collect();
|
||||
assert_eq!(docs, vec!(7, 1, 5, 3));
|
||||
assert_eq!(docs, vec![7, 1, 5, 3]);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -2,118 +2,181 @@ use std::io::Write;
|
||||
use std::io;
|
||||
use common::serialize::BinarySerializable;
|
||||
use std::mem;
|
||||
use std::ops::Deref;
|
||||
|
||||
|
||||
pub fn compute_num_bits(amplitude: u32) -> u8 {
|
||||
(32u32 - amplitude.leading_zeros()) as u8
|
||||
/// Computes the number of bits that will be used for bitpacking.
|
||||
///
|
||||
/// In general the target is the minimum number of bits
|
||||
/// required to express the amplitude given in argument.
|
||||
///
|
||||
/// e.g. If the amplitude is 10, we can store all ints on simply 4bits.
|
||||
///
|
||||
/// The logic is slightly more convoluted here as for optimization
|
||||
/// reasons, we want to ensure that a value spawns over at most 8 bytes
|
||||
/// of aligns bytes.
|
||||
///
|
||||
/// Spanning over 9 bytes is possible for instance, if we do
|
||||
/// bitpacking with an amplitude of 63 bits.
|
||||
/// In this case, the second int will start on bit
|
||||
/// 63 (which belongs to byte 7) and ends at byte 15;
|
||||
/// Hence 9 bytes (from byte 7 to byte 15 included).
|
||||
///
|
||||
/// To avoid this, we force the number of bits to 64bits
|
||||
/// when the result is greater than `64-8 = 56 bits`.
|
||||
///
|
||||
/// Note that this only affects rare use cases spawning over
|
||||
/// a very large range of values. Even in this case, it results
|
||||
/// in an extra cost of at most 12% compared to the optimal
|
||||
/// number of bits.
|
||||
pub fn compute_num_bits(amplitude: u64) -> u8 {
|
||||
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
|
||||
if amplitude <= 64 - 8 {
|
||||
amplitude
|
||||
} else {
|
||||
64
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitPacker {
|
||||
mini_buffer: u64,
|
||||
mini_buffer_written: usize,
|
||||
num_bits: usize,
|
||||
written_size: usize,
|
||||
}
|
||||
|
||||
impl BitPacker {
|
||||
|
||||
impl BitPacker {
|
||||
pub fn new(num_bits: usize) -> BitPacker {
|
||||
BitPacker {
|
||||
mini_buffer: 0u64,
|
||||
mini_buffer_written: 0,
|
||||
num_bits: num_bits,
|
||||
written_size: 0,
|
||||
num_bits,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write<TWrite: Write>(&mut self, val: u32, output: &mut TWrite) -> io::Result<()> {
|
||||
|
||||
pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
|
||||
let val_u64 = val as u64;
|
||||
if self.mini_buffer_written + self.num_bits > 64 {
|
||||
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
|
||||
self.written_size += self.mini_buffer.serialize(output)?;
|
||||
self.mini_buffer.serialize(output)?;
|
||||
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
|
||||
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
self.mini_buffer |= val_u64 << self.mini_buffer_written;
|
||||
self.mini_buffer_written += self.num_bits;
|
||||
if self.mini_buffer_written == 64 {
|
||||
self.written_size += self.mini_buffer.serialize(output)?;
|
||||
self.mini_buffer.serialize(output)?;
|
||||
self.mini_buffer_written = 0;
|
||||
self.mini_buffer = 0u64;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()>{
|
||||
|
||||
pub(crate) fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
|
||||
if self.mini_buffer_written > 0 {
|
||||
let num_bytes = (self.mini_buffer_written + 7) / 8;
|
||||
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
|
||||
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
|
||||
output.write_all(&arr[..num_bytes])?;
|
||||
self.written_size += num_bytes;
|
||||
self.mini_buffer_written = 0;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<usize> {
|
||||
|
||||
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
|
||||
self.flush(output)?;
|
||||
Ok(self.written_size)
|
||||
// Padding the write file to simplify reads.
|
||||
output.write_all(&[0u8; 7])?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
pub struct BitUnpacker {
|
||||
pub struct BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
num_bits: usize,
|
||||
mask: u32,
|
||||
data_ptr: *const u8,
|
||||
data_len: usize,
|
||||
mask: u64,
|
||||
data: Data,
|
||||
}
|
||||
|
||||
impl BitUnpacker {
|
||||
pub fn new(data: &[u8], num_bits: usize) -> BitUnpacker {
|
||||
impl<Data> BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
pub fn new(data: Data, num_bits: usize) -> BitUnpacker<Data> {
|
||||
let mask: u64 = if num_bits == 64 {
|
||||
!0u64
|
||||
} else {
|
||||
(1u64 << num_bits) - 1u64
|
||||
};
|
||||
BitUnpacker {
|
||||
num_bits: num_bits,
|
||||
mask: (1u32 << num_bits) - 1u32,
|
||||
data_ptr: data.as_ptr(),
|
||||
data_len: data.len()
|
||||
num_bits,
|
||||
mask,
|
||||
data,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&self, idx: usize) -> u32 {
|
||||
|
||||
pub fn get(&self, idx: usize) -> u64 {
|
||||
if self.num_bits == 0 {
|
||||
return 0;
|
||||
}
|
||||
let addr = (idx * self.num_bits) / 8;
|
||||
let bit_shift = idx * self.num_bits - addr * 8;
|
||||
let val_unshifted_unmasked: u64;
|
||||
if addr + 8 <= self.data_len {
|
||||
val_unshifted_unmasked = unsafe { * (self.data_ptr.offset(addr as isize) as *const u64) };
|
||||
}
|
||||
else {
|
||||
let mut arr = [0u8; 8];
|
||||
if addr < self.data_len {
|
||||
for i in 0..self.data_len - addr {
|
||||
arr[i] = unsafe { *self.data_ptr.offset( (addr + i) as isize) };
|
||||
let data: &[u8] = &*self.data;
|
||||
let num_bits = self.num_bits;
|
||||
let mask = self.mask;
|
||||
let addr_in_bits = idx * num_bits;
|
||||
let addr = addr_in_bits >> 3;
|
||||
let bit_shift = addr_in_bits & 7;
|
||||
if cfg!(feature = "simdcompression") {
|
||||
// for simdcompression,
|
||||
// the bitpacker is only used for fastfields,
|
||||
// and we expect them to be always padded.
|
||||
debug_assert!(
|
||||
addr + 8 <= data.len(),
|
||||
"The fast field field should have been padded with 7 bytes."
|
||||
);
|
||||
let val_unshifted_unmasked: u64 = unsafe { *(data[addr..].as_ptr() as *const u64) };
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
|
||||
(val_shifted & mask)
|
||||
} else {
|
||||
let val_unshifted_unmasked: u64 = if addr + 8 <= data.len() {
|
||||
unsafe { *(data[addr..].as_ptr() as *const u64) }
|
||||
} else {
|
||||
let mut buffer = [0u8; 8];
|
||||
for i in addr..data.len() {
|
||||
buffer[i - addr] += data[i];
|
||||
}
|
||||
}
|
||||
val_unshifted_unmasked = unsafe { mem::transmute::<[u8; 8], u64>(arr) };
|
||||
unsafe { *(buffer[..].as_ptr() as *const u64) }
|
||||
};
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
|
||||
(val_shifted & mask)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_range(&self, start: u32, output: &mut [u64]) {
|
||||
if self.num_bits == 0 {
|
||||
for val in output.iter_mut() {
|
||||
*val = 0;
|
||||
}
|
||||
} else {
|
||||
let data: &[u8] = &*self.data;
|
||||
let num_bits = self.num_bits;
|
||||
let mask = self.mask;
|
||||
let mut addr_in_bits = (start as usize) * num_bits;
|
||||
for output_val in output.iter_mut() {
|
||||
let addr = addr_in_bits >> 3;
|
||||
let bit_shift = addr_in_bits & 7;
|
||||
let val_unshifted_unmasked: u64 = unsafe { *(data[addr..].as_ptr() as *const u64) };
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
|
||||
*output_val = val_shifted & mask;
|
||||
addr_in_bits += num_bits;
|
||||
}
|
||||
}
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u32;
|
||||
(val_shifted & self.mask)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{BitPacker, BitUnpacker, compute_num_bits};
|
||||
|
||||
use super::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
#[test]
|
||||
fn test_compute_num_bits() {
|
||||
assert_eq!(compute_num_bits(1), 1u8);
|
||||
@@ -123,32 +186,32 @@ mod test {
|
||||
assert_eq!(compute_num_bits(4), 3u8);
|
||||
assert_eq!(compute_num_bits(255), 8u8);
|
||||
assert_eq!(compute_num_bits(256), 9u8);
|
||||
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
|
||||
}
|
||||
|
||||
fn test_bitpacker_util(len: usize, num_bits: usize) {
|
||||
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: usize) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
||||
let mut data = Vec::new();
|
||||
let mut bitpacker = BitPacker::new(num_bits);
|
||||
let max_val: u32 = (1 << num_bits) - 1;
|
||||
let vals: Vec<u32> = (0u32..len as u32).map(|i| {
|
||||
if max_val == 0 {
|
||||
0
|
||||
}
|
||||
else {
|
||||
i % max_val
|
||||
}
|
||||
}).collect();
|
||||
let max_val: u64 = (1 << num_bits) - 1;
|
||||
let vals: Vec<u64> = (0u64..len as u64)
|
||||
.map(|i| if max_val == 0 { 0 } else { i % max_val })
|
||||
.collect();
|
||||
for &val in &vals {
|
||||
bitpacker.write(val, &mut data).unwrap();
|
||||
}
|
||||
let num_bytes = bitpacker.close(&mut data).unwrap();
|
||||
assert_eq!(num_bytes, (num_bits * len + 7) / 8);
|
||||
assert_eq!(data.len(), num_bytes);
|
||||
let bitunpacker = BitUnpacker::new(&data, num_bits);
|
||||
bitpacker.close(&mut data).unwrap();
|
||||
assert_eq!(data.len(), (num_bits * len + 7) / 8 + 7);
|
||||
let bitunpacker = BitUnpacker::new(data, num_bits);
|
||||
(bitunpacker, vals)
|
||||
}
|
||||
|
||||
fn test_bitpacker_util(len: usize, num_bits: usize) {
|
||||
let (bitunpacker, vals) = create_fastfield_bitpacker(len, num_bits);
|
||||
for (i, val) in vals.iter().enumerate() {
|
||||
assert_eq!(bitunpacker.get(i), *val);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bitpacker() {
|
||||
test_bitpacker_util(10, 3);
|
||||
@@ -157,4 +220,17 @@ mod test {
|
||||
test_bitpacker_util(6, 14);
|
||||
test_bitpacker_util(1000, 14);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bitpacker_range() {
|
||||
let (bitunpacker, vals) = create_fastfield_bitpacker(100_000, 12);
|
||||
let buffer_len = 100;
|
||||
let mut buffer = vec![0u64; buffer_len];
|
||||
for start in vec![0, 10, 20, 100, 1_000] {
|
||||
bitunpacker.get_range(start as u32, &mut buffer[..]);
|
||||
for i in 0..buffer_len {
|
||||
assert_eq!(buffer[i], vals[start + i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
184
src/common/composite_file.rs
Normal file
184
src/common/composite_file.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use std::io::Write;
|
||||
use common::CountingWriter;
|
||||
use std::collections::HashMap;
|
||||
use schema::Field;
|
||||
use common::VInt;
|
||||
use directory::WritePtr;
|
||||
use std::io;
|
||||
use directory::ReadOnlySource;
|
||||
use common::BinarySerializable;
|
||||
|
||||
/// A `CompositeWrite` is used to write a `CompositeFile`.
|
||||
pub struct CompositeWrite<W = WritePtr> {
|
||||
write: CountingWriter<W>,
|
||||
offsets: HashMap<Field, usize>,
|
||||
}
|
||||
|
||||
impl<W: Write> CompositeWrite<W> {
|
||||
/// Crate a new API writer that writes a composite file
|
||||
/// in a given write.
|
||||
pub fn wrap(w: W) -> CompositeWrite<W> {
|
||||
CompositeWrite {
|
||||
write: CountingWriter::wrap(w),
|
||||
offsets: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Start writing a new field.
|
||||
pub fn for_field(&mut self, field: Field) -> &mut CountingWriter<W> {
|
||||
let offset = self.write.written_bytes();
|
||||
assert!(!self.offsets.contains_key(&field));
|
||||
self.offsets.insert(field, offset);
|
||||
&mut self.write
|
||||
}
|
||||
|
||||
/// Close the composite file.
|
||||
///
|
||||
/// An index of the different field offsets
|
||||
/// will be written as a footer.
|
||||
pub fn close(mut self) -> io::Result<()> {
|
||||
let footer_offset = self.write.written_bytes();
|
||||
VInt(self.offsets.len() as u64).serialize(&mut self.write)?;
|
||||
|
||||
let mut offset_fields: Vec<_> = self.offsets
|
||||
.iter()
|
||||
.map(|(field, offset)| (offset, field))
|
||||
.collect();
|
||||
|
||||
offset_fields.sort();
|
||||
|
||||
let mut prev_offset = 0;
|
||||
for (offset, field) in offset_fields {
|
||||
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
|
||||
field.serialize(&mut self.write)?;
|
||||
prev_offset = *offset;
|
||||
}
|
||||
|
||||
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
|
||||
footer_len.serialize(&mut self.write)?;
|
||||
self.write.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A composite file is an abstraction to store a
|
||||
/// file partitioned by field.
|
||||
///
|
||||
/// The file needs to be written field by field.
|
||||
/// A footer describes the start and stop offsets
|
||||
/// for each field.
|
||||
#[derive(Clone)]
|
||||
pub struct CompositeFile {
|
||||
data: ReadOnlySource,
|
||||
offsets_index: HashMap<Field, (usize, usize)>,
|
||||
}
|
||||
|
||||
impl CompositeFile {
|
||||
/// Opens a composite file stored in a given
|
||||
/// `ReadOnlySource`.
|
||||
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
|
||||
let end = data.len();
|
||||
let footer_len_data = data.slice_from(end - 4);
|
||||
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
||||
|
||||
let footer_start = end - 4 - footer_len;
|
||||
let footer_data = data.slice(footer_start, footer_start + footer_len);
|
||||
let mut footer_buffer = footer_data.as_slice();
|
||||
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
||||
|
||||
let mut fields = vec![];
|
||||
let mut offsets = vec![];
|
||||
|
||||
let mut field_index = HashMap::new();
|
||||
|
||||
let mut offset = 0;
|
||||
for _ in 0..num_fields {
|
||||
offset += VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
||||
let field = Field::deserialize(&mut footer_buffer)?;
|
||||
offsets.push(offset);
|
||||
fields.push(field);
|
||||
}
|
||||
offsets.push(footer_start);
|
||||
for i in 0..num_fields {
|
||||
let field = fields[i];
|
||||
let start_offset = offsets[i];
|
||||
let end_offset = offsets[i + 1];
|
||||
field_index.insert(field, (start_offset, end_offset));
|
||||
}
|
||||
|
||||
Ok(CompositeFile {
|
||||
data: data.slice_to(footer_start),
|
||||
offsets_index: field_index,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns a composite file that stores
|
||||
/// no fields.
|
||||
pub fn empty() -> CompositeFile {
|
||||
CompositeFile {
|
||||
offsets_index: HashMap::new(),
|
||||
data: ReadOnlySource::empty(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the `ReadOnlySource` associated
|
||||
/// to a given `Field` and stored in a `CompositeFile`.
|
||||
pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
|
||||
self.offsets_index
|
||||
.get(&field)
|
||||
.map(|&(from, to)| self.data.slice(from, to))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use std::io::Write;
|
||||
use super::{CompositeFile, CompositeWrite};
|
||||
use directory::{Directory, RAMDirectory};
|
||||
use schema::Field;
|
||||
use common::VInt;
|
||||
use common::BinarySerializable;
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn test_composite_file() {
|
||||
let path = Path::new("test_path");
|
||||
let mut directory = RAMDirectory::create();
|
||||
{
|
||||
let w = directory.open_write(path).unwrap();
|
||||
let mut composite_write = CompositeWrite::wrap(w);
|
||||
{
|
||||
let mut write_0 = composite_write.for_field(Field(0u32));
|
||||
VInt(32431123u64).serialize(&mut write_0).unwrap();
|
||||
write_0.flush().unwrap();
|
||||
}
|
||||
|
||||
{
|
||||
let mut write_4 = composite_write.for_field(Field(4u32));
|
||||
VInt(2).serialize(&mut write_4).unwrap();
|
||||
write_4.flush().unwrap();
|
||||
}
|
||||
composite_write.close().unwrap();
|
||||
}
|
||||
{
|
||||
let r = directory.open_read(path).unwrap();
|
||||
let composite_file = CompositeFile::open(&r).unwrap();
|
||||
{
|
||||
let file0 = composite_file.open_read(Field(0u32)).unwrap();
|
||||
let mut file0_buf = file0.as_slice();
|
||||
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
|
||||
assert_eq!(file0_buf.len(), 0);
|
||||
assert_eq!(payload_0, 32431123u64);
|
||||
}
|
||||
{
|
||||
let file4 = composite_file.open_read(Field(4u32)).unwrap();
|
||||
let mut file4_buf = file4.as_slice();
|
||||
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
|
||||
assert_eq!(file4_buf.len(), 0);
|
||||
assert_eq!(payload_4, 2u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
55
src/common/counting_writer.rs
Normal file
55
src/common/counting_writer.rs
Normal file
@@ -0,0 +1,55 @@
|
||||
use std::io::Write;
|
||||
use std::io;
|
||||
|
||||
pub struct CountingWriter<W> {
|
||||
underlying: W,
|
||||
written_bytes: usize,
|
||||
}
|
||||
|
||||
impl<W: Write> CountingWriter<W> {
|
||||
pub fn wrap(underlying: W) -> CountingWriter<W> {
|
||||
CountingWriter {
|
||||
underlying,
|
||||
written_bytes: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn written_bytes(&self) -> usize {
|
||||
self.written_bytes
|
||||
}
|
||||
|
||||
pub fn finish(mut self) -> io::Result<(W, usize)> {
|
||||
self.flush()?;
|
||||
Ok((self.underlying, self.written_bytes))
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: Write> Write for CountingWriter<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let written_size = self.underlying.write(buf)?;
|
||||
self.written_bytes += written_size;
|
||||
Ok(written_size)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.underlying.flush()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use super::CountingWriter;
|
||||
use std::io::Write;
|
||||
|
||||
#[test]
|
||||
fn test_counting_writer() {
|
||||
let buffer: Vec<u8> = vec![];
|
||||
let mut counting_writer = CountingWriter::wrap(buffer);
|
||||
let bytes = (0u8..10u8).collect::<Vec<u8>>();
|
||||
counting_writer.write_all(&bytes).unwrap();
|
||||
let (w, len): (Vec<u8>, usize) = counting_writer.finish().unwrap();
|
||||
assert_eq!(len, 10);
|
||||
assert_eq!(w.len(), 10);
|
||||
}
|
||||
}
|
||||
@@ -1,32 +1,87 @@
|
||||
mod serialize;
|
||||
mod timer;
|
||||
mod vint;
|
||||
mod counting_writer;
|
||||
mod composite_file;
|
||||
pub mod bitpacker;
|
||||
|
||||
|
||||
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
||||
pub use self::serialize::BinarySerializable;
|
||||
pub use self::timer::Timing;
|
||||
pub use self::timer::TimerTree;
|
||||
pub use self::timer::OpenTimer;
|
||||
pub use self::vint::VInt;
|
||||
|
||||
pub use self::counting_writer::CountingWriter;
|
||||
|
||||
use std::io;
|
||||
|
||||
/// Create a default io error given a string.
|
||||
pub fn make_io_err(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
}
|
||||
|
||||
|
||||
/// Has length trait
|
||||
pub trait HasLen {
|
||||
/// Return length
|
||||
fn len(&self,) -> usize;
|
||||
|
||||
fn len(&self) -> usize;
|
||||
|
||||
/// Returns true iff empty.
|
||||
fn is_empty(&self,) -> bool {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
}
|
||||
|
||||
const HIGHEST_BIT: u64 = 1 << 63;
|
||||
|
||||
/// Maps a `i64` to `u64`
|
||||
///
|
||||
/// For simplicity, tantivy internally handles `i64` as `u64`.
|
||||
/// The mapping is defined by this function.
|
||||
///
|
||||
/// Maps `i64` to `u64` so that
|
||||
/// `-2^63 .. 2^63-1` is mapped
|
||||
/// to
|
||||
/// `0 .. 2^64-1`
|
||||
/// in that order.
|
||||
///
|
||||
/// This is more suited than simply casting (`val as u64`)
|
||||
/// because of bitpacking.
|
||||
///
|
||||
/// Imagine a list of `i64` ranging from -10 to 10.
|
||||
/// When casting negative values, the negative values are projected
|
||||
/// to values over 2^63, and all values end up requiring 64 bits.
|
||||
///
|
||||
/// # See also
|
||||
/// The [reverse mapping is `u64_to_i64`](./fn.u64_to_i64.html).
|
||||
#[inline(always)]
|
||||
pub fn i64_to_u64(val: i64) -> u64 {
|
||||
(val as u64) ^ HIGHEST_BIT
|
||||
}
|
||||
|
||||
/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
|
||||
#[inline(always)]
|
||||
pub fn u64_to_i64(val: u64) -> i64 {
|
||||
(val ^ HIGHEST_BIT) as i64
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use super::{i64_to_u64, u64_to_i64};
|
||||
|
||||
fn test_i64_converter_helper(val: i64) {
|
||||
assert_eq!(u64_to_i64(i64_to_u64(val)), val);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i64_converter() {
|
||||
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
|
||||
assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
|
||||
test_i64_converter_helper(0i64);
|
||||
test_i64_converter_helper(i64::min_value());
|
||||
test_i64_converter_helper(i64::max_value());
|
||||
for i in -1000i64..1000i64 {
|
||||
test_i64_converter_helper(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
|
||||
use byteorder::{ReadBytesExt, WriteBytesExt};
|
||||
use byteorder::LittleEndian as Endianness;
|
||||
use std::fmt;
|
||||
@@ -6,115 +5,104 @@ use std::io::Write;
|
||||
use std::io::Read;
|
||||
use std::io;
|
||||
use common::VInt;
|
||||
use byteorder;
|
||||
|
||||
pub trait BinarySerializable : fmt::Debug + Sized {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize>;
|
||||
fn deserialize(reader: &mut Read) -> io::Result<Self>;
|
||||
}
|
||||
|
||||
fn convert_byte_order_error(byteorder_error: byteorder::Error) -> io::Error {
|
||||
match byteorder_error {
|
||||
byteorder::Error::UnexpectedEOF => io::Error::new(io::ErrorKind::InvalidData, "Reached EOF unexpectedly"),
|
||||
byteorder::Error::Io(e) => e,
|
||||
}
|
||||
pub trait BinarySerializable: fmt::Debug + Sized {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
|
||||
}
|
||||
|
||||
impl BinarySerializable for () {
|
||||
fn serialize(&self, _: &mut Write) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
fn serialize<W: Write>(&self, _: &mut W) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
fn deserialize(_: &mut Read) -> io::Result<Self> {
|
||||
fn deserialize<R: Read>(_: &mut R) -> io::Result<Self> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BinarySerializable> BinarySerializable for Vec<T> {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
|
||||
let mut total_size = try!(VInt(self.len() as u64).serialize(writer));
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.len() as u64).serialize(writer)?;
|
||||
for it in self {
|
||||
total_size += try!(it.serialize(writer));
|
||||
it.serialize(writer)?;
|
||||
}
|
||||
Ok(total_size)
|
||||
Ok(())
|
||||
}
|
||||
fn deserialize(reader: &mut Read) -> io::Result<Vec<T>> {
|
||||
let num_items = try!(VInt::deserialize(reader)).val();
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Vec<T>> {
|
||||
let num_items = VInt::deserialize(reader)?.val();
|
||||
let mut items: Vec<T> = Vec::with_capacity(num_items as usize);
|
||||
for _ in 0..num_items {
|
||||
let item = try!(T::deserialize(reader));
|
||||
let item = T::deserialize(reader)?;
|
||||
items.push(item);
|
||||
}
|
||||
Ok(items)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for (Left, Right) {
|
||||
fn serialize(&self, write: &mut Write) -> io::Result<usize> {
|
||||
Ok(try!(self.0.serialize(write)) + try!(self.1.serialize(write)))
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
self.0.serialize(write)?;
|
||||
self.1.serialize(write)
|
||||
}
|
||||
fn deserialize(reader: &mut Read) -> io::Result<Self> {
|
||||
Ok( (try!(Left::deserialize(reader)), try!(Right::deserialize(reader))) )
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
Ok((Left::deserialize(reader)?, Right::deserialize(reader)?))
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for u32 {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u32::<Endianness>(*self)
|
||||
.map(|_| 4)
|
||||
.map_err(convert_byte_order_error)
|
||||
}
|
||||
|
||||
fn deserialize(reader: &mut Read) -> io::Result<u32> {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u32> {
|
||||
reader.read_u32::<Endianness>()
|
||||
.map_err(convert_byte_order_error)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl BinarySerializable for u64 {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u64::<Endianness>(*self)
|
||||
.map(|_| 8)
|
||||
.map_err(convert_byte_order_error)
|
||||
}
|
||||
fn deserialize(reader: &mut Read) -> io::Result<u64> {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
reader.read_u64::<Endianness>()
|
||||
.map_err(convert_byte_order_error)
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for i64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_i64::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
reader.read_i64::<Endianness>()
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for u8 {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
|
||||
// TODO error
|
||||
try!(writer.write_u8(*self).map_err(convert_byte_order_error));
|
||||
Ok(1)
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u8(*self)
|
||||
}
|
||||
fn deserialize(reader: &mut Read) -> io::Result<u8> {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u8> {
|
||||
reader.read_u8()
|
||||
.map_err(convert_byte_order_error)
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for String {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let data: &[u8] = self.as_bytes();
|
||||
let mut size = try!(VInt(data.len() as u64).serialize(writer));
|
||||
size += data.len();
|
||||
try!(writer.write_all(data));
|
||||
Ok(size)
|
||||
VInt(data.len() as u64).serialize(writer)?;
|
||||
writer.write_all(data)
|
||||
}
|
||||
|
||||
fn deserialize(reader: &mut Read) -> io::Result<String> {
|
||||
let string_length = try!(VInt::deserialize(reader)).val() as usize;
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<String> {
|
||||
let string_length = VInt::deserialize(reader)?.val() as usize;
|
||||
let mut result = String::with_capacity(string_length);
|
||||
try!(reader.take(string_length as u64).read_to_string(&mut result));
|
||||
reader
|
||||
.take(string_length as u64)
|
||||
.read_to_string(&mut result)?;
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
@@ -123,12 +111,10 @@ mod test {
|
||||
|
||||
fn serialize_test<T: BinarySerializable + Eq>(v: T, num_bytes: usize) {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
|
||||
if num_bytes != 0 {
|
||||
assert_eq!(v.serialize(&mut buffer).unwrap(), num_bytes);
|
||||
v.serialize(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), num_bytes);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
v.serialize(&mut buffer).unwrap();
|
||||
}
|
||||
let mut cursor = &buffer[..];
|
||||
@@ -152,15 +138,15 @@ mod test {
|
||||
#[test]
|
||||
fn test_serialize_string() {
|
||||
serialize_test(String::from(""), 1);
|
||||
serialize_test(String::from("ぽよぽよ"), 1 + 3*4);
|
||||
serialize_test(String::from("富士さん見える。"), 1 + 3*8);
|
||||
serialize_test(String::from("ぽよぽよ"), 1 + 3 * 4);
|
||||
serialize_test(String::from("富士さん見える。"), 1 + 3 * 8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_vec() {
|
||||
let v: Vec<u8> = Vec::new();
|
||||
serialize_test(v, 1);
|
||||
serialize_test(vec!(1u32, 3u32), 1 + 4*2);
|
||||
serialize_test(vec![1u32, 3u32], 1 + 4 * 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -10,7 +10,7 @@ pub struct OpenTimer<'a> {
|
||||
impl<'a> OpenTimer<'a> {
|
||||
/// Starts timing a new named subtask
|
||||
///
|
||||
/// The timer is stopped automatically
|
||||
/// The timer is stopped automatically
|
||||
/// when the `OpenTimer` is dropped.
|
||||
pub fn open(&mut self, name: &'static str) -> OpenTimer {
|
||||
OpenTimer {
|
||||
@@ -23,17 +23,20 @@ impl<'a> OpenTimer<'a> {
|
||||
}
|
||||
|
||||
impl<'a> Drop for OpenTimer<'a> {
|
||||
fn drop(&mut self,) {
|
||||
self.timer_tree.timings.push(Timing {
|
||||
fn drop(&mut self) {
|
||||
self.timer_tree.timings.push(Timing {
|
||||
name: self.name,
|
||||
duration: self.start.to(PreciseTime::now()).num_microseconds().unwrap(),
|
||||
duration: self.start
|
||||
.to(PreciseTime::now())
|
||||
.num_microseconds()
|
||||
.unwrap(),
|
||||
depth: self.depth,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Timing recording
|
||||
#[derive(Debug, RustcEncodable)]
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct Timing {
|
||||
name: &'static str,
|
||||
duration: i64,
|
||||
@@ -41,18 +44,17 @@ pub struct Timing {
|
||||
}
|
||||
|
||||
/// Timer tree
|
||||
#[derive(Debug, RustcEncodable)]
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct TimerTree {
|
||||
timings: Vec<Timing>,
|
||||
}
|
||||
|
||||
impl TimerTree {
|
||||
|
||||
/// Returns the total time elapsed in microseconds
|
||||
pub fn total_time(&self,) -> i64 {
|
||||
/// Returns the total time elapsed in microseconds
|
||||
pub fn total_time(&self) -> i64 {
|
||||
self.timings.last().unwrap().duration
|
||||
}
|
||||
|
||||
|
||||
/// Open a new named subtask
|
||||
pub fn open(&mut self, name: &'static str) -> OpenTimer {
|
||||
OpenTimer {
|
||||
@@ -72,7 +74,6 @@ impl Default for TimerTree {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
|
||||
@@ -3,59 +3,55 @@ use std::io;
|
||||
use std::io::Write;
|
||||
use std::io::Read;
|
||||
|
||||
|
||||
|
||||
/// Wrapper over a `u64` that serializes as a variable int.
|
||||
/// Wrapper over a `u64` that serializes as a variable int.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub struct VInt(pub u64);
|
||||
|
||||
impl VInt {
|
||||
pub fn val(&self,) -> u64 {
|
||||
pub fn val(&self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for VInt {
|
||||
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut remaining = self.0;
|
||||
let mut written: usize = 0;
|
||||
let mut buffer = [0u8; 10];
|
||||
let mut i = 0;
|
||||
loop {
|
||||
let next_byte: u8 = (remaining % 128u64) as u8;
|
||||
remaining /= 128u64;
|
||||
if remaining == 0u64 {
|
||||
buffer[written] = next_byte | 128u8;
|
||||
written += 1;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
buffer[written] = next_byte;
|
||||
written += 1;
|
||||
buffer[i] = next_byte | 128u8;
|
||||
return writer.write_all(&buffer[0..i + 1]);
|
||||
} else {
|
||||
buffer[i] = next_byte;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
try!(writer.write_all(&buffer[0..written]));
|
||||
Ok(written)
|
||||
}
|
||||
|
||||
fn deserialize(reader: &mut Read) -> io::Result<Self> {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let mut bytes = reader.bytes();
|
||||
let mut result = 0u64;
|
||||
let mut shift = 0u64;
|
||||
loop {
|
||||
match bytes.next() {
|
||||
Some(Ok(b)) => {
|
||||
result += ((b % 128u8) as u64) << shift;
|
||||
result += u64::from(b % 128u8) << shift;
|
||||
if b & 128u8 != 0u8 {
|
||||
break;
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
_ => {
|
||||
return Err(io::Error::new(io::ErrorKind::InvalidData, "Reach end of buffer"))
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"Reach end of buffer",
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(VInt(result))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,159 +0,0 @@
|
||||
use super::{BlockEncoder, BlockDecoder};
|
||||
use super::NUM_DOCS_PER_BLOCK;
|
||||
use compression::{VIntEncoder, VIntDecoder};
|
||||
|
||||
pub struct CompositeEncoder {
|
||||
block_encoder: BlockEncoder,
|
||||
output: Vec<u8>,
|
||||
}
|
||||
|
||||
impl CompositeEncoder {
|
||||
|
||||
pub fn new() -> CompositeEncoder {
|
||||
CompositeEncoder {
|
||||
block_encoder: BlockEncoder::new(),
|
||||
output: Vec::with_capacity(500_000),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compress_sorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
self.output.clear();
|
||||
let num_blocks = vals.len() / NUM_DOCS_PER_BLOCK;
|
||||
let mut offset = 0u32;
|
||||
for i in 0..num_blocks {
|
||||
let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK .. (i + 1) * NUM_DOCS_PER_BLOCK];
|
||||
let block_compressed = self.block_encoder.compress_block_sorted(vals_slice, offset);
|
||||
offset = vals_slice[NUM_DOCS_PER_BLOCK - 1];
|
||||
self.output.extend_from_slice(block_compressed);
|
||||
}
|
||||
let vint_compressed = self.block_encoder.compress_vint_sorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..], offset);
|
||||
self.output.extend_from_slice(vint_compressed);
|
||||
&self.output
|
||||
}
|
||||
|
||||
pub fn compress_unsorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
self.output.clear();
|
||||
let num_blocks = vals.len() / NUM_DOCS_PER_BLOCK;
|
||||
for i in 0..num_blocks {
|
||||
let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK .. (i + 1) * NUM_DOCS_PER_BLOCK];
|
||||
let block_compressed = self.block_encoder.compress_block_unsorted(vals_slice);
|
||||
self.output.extend_from_slice(block_compressed);
|
||||
}
|
||||
let vint_compressed = self.block_encoder.compress_vint_unsorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..]);
|
||||
self.output.extend_from_slice(vint_compressed);
|
||||
&self.output
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct CompositeDecoder {
|
||||
block_decoder: BlockDecoder,
|
||||
vals: Vec<u32>,
|
||||
}
|
||||
|
||||
|
||||
impl CompositeDecoder {
|
||||
pub fn new() -> CompositeDecoder {
|
||||
CompositeDecoder {
|
||||
block_decoder: BlockDecoder::new(),
|
||||
vals: Vec::with_capacity(500_000),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncompress_sorted(&mut self, mut compressed_data: &[u8], uncompressed_len: usize) -> &[u32] {
|
||||
if uncompressed_len > self.vals.capacity() {
|
||||
let extra_capacity = uncompressed_len - self.vals.capacity();
|
||||
self.vals.reserve(extra_capacity);
|
||||
}
|
||||
let mut offset = 0u32;
|
||||
self.vals.clear();
|
||||
let num_blocks = uncompressed_len / NUM_DOCS_PER_BLOCK;
|
||||
for _ in 0..num_blocks {
|
||||
compressed_data = self.block_decoder.uncompress_block_sorted(compressed_data, offset);
|
||||
offset = self.block_decoder.output(NUM_DOCS_PER_BLOCK - 1);
|
||||
self.vals.extend_from_slice(self.block_decoder.output_array());
|
||||
}
|
||||
self.block_decoder.uncompress_vint_sorted(compressed_data, offset, uncompressed_len % NUM_DOCS_PER_BLOCK);
|
||||
self.vals.extend_from_slice(self.block_decoder.output_array());
|
||||
&self.vals
|
||||
}
|
||||
|
||||
pub fn uncompress_unsorted(&mut self, mut compressed_data: &[u8], uncompressed_len: usize) -> &[u32] {
|
||||
self.vals.clear();
|
||||
let num_blocks = uncompressed_len / NUM_DOCS_PER_BLOCK;
|
||||
for _ in 0..num_blocks {
|
||||
compressed_data = self.block_decoder.uncompress_block_unsorted(compressed_data);
|
||||
self.vals.extend_from_slice(self.block_decoder.output_array());
|
||||
}
|
||||
self.block_decoder.uncompress_vint_unsorted(compressed_data, uncompressed_len % NUM_DOCS_PER_BLOCK);
|
||||
self.vals.extend_from_slice(self.block_decoder.output_array());
|
||||
&self.vals
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Vec<u32>> for CompositeDecoder {
|
||||
fn into(self) -> Vec<u32> {
|
||||
self.vals
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use test::Bencher;
|
||||
use super::*;
|
||||
use compression::tests::generate_array;
|
||||
|
||||
#[test]
|
||||
fn test_composite_unsorted() {
|
||||
let data = generate_array(10_000, 0.1);
|
||||
let mut encoder = CompositeEncoder::new();
|
||||
let compressed = encoder.compress_unsorted(&data);
|
||||
assert_eq!(compressed.len(), 19_790);
|
||||
let mut decoder = CompositeDecoder::new();
|
||||
let result = decoder.uncompress_unsorted(&compressed, data.len());
|
||||
for i in 0..data.len() {
|
||||
assert_eq!(data[i], result[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_composite_sorted() {
|
||||
let data = generate_array(10_000, 0.1);
|
||||
let mut encoder = CompositeEncoder::new();
|
||||
let compressed = encoder.compress_sorted(&data);
|
||||
assert_eq!(compressed.len(), 7_822);
|
||||
let mut decoder = CompositeDecoder::new();
|
||||
let result = decoder.uncompress_sorted(&compressed, data.len());
|
||||
for i in 0..data.len() {
|
||||
assert_eq!(data[i], result[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const BENCH_NUM_INTS: usize = 99_968;
|
||||
|
||||
#[bench]
|
||||
fn bench_compress(b: &mut Bencher) {
|
||||
let mut encoder = CompositeEncoder::new();
|
||||
let data = generate_array(BENCH_NUM_INTS, 0.1);
|
||||
b.iter(|| {
|
||||
encoder.compress_sorted(&data);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_uncompress(b: &mut Bencher) {
|
||||
let mut encoder = CompositeEncoder::new();
|
||||
let data = generate_array(BENCH_NUM_INTS, 0.1);
|
||||
let compressed = encoder.compress_sorted(&data);
|
||||
let mut decoder = CompositeDecoder::new();
|
||||
b.iter(|| {
|
||||
decoder.uncompress_sorted(compressed, BENCH_NUM_INTS);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,127 +0,0 @@
|
||||
use common::bitpacker::compute_num_bits;
|
||||
use common::bitpacker::{BitPacker, BitUnpacker};
|
||||
use std::cmp;
|
||||
use std::io::Write;
|
||||
use super::NUM_DOCS_PER_BLOCK;
|
||||
|
||||
const COMPRESSED_BLOCK_MAX_SIZE: usize = NUM_DOCS_PER_BLOCK * 4 + 1;
|
||||
|
||||
pub fn compress_sorted(vals: &mut [u32], mut output: &mut [u8], offset: u32) -> usize {
|
||||
let mut max_delta = 0;
|
||||
{
|
||||
let mut local_offset = offset;
|
||||
for i in 0..NUM_DOCS_PER_BLOCK {
|
||||
let val = vals[i];
|
||||
let delta = val - local_offset;
|
||||
max_delta = cmp::max(max_delta, delta);
|
||||
vals[i] = delta;
|
||||
local_offset = val;
|
||||
}
|
||||
}
|
||||
let num_bits = compute_num_bits(max_delta);
|
||||
output.write_all(&[num_bits]).unwrap();
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
for val in vals {
|
||||
bit_packer.write(*val, &mut output).unwrap();
|
||||
}
|
||||
1 + bit_packer.close(&mut output).expect("packing in memory should never fail")
|
||||
}
|
||||
|
||||
|
||||
|
||||
pub struct BlockEncoder {
|
||||
pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
input_buffer: [u32; NUM_DOCS_PER_BLOCK],
|
||||
}
|
||||
|
||||
impl BlockEncoder {
|
||||
|
||||
pub fn new() -> BlockEncoder {
|
||||
BlockEncoder {
|
||||
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
input_buffer: [0u32; NUM_DOCS_PER_BLOCK],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
|
||||
self.input_buffer.clone_from_slice(vals);
|
||||
let compressed_size = compress_sorted(&mut self.input_buffer, &mut self.output, offset);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
let compressed_size: usize = {
|
||||
let mut output: &mut [u8] = &mut self.output;
|
||||
let max = vals.iter().cloned().max().expect("compress unsorted called with an empty array");
|
||||
let num_bits = compute_num_bits(max);
|
||||
output.write_all(&[num_bits]).unwrap();
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
for val in vals {
|
||||
bit_packer.write(*val, &mut output).unwrap();
|
||||
}
|
||||
1 + bit_packer.close(&mut output).expect("packing in memory should never fail")
|
||||
};
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
pub struct BlockDecoder {
|
||||
pub output: [u32; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
|
||||
impl BlockDecoder {
|
||||
pub fn new() -> BlockDecoder {
|
||||
BlockDecoder::with_val(0u32)
|
||||
}
|
||||
|
||||
pub fn with_val(val: u32) -> BlockDecoder {
|
||||
BlockDecoder {
|
||||
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncompress_block_sorted<'a>(&mut self, compressed_data: &'a [u8], mut offset: u32) -> &'a[u8] {
|
||||
let consumed_size = {
|
||||
let num_bits = compressed_data[0];
|
||||
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
|
||||
for i in 0..NUM_DOCS_PER_BLOCK {
|
||||
let delta = bit_unpacker.get(i);
|
||||
let val = offset + delta;
|
||||
self.output[i] = val;
|
||||
offset = val;
|
||||
}
|
||||
1 + (num_bits as usize * NUM_DOCS_PER_BLOCK + 7) / 8
|
||||
};
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
}
|
||||
|
||||
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a[u8] {
|
||||
let num_bits = compressed_data[0];
|
||||
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
|
||||
for i in 0..NUM_DOCS_PER_BLOCK {
|
||||
self.output[i] = bit_unpacker.get(i);
|
||||
}
|
||||
let consumed_size = 1 + (num_bits as usize * NUM_DOCS_PER_BLOCK + 7) / 8;
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output_array(&self,) -> &[u32] {
|
||||
&self.output[..self.output_len]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output(&self, idx: usize) -> u32 {
|
||||
self.output[idx]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,164 +1,135 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
|
||||
mod composite;
|
||||
pub use self::composite::{CompositeEncoder, CompositeDecoder};
|
||||
mod stream;
|
||||
|
||||
#[cfg(feature="simdcompression")]
|
||||
mod compression_simd;
|
||||
#[cfg(feature="simdcompression")]
|
||||
pub use self::compression_simd::{BlockEncoder, BlockDecoder};
|
||||
pub use self::stream::CompressedIntStream;
|
||||
|
||||
#[cfg(not(feature = "simdcompression"))]
|
||||
mod pack {
|
||||
mod compression_pack_nosimd;
|
||||
pub use self::compression_pack_nosimd::{BlockDecoder, BlockEncoder};
|
||||
}
|
||||
|
||||
#[cfg(not(feature="simdcompression"))]
|
||||
mod compression_nosimd;
|
||||
#[cfg(not(feature="simdcompression"))]
|
||||
pub use self::compression_nosimd::{BlockEncoder, BlockDecoder};
|
||||
#[cfg(feature = "simdcompression")]
|
||||
mod pack {
|
||||
mod compression_pack_simd;
|
||||
pub use self::compression_pack_simd::{BlockDecoder, BlockEncoder};
|
||||
}
|
||||
|
||||
pub use self::pack::{BlockDecoder, BlockEncoder};
|
||||
|
||||
#[cfg(any(not(feature = "simdcompression"), target_env = "msvc"))]
|
||||
mod vint {
|
||||
mod compression_vint_nosimd;
|
||||
pub(crate) use self::compression_vint_nosimd::*;
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "simdcompression", not(target_env = "msvc")))]
|
||||
mod vint {
|
||||
mod compression_vint_simd;
|
||||
pub(crate) use self::compression_vint_simd::*;
|
||||
}
|
||||
|
||||
/// Returns the size in bytes of a compressed block, given `num_bits`.
|
||||
pub fn compressed_block_size(num_bits: u8) -> usize {
|
||||
1 + (num_bits as usize) * 16
|
||||
}
|
||||
|
||||
pub trait VIntEncoder {
|
||||
/// Compresses an array of `u32` integers,
|
||||
/// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
|
||||
/// and variable bytes encoding.
|
||||
///
|
||||
/// The method takes an array of ints to compress, and returns
|
||||
/// a `&[u8]` representing the compressed data.
|
||||
///
|
||||
/// The method also takes an offset to give the value of the
|
||||
/// hypothetical previous element in the delta-encoding.
|
||||
fn compress_vint_sorted(&mut self, input: &[u32], offset: u32) -> &[u8];
|
||||
|
||||
/// Compresses an array of `u32` integers,
|
||||
/// using variable bytes encoding.
|
||||
///
|
||||
/// The method takes an array of ints to compress, and returns
|
||||
/// a `&[u8]` representing the compressed data.
|
||||
fn compress_vint_unsorted(&mut self, input: &[u32]) -> &[u8];
|
||||
}
|
||||
|
||||
pub trait VIntDecoder {
|
||||
fn uncompress_vint_sorted<'a>(&mut self, compressed_data: &'a [u8], offset: u32, num_els: usize) -> &'a [u8];
|
||||
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> &'a [u8];
|
||||
}
|
||||
|
||||
impl VIntEncoder for BlockEncoder{
|
||||
|
||||
fn compress_vint_sorted(&mut self, input: &[u32], mut offset: u32) -> &[u8] {
|
||||
let mut byte_written = 0;
|
||||
for &v in input {
|
||||
let mut to_encode: u32 = v - offset;
|
||||
offset = v;
|
||||
loop {
|
||||
let next_byte: u8 = (to_encode % 128u32) as u8;
|
||||
to_encode /= 128u32;
|
||||
if to_encode == 0u32 {
|
||||
self.output[byte_written] = next_byte | 128u8;
|
||||
byte_written += 1;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
self.output[byte_written] = next_byte;
|
||||
byte_written += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
&self.output[..byte_written]
|
||||
}
|
||||
|
||||
fn compress_vint_unsorted(&mut self, input: &[u32]) -> &[u8] {
|
||||
let mut byte_written = 0;
|
||||
for &v in input {
|
||||
let mut to_encode: u32 = v;
|
||||
loop {
|
||||
let next_byte: u8 = (to_encode % 128u32) as u8;
|
||||
to_encode /= 128u32;
|
||||
if to_encode == 0u32 {
|
||||
self.output[byte_written] = next_byte | 128u8;
|
||||
byte_written += 1;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
self.output[byte_written] = next_byte;
|
||||
byte_written += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
&self.output[..byte_written]
|
||||
}
|
||||
}
|
||||
|
||||
impl VIntDecoder for BlockDecoder {
|
||||
|
||||
/// Uncompress an array of `u32` integers,
|
||||
/// that were compressed using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
|
||||
/// and variable bytes encoding.
|
||||
///
|
||||
/// The method takes a number of int to decompress, and returns
|
||||
/// the amount of bytes that were read to decompress them.
|
||||
///
|
||||
/// The method also takes an offset to give the value of the
|
||||
/// hypothetical previous element in the delta-encoding.
|
||||
///
|
||||
/// For instance, if delta encoded are `1, 3, 9`, and the
|
||||
/// `offset` is 5, then the output will be:
|
||||
/// `5 + 1 = 6, 6 + 3= 9, 9 + 9 = 18`
|
||||
fn uncompress_vint_sorted<'a>(
|
||||
&mut self,
|
||||
compressed_data: &'a [u8],
|
||||
offset: u32,
|
||||
num_els: usize) -> &'a [u8] {
|
||||
let mut read_byte = 0;
|
||||
let mut result = offset;
|
||||
for i in 0..num_els {
|
||||
let mut shift = 0u32;
|
||||
loop {
|
||||
let cur_byte = compressed_data[read_byte];
|
||||
read_byte += 1;
|
||||
result += ((cur_byte % 128u8) as u32) << shift;
|
||||
if cur_byte & 128u8 != 0u8 {
|
||||
break;
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
self.output[i] = result;
|
||||
}
|
||||
self.output_len = num_els;
|
||||
&compressed_data[read_byte..]
|
||||
}
|
||||
|
||||
fn uncompress_vint_unsorted<'a>(
|
||||
&mut self,
|
||||
compressed_data: &'a [u8],
|
||||
num_els: usize) -> &'a [u8] {
|
||||
let mut read_byte = 0;
|
||||
for i in 0..num_els {
|
||||
let mut result = 0u32;
|
||||
let mut shift = 0u32;
|
||||
loop {
|
||||
let cur_byte = compressed_data[read_byte];
|
||||
read_byte += 1;
|
||||
result += ((cur_byte % 128u8) as u32) << shift;
|
||||
if cur_byte & 128u8 != 0u8 {
|
||||
break;
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
self.output[i] = result;
|
||||
}
|
||||
self.output_len = num_els;
|
||||
&compressed_data[read_byte..]
|
||||
}
|
||||
|
||||
num_els: usize,
|
||||
) -> usize;
|
||||
|
||||
/// Uncompress an array of `u32s`, compressed using variable
|
||||
/// byte encoding.
|
||||
///
|
||||
/// The method takes a number of int to decompress, and returns
|
||||
/// the amount of bytes that were read to decompress them.
|
||||
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize;
|
||||
}
|
||||
|
||||
|
||||
|
||||
impl VIntEncoder for BlockEncoder {
|
||||
fn compress_vint_sorted(&mut self, input: &[u32], offset: u32) -> &[u8] {
|
||||
vint::compress_sorted(input, &mut self.output, offset)
|
||||
}
|
||||
|
||||
pub const NUM_DOCS_PER_BLOCK: usize = 128; //< should be a power of 2 to let the compiler optimize.
|
||||
fn compress_vint_unsorted(&mut self, input: &[u32]) -> &[u8] {
|
||||
vint::compress_unsorted(input, &mut self.output)
|
||||
}
|
||||
}
|
||||
|
||||
impl VIntDecoder for BlockDecoder {
|
||||
fn uncompress_vint_sorted<'a>(
|
||||
&mut self,
|
||||
compressed_data: &'a [u8],
|
||||
offset: u32,
|
||||
num_els: usize,
|
||||
) -> usize {
|
||||
self.output_len = num_els;
|
||||
vint::uncompress_sorted(compressed_data, &mut self.output[..num_els], offset)
|
||||
}
|
||||
|
||||
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
|
||||
self.output_len = num_els;
|
||||
vint::uncompress_unsorted(compressed_data, &mut self.output[..num_els])
|
||||
}
|
||||
}
|
||||
|
||||
pub const COMPRESSION_BLOCK_SIZE: usize = 128;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use rand::Rng;
|
||||
use rand::SeedableRng;
|
||||
use rand::XorShiftRng;
|
||||
use super::*;
|
||||
use tests;
|
||||
use test::Bencher;
|
||||
|
||||
fn generate_array_with_seed(n: usize, ratio: f32, seed_val: u32) -> Vec<u32> {
|
||||
let seed: &[u32; 4] = &[1, 2, 3, seed_val];
|
||||
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
|
||||
(0..u32::max_value())
|
||||
.filter(|_| rng.next_f32()< ratio)
|
||||
.take(n)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn generate_array(n: usize, ratio: f32) -> Vec<u32> {
|
||||
generate_array_with_seed(n, ratio, 4)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encode_sorted_block() {
|
||||
let vals: Vec<u32> = (0u32..128u32).map(|i| i*7).collect();
|
||||
let vals: Vec<u32> = (0u32..128u32).map(|i| i * 7).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed_data = encoder.compress_block_sorted(&vals, 0);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_sorted(compressed_data, 0);
|
||||
assert_eq!(remaining_data.len(), 0);
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 0);
|
||||
assert_eq!(consumed_num_bytes, compressed_data.len());
|
||||
}
|
||||
for i in 0..128 {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
@@ -167,33 +138,33 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_encode_sorted_block_with_offset() {
|
||||
let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i*7).collect();
|
||||
let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i * 7).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed_data = encoder.compress_block_sorted(&vals, 10);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_sorted(compressed_data, 10);
|
||||
assert_eq!(remaining_data.len(), 0);
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 10);
|
||||
assert_eq!(consumed_num_bytes, compressed_data.len());
|
||||
}
|
||||
for i in 0..128 {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_encode_sorted_block_with_junk() {
|
||||
let mut compressed: Vec<u8> = Vec::new();
|
||||
let n = 128;
|
||||
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32)*7u32).collect();
|
||||
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed_data = encoder.compress_block_sorted(&vals, 10);
|
||||
compressed.extend_from_slice(compressed_data);
|
||||
compressed.push(173u8);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_sorted(&compressed, 10);
|
||||
assert_eq!(remaining_data.len(), 1);
|
||||
assert_eq!(remaining_data[0], 173u8);
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(&compressed, 10);
|
||||
assert_eq!(consumed_num_bytes, compressed.len() - 1);
|
||||
assert_eq!(compressed[consumed_num_bytes], 173u8);
|
||||
}
|
||||
for i in 0..n {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
@@ -204,72 +175,94 @@ pub mod tests {
|
||||
fn test_encode_unsorted_block_with_junk() {
|
||||
let mut compressed: Vec<u8> = Vec::new();
|
||||
let n = 128;
|
||||
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32)*7u32 % 12).collect();
|
||||
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32 % 12).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed_data = encoder.compress_block_unsorted(&vals);
|
||||
compressed.extend_from_slice(compressed_data);
|
||||
compressed.push(173u8);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_unsorted(&compressed);
|
||||
assert_eq!(remaining_data.len(), 1);
|
||||
assert_eq!(remaining_data[0], 173u8);
|
||||
let consumed_num_bytes = decoder.uncompress_block_unsorted(&compressed);
|
||||
assert_eq!(consumed_num_bytes + 1, compressed.len());
|
||||
assert_eq!(compressed[consumed_num_bytes], 173u8);
|
||||
}
|
||||
for i in 0..n {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_encode_vint() {
|
||||
{
|
||||
let expected_length = 123;
|
||||
let expected_length = 154;
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let input: Vec<u32> = (0u32..123u32)
|
||||
.map(|i| 4 + i * 7 / 2)
|
||||
.into_iter()
|
||||
.collect();
|
||||
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
|
||||
for offset in &[0u32, 1u32, 2u32] {
|
||||
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
|
||||
assert_eq!(encoded_data.len(), expected_length);
|
||||
assert!(encoded_data.len() <= expected_length);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
let remaining_data = decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
|
||||
assert_eq!(0, remaining_data.len());
|
||||
let consumed_num_bytes =
|
||||
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
|
||||
assert_eq!(consumed_num_bytes, encoded_data.len());
|
||||
assert_eq!(input, decoder.output_array());
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let input = vec!(3u32, 17u32, 187u32);
|
||||
let encoded_data = encoder.compress_vint_sorted(&input, 0);
|
||||
assert_eq!(encoded_data.len(), 4);
|
||||
assert_eq!(encoded_data[0], 3u8 + 128u8);
|
||||
assert_eq!(encoded_data[1], (17u8 - 3u8) + 128u8);
|
||||
assert_eq!(encoded_data[2], (187u8 - 17u8 - 128u8));
|
||||
assert_eq!(encoded_data[3], (1u8 + 128u8));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[bench]
|
||||
fn bench_compress(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = generate_array(NUM_DOCS_PER_BLOCK, 0.1);
|
||||
let data = tests::generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
||||
b.iter(|| {
|
||||
encoder.compress_block_sorted(&data, 0u32);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
#[bench]
|
||||
fn bench_uncompress(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = generate_array(NUM_DOCS_PER_BLOCK, 0.1);
|
||||
let data = tests::generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
||||
let compressed = encoder.compress_block_sorted(&data, 0u32);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
b.iter(|| {
|
||||
decoder.uncompress_block_sorted(compressed, 0u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_docs_compression_numbits() {
|
||||
for num_bits in 0..33 {
|
||||
let mut data = [0u32; 128];
|
||||
if num_bits > 0 {
|
||||
data[0] = 1 << (num_bits - 1);
|
||||
}
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed = encoder.compress_block_unsorted(&data);
|
||||
assert_eq!(compressed[0] as usize, num_bits);
|
||||
assert_eq!(compressed.len(), compressed_block_size(compressed[0]));
|
||||
}
|
||||
}
|
||||
|
||||
const NUM_INTS_BENCH_VINT: usize = 10;
|
||||
|
||||
#[bench]
|
||||
fn bench_compress_vint(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = tests::generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
||||
b.iter(|| {
|
||||
encoder.compress_vint_sorted(&data, 0u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_uncompress_vint(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = tests::generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
||||
let compressed = encoder.compress_vint_sorted(&data, 0u32);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
b.iter(|| {
|
||||
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
142
src/compression/pack/compression_pack_nosimd.rs
Normal file
142
src/compression/pack/compression_pack_nosimd.rs
Normal file
@@ -0,0 +1,142 @@
|
||||
use common::bitpacker::compute_num_bits;
|
||||
use common::bitpacker::{BitPacker, BitUnpacker};
|
||||
use common::CountingWriter;
|
||||
use std::cmp;
|
||||
use std::io::Write;
|
||||
use super::super::COMPRESSION_BLOCK_SIZE;
|
||||
|
||||
const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * 4 + 1;
|
||||
|
||||
pub fn compress_sorted(vals: &mut [u32], output: &mut [u8], offset: u32) -> usize {
|
||||
let mut max_delta = 0;
|
||||
{
|
||||
let mut local_offset = offset;
|
||||
for i in 0..COMPRESSION_BLOCK_SIZE {
|
||||
let val = vals[i];
|
||||
let delta = val - local_offset;
|
||||
max_delta = cmp::max(max_delta, delta);
|
||||
vals[i] = delta;
|
||||
local_offset = val;
|
||||
}
|
||||
}
|
||||
let mut counting_writer = CountingWriter::wrap(output);
|
||||
let num_bits = compute_num_bits(max_delta as u64);
|
||||
counting_writer.write_all(&[num_bits]).unwrap();
|
||||
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
for val in vals {
|
||||
bit_packer.write(*val as u64, &mut counting_writer).unwrap();
|
||||
}
|
||||
counting_writer.written_bytes()
|
||||
}
|
||||
|
||||
pub struct BlockEncoder {
|
||||
pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
input_buffer: [u32; COMPRESSION_BLOCK_SIZE],
|
||||
}
|
||||
|
||||
impl BlockEncoder {
|
||||
pub fn new() -> BlockEncoder {
|
||||
BlockEncoder {
|
||||
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
input_buffer: [0u32; COMPRESSION_BLOCK_SIZE],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
|
||||
self.input_buffer.clone_from_slice(vals);
|
||||
let compressed_size = compress_sorted(&mut self.input_buffer, &mut self.output, offset);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
let compressed_size = {
|
||||
let output: &mut [u8] = &mut self.output;
|
||||
let max = vals.iter()
|
||||
.cloned()
|
||||
.max()
|
||||
.expect("compress unsorted called with an empty array");
|
||||
let num_bits = compute_num_bits(max as u64);
|
||||
let mut counting_writer = CountingWriter::wrap(output);
|
||||
counting_writer.write_all(&[num_bits]).unwrap();
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
for val in vals {
|
||||
bit_packer.write(*val as u64, &mut counting_writer).unwrap();
|
||||
}
|
||||
for _ in vals.len()..COMPRESSION_BLOCK_SIZE {
|
||||
bit_packer
|
||||
.write(vals[0] as u64, &mut counting_writer)
|
||||
.unwrap();
|
||||
}
|
||||
bit_packer.flush(&mut counting_writer).expect(
|
||||
"Flushing the bitpacking \
|
||||
in an in RAM buffer should never fail",
|
||||
);
|
||||
// we avoid writing "closing", because we
|
||||
// do not want 7 bytes of padding here.
|
||||
counting_writer.written_bytes()
|
||||
};
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BlockDecoder {
|
||||
pub output: [u32; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
impl BlockDecoder {
|
||||
pub fn new() -> BlockDecoder {
|
||||
BlockDecoder::with_val(0u32)
|
||||
}
|
||||
|
||||
pub fn with_val(val: u32) -> BlockDecoder {
|
||||
BlockDecoder {
|
||||
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncompress_block_sorted<'a>(
|
||||
&mut self,
|
||||
compressed_data: &'a [u8],
|
||||
mut offset: u32,
|
||||
) -> usize {
|
||||
let consumed_size = {
|
||||
let num_bits = compressed_data[0];
|
||||
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
|
||||
for i in 0..COMPRESSION_BLOCK_SIZE {
|
||||
let delta = bit_unpacker.get(i);
|
||||
let val = offset + delta as u32;
|
||||
self.output[i] = val;
|
||||
offset = val;
|
||||
}
|
||||
1 + (num_bits as usize * COMPRESSION_BLOCK_SIZE + 7) / 8
|
||||
};
|
||||
self.output_len = COMPRESSION_BLOCK_SIZE;
|
||||
consumed_size
|
||||
}
|
||||
|
||||
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> usize {
|
||||
let num_bits = compressed_data[0];
|
||||
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
|
||||
for i in 0..COMPRESSION_BLOCK_SIZE {
|
||||
self.output[i] = bit_unpacker.get(i) as u32;
|
||||
}
|
||||
let consumed_size = 1 + (num_bits as usize * COMPRESSION_BLOCK_SIZE + 7) / 8;
|
||||
self.output_len = COMPRESSION_BLOCK_SIZE;
|
||||
consumed_size
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output_array(&self) -> &[u32] {
|
||||
&self.output[..self.output_len]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output(&self, idx: usize) -> u32 {
|
||||
self.output[idx]
|
||||
}
|
||||
}
|
||||
@@ -1,71 +1,63 @@
|
||||
use super::super::COMPRESSION_BLOCK_SIZE;
|
||||
|
||||
use super::NUM_DOCS_PER_BLOCK;
|
||||
const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * 4 + 1;
|
||||
|
||||
use libc::size_t;
|
||||
mod simdcomp {
|
||||
use libc::size_t;
|
||||
|
||||
const COMPRESSED_BLOCK_MAX_SIZE: usize = NUM_DOCS_PER_BLOCK * 4 + 1;
|
||||
extern "C" {
|
||||
pub fn compress_sorted(data: *const u32, output: *mut u8, offset: u32) -> size_t;
|
||||
|
||||
extern {
|
||||
fn compress_sorted_cpp(
|
||||
data: *const u32,
|
||||
output: *mut u8,
|
||||
offset: u32) -> size_t;
|
||||
pub fn uncompress_sorted(
|
||||
compressed_data: *const u8,
|
||||
output: *mut u32,
|
||||
offset: u32,
|
||||
) -> size_t;
|
||||
|
||||
fn uncompress_sorted_cpp(
|
||||
compressed_data: *const u8,
|
||||
output: *mut u32,
|
||||
offset: u32) -> size_t;
|
||||
|
||||
fn compress_unsorted_cpp(
|
||||
data: *const u32,
|
||||
output: *mut u8) -> size_t;
|
||||
pub fn compress_unsorted(data: *const u32, output: *mut u8) -> size_t;
|
||||
|
||||
fn uncompress_unsorted_cpp(
|
||||
compressed_data: *const u8,
|
||||
output: *mut u32) -> size_t;
|
||||
pub fn uncompress_unsorted(compressed_data: *const u8, output: *mut u32) -> size_t;
|
||||
}
|
||||
}
|
||||
|
||||
fn compress_sorted(vals: &[u32], output: &mut [u8], offset: u32) -> usize {
|
||||
unsafe { compress_sorted_cpp(vals.as_ptr(), output.as_mut_ptr(), offset) }
|
||||
unsafe { simdcomp::compress_sorted(vals.as_ptr(), output.as_mut_ptr(), offset) }
|
||||
}
|
||||
|
||||
fn uncompress_sorted(compressed_data: &[u8], output: &mut [u32], offset: u32) -> usize {
|
||||
unsafe { uncompress_sorted_cpp(compressed_data.as_ptr(), output.as_mut_ptr(), offset) }
|
||||
unsafe { simdcomp::uncompress_sorted(compressed_data.as_ptr(), output.as_mut_ptr(), offset) }
|
||||
}
|
||||
|
||||
fn compress_unsorted(vals: &[u32], output: &mut [u8]) -> usize {
|
||||
unsafe { compress_unsorted_cpp(vals.as_ptr(), output.as_mut_ptr()) }
|
||||
unsafe { simdcomp::compress_unsorted(vals.as_ptr(), output.as_mut_ptr()) }
|
||||
}
|
||||
|
||||
fn uncompress_unsorted(compressed_data: &[u8], output: &mut [u32]) -> usize {
|
||||
unsafe { uncompress_unsorted_cpp(compressed_data.as_ptr(), output.as_mut_ptr()) }
|
||||
unsafe { simdcomp::uncompress_unsorted(compressed_data.as_ptr(), output.as_mut_ptr()) }
|
||||
}
|
||||
|
||||
|
||||
pub struct BlockEncoder {
|
||||
pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
impl BlockEncoder {
|
||||
|
||||
pub fn new() -> BlockEncoder {
|
||||
BlockEncoder {
|
||||
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
|
||||
let compressed_size = compress_sorted(vals, &mut self.output, offset);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
|
||||
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
let compressed_size = compress_unsorted(vals, &mut self.output);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
pub struct BlockDecoder {
|
||||
@@ -73,40 +65,52 @@ pub struct BlockDecoder {
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
|
||||
impl BlockDecoder {
|
||||
pub fn new() -> BlockDecoder {
|
||||
BlockDecoder::with_val(0u32)
|
||||
}
|
||||
|
||||
|
||||
pub fn with_val(val: u32) -> BlockDecoder {
|
||||
BlockDecoder {
|
||||
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncompress_block_sorted<'a>(&mut self, compressed_data: &'a [u8], offset: u32) -> &'a[u8] {
|
||||
|
||||
pub fn uncompress_block_sorted(&mut self, compressed_data: &[u8], offset: u32) -> usize {
|
||||
let consumed_size = uncompress_sorted(compressed_data, &mut self.output, offset);
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
self.output_len = COMPRESSION_BLOCK_SIZE;
|
||||
consumed_size
|
||||
}
|
||||
|
||||
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a[u8] {
|
||||
|
||||
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> usize {
|
||||
let consumed_size = uncompress_unsorted(compressed_data, &mut self.output);
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
self.output_len = COMPRESSION_BLOCK_SIZE;
|
||||
consumed_size
|
||||
}
|
||||
|
||||
|
||||
#[inline]
|
||||
pub fn output_array(&self,) -> &[u32] {
|
||||
pub fn output_array(&self) -> &[u32] {
|
||||
&self.output[..self.output_len]
|
||||
}
|
||||
|
||||
|
||||
#[inline]
|
||||
pub fn output(&self, idx: usize) -> u32 {
|
||||
self.output[idx]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::BlockEncoder;
|
||||
|
||||
#[test]
|
||||
fn test_all_docs_compression_len() {
|
||||
let data: Vec<u32> = (0u32..128u32).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed = encoder.compress_block_sorted(&data, 0u32);
|
||||
assert_eq!(compressed.len(), 17);
|
||||
}
|
||||
|
||||
}
|
||||
130
src/compression/stream.rs
Normal file
130
src/compression/stream.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use compression::BlockDecoder;
|
||||
use compression::COMPRESSION_BLOCK_SIZE;
|
||||
use compression::compressed_block_size;
|
||||
use directory::{ReadOnlySource, SourceRead};
|
||||
|
||||
/// Reads a stream of compressed ints.
|
||||
///
|
||||
/// Tantivy uses `CompressedIntStream` to read
|
||||
/// the position file.
|
||||
/// The `.skip(...)` makes it possible to avoid
|
||||
/// decompressing blocks that are not required.
|
||||
pub struct CompressedIntStream {
|
||||
buffer: SourceRead,
|
||||
block_decoder: BlockDecoder,
|
||||
inner_offset: usize,
|
||||
}
|
||||
|
||||
impl CompressedIntStream {
|
||||
/// Opens a compressed int stream.
|
||||
pub(crate) fn wrap(source: ReadOnlySource) -> CompressedIntStream {
|
||||
CompressedIntStream {
|
||||
buffer: SourceRead::from(source),
|
||||
block_decoder: BlockDecoder::new(),
|
||||
inner_offset: COMPRESSION_BLOCK_SIZE,
|
||||
}
|
||||
}
|
||||
|
||||
/// Fills a buffer with the next `output.len()` integers,
|
||||
/// and advance the stream by that many els.
|
||||
pub fn read(&mut self, output: &mut [u32]) {
|
||||
let mut num_els: usize = output.len();
|
||||
let mut start: usize = 0;
|
||||
loop {
|
||||
let available = COMPRESSION_BLOCK_SIZE - self.inner_offset;
|
||||
if num_els >= available {
|
||||
if available > 0 {
|
||||
let uncompressed_block =
|
||||
&self.block_decoder.output_array()[self.inner_offset..];
|
||||
output[start..][..available].clone_from_slice(uncompressed_block);
|
||||
}
|
||||
num_els -= available;
|
||||
start += available;
|
||||
let num_consumed_bytes = self.block_decoder
|
||||
.uncompress_block_unsorted(self.buffer.as_ref());
|
||||
self.buffer.advance(num_consumed_bytes);
|
||||
self.inner_offset = 0;
|
||||
} else {
|
||||
let uncompressed_block = &self.block_decoder.output_array()
|
||||
[self.inner_offset..self.inner_offset + num_els];
|
||||
output[start..][..num_els].clone_from_slice(uncompressed_block);
|
||||
self.inner_offset += num_els;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip the next `skip_len` integer.
|
||||
///
|
||||
/// If a full block is skipped, calling
|
||||
/// `.skip(...)` will avoid decompressing it.
|
||||
pub fn skip(&mut self, mut skip_len: usize) {
|
||||
let available = COMPRESSION_BLOCK_SIZE - self.inner_offset;
|
||||
if available >= skip_len {
|
||||
self.inner_offset += skip_len;
|
||||
} else {
|
||||
skip_len -= available;
|
||||
// entirely skip decompressing some blocks.
|
||||
while skip_len >= COMPRESSION_BLOCK_SIZE {
|
||||
skip_len -= COMPRESSION_BLOCK_SIZE;
|
||||
let num_bits: u8 = self.buffer.as_ref()[0];
|
||||
let block_len = compressed_block_size(num_bits);
|
||||
self.buffer.advance(block_len);
|
||||
}
|
||||
let num_consumed_bytes = self.block_decoder
|
||||
.uncompress_block_unsorted(self.buffer.as_ref());
|
||||
self.buffer.advance(num_consumed_bytes);
|
||||
self.inner_offset = skip_len;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use super::CompressedIntStream;
|
||||
use compression::compressed_block_size;
|
||||
use compression::COMPRESSION_BLOCK_SIZE;
|
||||
use compression::BlockEncoder;
|
||||
use directory::ReadOnlySource;
|
||||
|
||||
fn create_stream_buffer() -> ReadOnlySource {
|
||||
let mut buffer: Vec<u8> = vec![];
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let vals: Vec<u32> = (0u32..1_025u32).collect();
|
||||
for chunk in vals.chunks(COMPRESSION_BLOCK_SIZE) {
|
||||
let compressed_block = encoder.compress_block_unsorted(chunk);
|
||||
let num_bits = compressed_block[0];
|
||||
assert_eq!(compressed_block_size(num_bits), compressed_block.len());
|
||||
buffer.extend_from_slice(compressed_block);
|
||||
}
|
||||
if cfg!(simd) {
|
||||
buffer.extend_from_slice(&[0u8; 7]);
|
||||
}
|
||||
ReadOnlySource::from(buffer)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compressed_int_stream() {
|
||||
let buffer = create_stream_buffer();
|
||||
let mut stream = CompressedIntStream::wrap(buffer);
|
||||
let mut block: [u32; COMPRESSION_BLOCK_SIZE] = [0u32; COMPRESSION_BLOCK_SIZE];
|
||||
|
||||
stream.read(&mut block[0..2]);
|
||||
assert_eq!(block[0], 0);
|
||||
assert_eq!(block[1], 1);
|
||||
stream.skip(5);
|
||||
stream.read(&mut block[0..3]);
|
||||
assert_eq!(block[0], 7);
|
||||
assert_eq!(block[1], 8);
|
||||
assert_eq!(block[2], 9);
|
||||
stream.skip(500);
|
||||
stream.read(&mut block[0..3]);
|
||||
assert_eq!(block[0], 510);
|
||||
assert_eq!(block[1], 511);
|
||||
assert_eq!(block[2], 512);
|
||||
stream.skip(511);
|
||||
stream.read(&mut block[..1]);
|
||||
assert_eq!(block[0], 1024);
|
||||
}
|
||||
}
|
||||
92
src/compression/vint/compression_vint_nosimd.rs
Normal file
92
src/compression/vint/compression_vint_nosimd.rs
Normal file
@@ -0,0 +1,92 @@
|
||||
#[inline(always)]
|
||||
pub(crate) fn compress_sorted<'a>(
|
||||
input: &[u32],
|
||||
output: &'a mut [u8],
|
||||
mut offset: u32,
|
||||
) -> &'a [u8] {
|
||||
let mut byte_written = 0;
|
||||
for &v in input {
|
||||
let mut to_encode: u32 = v - offset;
|
||||
offset = v;
|
||||
loop {
|
||||
let next_byte: u8 = (to_encode % 128u32) as u8;
|
||||
to_encode /= 128u32;
|
||||
if to_encode == 0u32 {
|
||||
output[byte_written] = next_byte | 128u8;
|
||||
byte_written += 1;
|
||||
break;
|
||||
} else {
|
||||
output[byte_written] = next_byte;
|
||||
byte_written += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
&output[..byte_written]
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
|
||||
let mut byte_written = 0;
|
||||
for &v in input {
|
||||
let mut to_encode: u32 = v;
|
||||
loop {
|
||||
let next_byte: u8 = (to_encode % 128u32) as u8;
|
||||
to_encode /= 128u32;
|
||||
if to_encode == 0u32 {
|
||||
output[byte_written] = next_byte | 128u8;
|
||||
byte_written += 1;
|
||||
break;
|
||||
} else {
|
||||
output[byte_written] = next_byte;
|
||||
byte_written += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
&output[..byte_written]
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn uncompress_sorted<'a>(
|
||||
compressed_data: &'a [u8],
|
||||
output: &mut [u32],
|
||||
offset: u32,
|
||||
) -> usize {
|
||||
let mut read_byte = 0;
|
||||
let mut result = offset;
|
||||
let num_els = output.len();
|
||||
for i in 0..num_els {
|
||||
let mut shift = 0u32;
|
||||
loop {
|
||||
let cur_byte = compressed_data[read_byte];
|
||||
read_byte += 1;
|
||||
result += ((cur_byte % 128u8) as u32) << shift;
|
||||
if cur_byte & 128u8 != 0u8 {
|
||||
break;
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
output[i] = result;
|
||||
}
|
||||
read_byte
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> usize {
|
||||
let mut read_byte = 0;
|
||||
let num_els = output.len();
|
||||
for i in 0..num_els {
|
||||
let mut result = 0u32;
|
||||
let mut shift = 0u32;
|
||||
loop {
|
||||
let cur_byte = compressed_data[read_byte];
|
||||
read_byte += 1;
|
||||
result += ((cur_byte % 128u8) as u32) << shift;
|
||||
if cur_byte & 128u8 != 0u8 {
|
||||
break;
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
output[i] = result;
|
||||
}
|
||||
read_byte
|
||||
}
|
||||
72
src/compression/vint/compression_vint_simd.rs
Normal file
72
src/compression/vint/compression_vint_simd.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
mod streamvbyte {
|
||||
|
||||
use libc::size_t;
|
||||
|
||||
extern "C" {
|
||||
pub fn streamvbyte_delta_encode(
|
||||
data: *const u32,
|
||||
num_els: u32,
|
||||
output: *mut u8,
|
||||
offset: u32,
|
||||
) -> size_t;
|
||||
|
||||
pub fn streamvbyte_delta_decode(
|
||||
compressed_data: *const u8,
|
||||
output: *mut u32,
|
||||
num_els: u32,
|
||||
offset: u32,
|
||||
) -> size_t;
|
||||
|
||||
pub fn streamvbyte_encode(data: *const u32, num_els: u32, output: *mut u8) -> size_t;
|
||||
|
||||
pub fn streamvbyte_decode(
|
||||
compressed_data: *const u8,
|
||||
output: *mut u32,
|
||||
num_els: usize,
|
||||
) -> size_t;
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], offset: u32) -> &'a [u8] {
|
||||
let compress_length = unsafe {
|
||||
streamvbyte::streamvbyte_delta_encode(
|
||||
input.as_ptr(),
|
||||
input.len() as u32,
|
||||
output.as_mut_ptr(),
|
||||
offset,
|
||||
)
|
||||
};
|
||||
&output[..compress_length]
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
|
||||
let compress_length = unsafe {
|
||||
streamvbyte::streamvbyte_encode(input.as_ptr(), input.len() as u32, output.as_mut_ptr())
|
||||
};
|
||||
&output[..compress_length]
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn uncompress_sorted<'a>(
|
||||
compressed_data: &'a [u8],
|
||||
output: &mut [u32],
|
||||
offset: u32,
|
||||
) -> usize {
|
||||
unsafe {
|
||||
streamvbyte::streamvbyte_delta_decode(
|
||||
compressed_data.as_ptr(),
|
||||
output.as_mut_ptr(),
|
||||
output.len() as u32,
|
||||
offset,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> usize {
|
||||
unsafe {
|
||||
streamvbyte::streamvbyte_decode(compressed_data.as_ptr(), output.as_mut_ptr(), output.len())
|
||||
}
|
||||
}
|
||||
@@ -1,51 +1,45 @@
|
||||
use Result;
|
||||
use Error;
|
||||
use error::{ErrorKind, ResultExt};
|
||||
use serde_json;
|
||||
use schema::Schema;
|
||||
use std::sync::Arc;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::fmt;
|
||||
use rustc_serialize::json;
|
||||
use core::SegmentId;
|
||||
use directory::{Directory, MmapDirectory, RAMDirectory};
|
||||
use indexer::IndexWriter;
|
||||
use indexer::index_writer::open_index_writer;
|
||||
use core::searcher::Searcher;
|
||||
use std::convert::From;
|
||||
use num_cpus;
|
||||
use super::segment::Segment;
|
||||
use core::SegmentReader;
|
||||
use super::pool::Pool;
|
||||
use core::SegmentMeta;
|
||||
use super::pool::LeasedItem;
|
||||
use std::path::Path;
|
||||
use indexer::SegmentManager;
|
||||
use core::IndexMeta;
|
||||
use indexer::DirectoryLock;
|
||||
use IndexWriter;
|
||||
use directory::ManagedDirectory;
|
||||
use core::META_FILEPATH;
|
||||
use super::segment::create_segment;
|
||||
use indexer::segment_updater::save_new_metas;
|
||||
use tokenizer::TokenizerManager;
|
||||
|
||||
const NUM_SEARCHERS: usize = 12;
|
||||
|
||||
/// Accessor to the index segment manager
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
pub fn get_segment_manager(index: &Index) -> Arc<SegmentManager> {
|
||||
index.segment_manager.clone()
|
||||
}
|
||||
|
||||
|
||||
fn load_metas(directory: &Directory) -> Result<IndexMeta> {
|
||||
let meta_file = try!(directory.open_read(&META_FILEPATH));
|
||||
let meta_content = String::from_utf8_lossy(meta_file.as_slice());
|
||||
json::decode(&meta_content)
|
||||
.map_err(|e| Error::CorruptedFile(META_FILEPATH.clone(), Box::new(e)))
|
||||
let meta_data = directory.atomic_read(&META_FILEPATH)?;
|
||||
let meta_string = String::from_utf8_lossy(&meta_data);
|
||||
serde_json::from_str(&meta_string).chain_err(|| ErrorKind::CorruptedFile(META_FILEPATH.clone()))
|
||||
}
|
||||
|
||||
/// Tantivy's Search Index
|
||||
/// Search Index
|
||||
pub struct Index {
|
||||
segment_manager: Arc<SegmentManager>,
|
||||
|
||||
directory: Box<Directory>,
|
||||
directory: ManagedDirectory,
|
||||
schema: Schema,
|
||||
searcher_pool: Arc<Pool<Searcher>>,
|
||||
docstamp: u64,
|
||||
tokenizers: TokenizerManager,
|
||||
}
|
||||
|
||||
impl Index {
|
||||
@@ -54,18 +48,28 @@ impl Index {
|
||||
/// The index will be allocated in anonymous memory.
|
||||
/// This should only be used for unit tests.
|
||||
pub fn create_in_ram(schema: Schema) -> Index {
|
||||
let directory = Box::new(RAMDirectory::create());
|
||||
Index::from_directory(directory, schema).expect("Creating a RAMDirectory should never fail") // unwrap is ok here
|
||||
let ram_directory = RAMDirectory::create();
|
||||
// unwrap is ok here
|
||||
let directory = ManagedDirectory::new(ram_directory).expect(
|
||||
"Creating a managed directory from a brand new RAM directory \
|
||||
should never fail.",
|
||||
);
|
||||
Index::from_directory(directory, schema).expect("Creating a RAMDirectory should never fail")
|
||||
}
|
||||
|
||||
/// Creates a new index in a given filepath.
|
||||
/// The index will use the `MMapDirectory`.
|
||||
///
|
||||
/// If a previous index was in this directory, then its meta file will be destroyed.
|
||||
pub fn create(directory_path: &Path, schema: Schema) -> Result<Index> {
|
||||
let mut directory = MmapDirectory::open(directory_path)?;
|
||||
save_new_metas(schema.clone(), 0, &mut directory)?;
|
||||
Index::from_directory(box directory, schema)
|
||||
pub fn create<P: AsRef<Path>>(directory_path: P, schema: Schema) -> Result<Index> {
|
||||
let mmap_directory = MmapDirectory::open(directory_path)?;
|
||||
let directory = ManagedDirectory::new(mmap_directory)?;
|
||||
Index::from_directory(directory, schema)
|
||||
}
|
||||
|
||||
/// Accessor for the tokenizer manager.
|
||||
pub fn tokenizers(&self) -> &TokenizerManager {
|
||||
&self.tokenizers
|
||||
}
|
||||
|
||||
/// Creates a new index in a temp directory.
|
||||
@@ -77,64 +81,71 @@ impl Index {
|
||||
/// The temp directory is only used for testing the `MmapDirectory`.
|
||||
/// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
|
||||
pub fn create_from_tempdir(schema: Schema) -> Result<Index> {
|
||||
let directory = Box::new(try!(MmapDirectory::create_from_tempdir()));
|
||||
let mmap_directory = MmapDirectory::create_from_tempdir()?;
|
||||
let directory = ManagedDirectory::new(mmap_directory)?;
|
||||
Index::from_directory(directory, schema)
|
||||
}
|
||||
|
||||
/// Creates a new index given a directory and an `IndexMeta`.
|
||||
fn create_from_metas(directory: Box<Directory>, metas: IndexMeta) -> Result<Index> {
|
||||
fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> {
|
||||
let schema = metas.schema.clone();
|
||||
let docstamp = metas.docstamp;
|
||||
let committed_segments = metas.committed_segments;
|
||||
// TODO log somethings is uncommitted is not empty.
|
||||
let index = Index {
|
||||
segment_manager: Arc::new(SegmentManager::from_segments(committed_segments)),
|
||||
directory: directory,
|
||||
schema: schema,
|
||||
directory,
|
||||
schema,
|
||||
searcher_pool: Arc::new(Pool::new()),
|
||||
docstamp: docstamp,
|
||||
tokenizers: TokenizerManager::default(),
|
||||
};
|
||||
try!(index.load_searchers());
|
||||
index.load_searchers()?;
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
/// Opens a new directory from a directory.
|
||||
pub fn from_directory(directory: Box<Directory>, schema: Schema) -> Result<Index> {
|
||||
Index::create_from_metas(directory, IndexMeta::with_schema(schema))
|
||||
/// Create a new index from a directory.
|
||||
pub fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
|
||||
save_new_metas(schema.clone(), 0, directory.borrow_mut())?;
|
||||
let metas = IndexMeta::with_schema(schema);
|
||||
Index::create_from_metas(directory, &metas)
|
||||
}
|
||||
|
||||
/// Opens a new directory from an index path.
|
||||
pub fn open(directory_path: &Path) -> Result<Index> {
|
||||
let directory = try!(MmapDirectory::open(directory_path));
|
||||
let metas = try!(load_metas(&directory)); //< TODO does the directory already exists?
|
||||
Index::create_from_metas(directory.box_clone(), metas)
|
||||
pub fn open<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
|
||||
let mmap_directory = MmapDirectory::open(directory_path)?;
|
||||
let directory = ManagedDirectory::new(mmap_directory)?;
|
||||
let metas = load_metas(&directory)?;
|
||||
Index::create_from_metas(directory, &metas)
|
||||
}
|
||||
|
||||
/// Returns the index docstamp.
|
||||
/// Reads the index meta file from the directory.
|
||||
pub fn load_metas(&self) -> Result<IndexMeta> {
|
||||
load_metas(self.directory())
|
||||
}
|
||||
|
||||
/// Open a new index writer. Attempts to acquire a lockfile.
|
||||
///
|
||||
/// The docstamp is the number of documents that have been added
|
||||
/// from the beginning of time, and until the moment of the last commit.
|
||||
pub fn docstamp(&self) -> u64 {
|
||||
self.docstamp
|
||||
}
|
||||
|
||||
/// Creates a multithreaded writer.
|
||||
/// Each writer produces an independent segment.
|
||||
/// The lockfile should be deleted on drop, but it is possible
|
||||
/// that due to a panic or other error, a stale lockfile will be
|
||||
/// left in the index directory. If you are sure that no other
|
||||
/// `IndexWriter` on the system is accessing the index directory,
|
||||
/// it is safe to manually delete the lockfile.
|
||||
///
|
||||
/// num_threads specifies the number of indexing workers that
|
||||
/// should work at the same time.
|
||||
///
|
||||
/// # Errors
|
||||
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
|
||||
/// # Panics
|
||||
/// If the heap size per thread is too small, panics.
|
||||
pub fn writer_with_num_threads(&self,
|
||||
num_threads: usize,
|
||||
heap_size_in_bytes: usize)
|
||||
-> Result<IndexWriter> {
|
||||
IndexWriter::open(self, num_threads, heap_size_in_bytes)
|
||||
pub fn writer_with_num_threads(
|
||||
&self,
|
||||
num_threads: usize,
|
||||
heap_size_in_bytes: usize,
|
||||
) -> Result<IndexWriter> {
|
||||
let directory_lock = DirectoryLock::lock(self.directory().box_clone())?;
|
||||
open_index_writer(self, num_threads, heap_size_in_bytes, directory_lock)
|
||||
}
|
||||
|
||||
|
||||
/// Creates a multithreaded writer
|
||||
/// It just calls `writer_with_num_threads` with the number of cores as `num_threads`
|
||||
///
|
||||
/// # Errors
|
||||
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
|
||||
/// # Panics
|
||||
@@ -151,65 +162,63 @@ impl Index {
|
||||
}
|
||||
|
||||
/// Returns the list of segments that are searchable
|
||||
pub fn searchable_segments(&self) -> Vec<Segment> {
|
||||
self.searchable_segment_ids()
|
||||
pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
|
||||
Ok(self.searchable_segment_metas()?
|
||||
.into_iter()
|
||||
.map(|segment_id| self.segment(segment_id))
|
||||
.collect()
|
||||
.map(|segment_meta| self.segment(segment_meta))
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Remove all of the file associated with the segment.
|
||||
///
|
||||
/// This method cannot fail. If a problem occurs,
|
||||
/// some files may end up never being removed.
|
||||
/// The error will only be logged.
|
||||
pub fn delete_segment(&self, segment_id: SegmentId) {
|
||||
self.segment(segment_id).delete();
|
||||
}
|
||||
|
||||
/// Return a segment object given a `segment_id`
|
||||
///
|
||||
/// The segment may or may not exist.
|
||||
pub fn segment(&self, segment_id: SegmentId) -> Segment {
|
||||
create_segment(self.clone(), segment_id)
|
||||
}
|
||||
|
||||
/// Return a reference to the index directory.
|
||||
pub fn directory(&self) -> &Directory {
|
||||
&*self.directory
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the index directory.
|
||||
pub fn directory_mut(&mut self) -> &mut Directory {
|
||||
&mut *self.directory
|
||||
}
|
||||
|
||||
/// Returns the list of segment ids that are searchable.
|
||||
fn searchable_segment_ids(&self) -> Vec<SegmentId> {
|
||||
self.segment_manager.committed_segments()
|
||||
#[doc(hidden)]
|
||||
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
|
||||
create_segment(self.clone(), segment_meta)
|
||||
}
|
||||
|
||||
/// Creates a new segment.
|
||||
pub fn new_segment(&self) -> Segment {
|
||||
self.segment(SegmentId::generate_random())
|
||||
let segment_meta = SegmentMeta::new(SegmentId::generate_random());
|
||||
create_segment(self.clone(), segment_meta)
|
||||
}
|
||||
|
||||
/// Return a reference to the index directory.
|
||||
pub fn directory(&self) -> &ManagedDirectory {
|
||||
&self.directory
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the index directory.
|
||||
pub fn directory_mut(&mut self) -> &mut ManagedDirectory {
|
||||
&mut self.directory
|
||||
}
|
||||
|
||||
/// Reads the meta.json and returns the list of
|
||||
/// `SegmentMeta` from the last commit.
|
||||
pub fn searchable_segment_metas(&self) -> Result<Vec<SegmentMeta>> {
|
||||
Ok(self.load_metas()?.segments)
|
||||
}
|
||||
|
||||
/// Returns the list of segment ids that are searchable.
|
||||
pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
|
||||
Ok(self.searchable_segment_metas()?
|
||||
.iter()
|
||||
.map(|segment_meta| segment_meta.id())
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Creates a new generation of searchers after
|
||||
|
||||
/// a change of the set of searchable indexes.
|
||||
///
|
||||
/// This needs to be called when a new segment has been
|
||||
/// published or after a merge.
|
||||
pub fn load_searchers(&self) -> Result<()> {
|
||||
let searchable_segments = self.searchable_segments();
|
||||
let mut searchers = Vec::new();
|
||||
for _ in 0..NUM_SEARCHERS {
|
||||
let searchable_segments_clone = searchable_segments.clone();
|
||||
let segment_readers: Vec<SegmentReader> = try!(searchable_segments_clone.into_iter()
|
||||
.map(SegmentReader::open)
|
||||
.collect());
|
||||
let searcher = Searcher::from(segment_readers);
|
||||
searchers.push(searcher);
|
||||
}
|
||||
let searchable_segments = self.searchable_segments()?;
|
||||
let segment_readers: Vec<SegmentReader> = searchable_segments
|
||||
.iter()
|
||||
.map(SegmentReader::open)
|
||||
.collect::<Result<_>>()?;
|
||||
let searchers = (0..NUM_SEARCHERS)
|
||||
.map(|_| Searcher::from(segment_readers.clone()))
|
||||
.collect();
|
||||
self.searcher_pool.publish_new_generation(searchers);
|
||||
Ok(())
|
||||
}
|
||||
@@ -229,7 +238,6 @@ impl Index {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl fmt::Debug for Index {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Index({:?})", self.directory)
|
||||
@@ -239,12 +247,10 @@ impl fmt::Debug for Index {
|
||||
impl Clone for Index {
|
||||
fn clone(&self) -> Index {
|
||||
Index {
|
||||
segment_manager: self.segment_manager.clone(),
|
||||
|
||||
directory: self.directory.box_clone(),
|
||||
directory: self.directory.clone(),
|
||||
schema: self.schema.clone(),
|
||||
searcher_pool: self.searcher_pool.clone(),
|
||||
docstamp: self.docstamp,
|
||||
searcher_pool: Arc::clone(&self.searcher_pool),
|
||||
tokenizers: self.tokenizers.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,47 +1,54 @@
|
||||
|
||||
use schema::Schema;
|
||||
use core::SegmentId;
|
||||
|
||||
use core::SegmentMeta;
|
||||
|
||||
/// Meta information about the `Index`.
|
||||
///
|
||||
///
|
||||
/// This object is serialized on disk in the `meta.json` file.
|
||||
/// It keeps information about
|
||||
/// It keeps information about
|
||||
/// * the searchable segments,
|
||||
/// * the index docstamp
|
||||
/// * the index `docstamp`
|
||||
/// * the schema
|
||||
///
|
||||
#[derive(Clone,Debug,RustcDecodable,RustcEncodable)]
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct IndexMeta {
|
||||
pub committed_segments: Vec<SegmentMeta>,
|
||||
pub uncommitted_segments: Vec<SegmentMeta>,
|
||||
pub segments: Vec<SegmentMeta>,
|
||||
pub schema: Schema,
|
||||
pub docstamp: u64,
|
||||
pub opstamp: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")] pub payload: Option<String>,
|
||||
}
|
||||
|
||||
impl IndexMeta {
|
||||
pub fn with_schema(schema: Schema) -> IndexMeta {
|
||||
IndexMeta {
|
||||
committed_segments: Vec::new(),
|
||||
uncommitted_segments: Vec::new(),
|
||||
schema: schema,
|
||||
docstamp: 0u64,
|
||||
segments: vec![],
|
||||
schema,
|
||||
opstamp: 0u64,
|
||||
payload: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, RustcDecodable,RustcEncodable)]
|
||||
pub struct SegmentMeta {
|
||||
pub segment_id: SegmentId,
|
||||
pub num_docs: u32,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl SegmentMeta {
|
||||
pub fn new(segment_id: SegmentId, num_docs: u32) -> SegmentMeta {
|
||||
SegmentMeta {
|
||||
segment_id: segment_id,
|
||||
num_docs: num_docs,
|
||||
}
|
||||
mod tests {
|
||||
|
||||
use serde_json;
|
||||
use super::IndexMeta;
|
||||
use schema::{SchemaBuilder, TEXT};
|
||||
|
||||
#[test]
|
||||
fn test_serialize_metas() {
|
||||
let schema = {
|
||||
let mut schema_builder = SchemaBuilder::new();
|
||||
schema_builder.add_text_field("text", TEXT);
|
||||
schema_builder.build()
|
||||
};
|
||||
let index_metas = IndexMeta {
|
||||
segments: Vec::new(),
|
||||
schema: schema,
|
||||
opstamp: 0u64,
|
||||
payload: None,
|
||||
};
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
152
src/core/inverted_index_reader.rs
Normal file
152
src/core/inverted_index_reader.rs
Normal file
@@ -0,0 +1,152 @@
|
||||
use directory::{ReadOnlySource, SourceRead};
|
||||
use termdict::{TermDictionary, TermDictionaryImpl};
|
||||
use postings::{BlockSegmentPostings, SegmentPostings};
|
||||
use postings::TermInfo;
|
||||
use schema::IndexRecordOption;
|
||||
use schema::Term;
|
||||
use std::cmp;
|
||||
use fastfield::DeleteBitSet;
|
||||
use schema::Schema;
|
||||
use compression::CompressedIntStream;
|
||||
|
||||
/// The inverted index reader is in charge of accessing
|
||||
/// the inverted index associated to a specific field.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// It is safe to delete the segment associated to
|
||||
/// an `InvertedIndexReader`. As long as it is open,
|
||||
/// the `ReadOnlySource` it is relying on should
|
||||
/// stay available.
|
||||
///
|
||||
///
|
||||
/// `InvertedIndexReader` are created by calling
|
||||
/// the `SegmentReader`'s [`.inverted_index(...)`] method
|
||||
pub struct InvertedIndexReader {
|
||||
termdict: TermDictionaryImpl,
|
||||
postings_source: ReadOnlySource,
|
||||
positions_source: ReadOnlySource,
|
||||
delete_bitset: DeleteBitSet,
|
||||
schema: Schema,
|
||||
}
|
||||
|
||||
impl InvertedIndexReader {
|
||||
pub(crate) fn new(
|
||||
termdict_source: ReadOnlySource,
|
||||
postings_source: ReadOnlySource,
|
||||
positions_source: ReadOnlySource,
|
||||
delete_bitset: DeleteBitSet,
|
||||
schema: Schema,
|
||||
) -> InvertedIndexReader {
|
||||
InvertedIndexReader {
|
||||
termdict: TermDictionaryImpl::from_source(termdict_source),
|
||||
postings_source,
|
||||
positions_source,
|
||||
delete_bitset,
|
||||
schema,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the term info associated with the term.
|
||||
pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
|
||||
self.termdict.get(term.as_slice())
|
||||
}
|
||||
|
||||
/// Return the term dictionary datastructure.
|
||||
pub fn terms(&self) -> &TermDictionaryImpl {
|
||||
&self.termdict
|
||||
}
|
||||
|
||||
/// Resets the block segment to another position of the postings
|
||||
/// file.
|
||||
///
|
||||
/// This is useful for enumerating through a list of terms,
|
||||
/// and consuming the associated posting lists while avoiding
|
||||
/// reallocating a `BlockSegmentPostings`.
|
||||
///
|
||||
/// # Warning
|
||||
///
|
||||
/// This does not reset the positions list.
|
||||
pub fn reset_block_postings_from_terminfo(
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
block_postings: &mut BlockSegmentPostings,
|
||||
) {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let end_source = self.postings_source.len();
|
||||
let postings_slice = self.postings_source.slice(offset, end_source);
|
||||
let postings_reader = SourceRead::from(postings_slice);
|
||||
block_postings.reset(term_info.doc_freq as usize, postings_reader);
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `term_info`.
|
||||
/// This method is for an advanced usage only.
|
||||
///
|
||||
/// Most user should prefer using `read_postings` instead.
|
||||
pub fn read_block_postings_from_terminfo(
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
option: IndexRecordOption,
|
||||
) -> BlockSegmentPostings {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let postings_data = self.postings_source.slice_from(offset);
|
||||
let has_freq = option.has_freq();
|
||||
BlockSegmentPostings::from_data(
|
||||
term_info.doc_freq as usize,
|
||||
SourceRead::from(postings_data),
|
||||
has_freq,
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns a posting object given a `term_info`.
|
||||
/// This method is for an advanced usage only.
|
||||
///
|
||||
/// Most user should prefer using `read_postings` instead.
|
||||
pub fn read_postings_from_terminfo(
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
option: IndexRecordOption,
|
||||
) -> SegmentPostings {
|
||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option);
|
||||
let delete_bitset = self.delete_bitset.clone();
|
||||
let position_stream = {
|
||||
if option.has_positions() {
|
||||
let position_offset = term_info.positions_offset;
|
||||
let positions_source = self.positions_source.slice_from(position_offset as usize);
|
||||
let mut stream = CompressedIntStream::wrap(positions_source);
|
||||
stream.skip(term_info.positions_inner_offset as usize);
|
||||
Some(stream)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
SegmentPostings::from_block_postings(block_postings, delete_bitset, position_stream)
|
||||
}
|
||||
|
||||
/// Returns the segment postings associated with the term, and with the given option,
|
||||
/// or `None` if the term has never been encountered and indexed.
|
||||
///
|
||||
/// If the field was not indexed with the indexing options that cover
|
||||
/// the requested options, the returned `SegmentPostings` the method does not fail
|
||||
/// and returns a `SegmentPostings` with as much information as possible.
|
||||
///
|
||||
/// For instance, requesting `IndexRecordOption::Freq` for a
|
||||
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
||||
/// with `DocId`s and frequencies.
|
||||
pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
|
||||
let field = term.field();
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let term_info = get!(self.get_term_info(term));
|
||||
let maximum_option = get!(field_entry.field_type().get_index_record_option());
|
||||
let best_effort_option = cmp::min(maximum_option, option);
|
||||
Some(self.read_postings_from_terminfo(&term_info, best_effort_option))
|
||||
}
|
||||
|
||||
/// Returns the number of documents containing the term.
|
||||
pub fn doc_freq(&self, term: &Term) -> u32 {
|
||||
match self.get_term_info(term) {
|
||||
Some(term_info) => term_info.doc_freq,
|
||||
None => 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
pub mod searcher;
|
||||
|
||||
pub mod index;
|
||||
mod segment_reader;
|
||||
mod segment_id;
|
||||
@@ -7,20 +6,37 @@ mod segment_component;
|
||||
mod segment;
|
||||
mod index_meta;
|
||||
mod pool;
|
||||
mod segment_meta;
|
||||
mod inverted_index_reader;
|
||||
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub use self::inverted_index_reader::InvertedIndexReader;
|
||||
pub use self::searcher::Searcher;
|
||||
pub use self::segment_component::SegmentComponent;
|
||||
pub use self::segment_id::SegmentId;
|
||||
pub use self::segment_reader::SegmentReader;
|
||||
pub use self::segment::Segment;
|
||||
pub use self::segment::SegmentInfo;
|
||||
pub use self::segment::SerializableSegment;
|
||||
pub use self::index::Index;
|
||||
pub use self::index_meta::{IndexMeta, SegmentMeta};
|
||||
pub use self::segment_meta::SegmentMeta;
|
||||
pub use self::index_meta::IndexMeta;
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
lazy_static! {
|
||||
/// The meta file contains all the information about the list of segments and the schema
|
||||
/// of the index.
|
||||
pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json");
|
||||
}
|
||||
|
||||
/// The managed file contains a list of files that were created by the tantivy
|
||||
/// and will therefore be garbage collected when they are deemed useless by tantivy.
|
||||
///
|
||||
/// Removing this file is safe, but will prevent the garbage collection of all of the file that
|
||||
/// are currently in the directory
|
||||
pub static ref MANAGED_FILEPATH: PathBuf = PathBuf::from(".managed.json");
|
||||
|
||||
/// Only one process should be able to write tantivy's index at a time.
|
||||
/// This file, when present, is in charge of preventing other processes to open an IndexWriter.
|
||||
///
|
||||
/// If the process is killed and this file remains, it is safe to remove it manually.
|
||||
pub static ref LOCKFILE_FILEPATH: PathBuf = PathBuf::from(".tantivy-indexer.lock");
|
||||
}
|
||||
|
||||
@@ -17,10 +17,10 @@ pub struct Pool<T> {
|
||||
}
|
||||
|
||||
impl<T> Pool<T> {
|
||||
|
||||
pub fn new() -> Pool<T> {
|
||||
let queue = Arc::new(MsQueue::new());
|
||||
Pool {
|
||||
queue: Arc::new(MsQueue::new()),
|
||||
queue,
|
||||
freshest_generation: AtomicUsize::default(),
|
||||
next_generation: AtomicUsize::default(),
|
||||
}
|
||||
@@ -30,52 +30,52 @@ impl<T> Pool<T> {
|
||||
let next_generation = self.next_generation.fetch_add(1, Ordering::SeqCst) + 1;
|
||||
for item in items {
|
||||
let gen_item = GenerationItem {
|
||||
item: item,
|
||||
item,
|
||||
generation: next_generation,
|
||||
};
|
||||
self.queue.push(gen_item);
|
||||
}
|
||||
self.advertise_generation(next_generation);
|
||||
}
|
||||
|
||||
/// At the exit of this method,
|
||||
|
||||
/// At the exit of this method,
|
||||
/// - freshest_generation has a value greater or equal than generation
|
||||
/// - freshest_generation has a value that has been advertised
|
||||
/// - freshest_generation has
|
||||
/// - freshest_generation has)
|
||||
fn advertise_generation(&self, generation: usize) {
|
||||
// not optimal at all but the easiest to read proof.
|
||||
// not optimal at all but the easiest to read proof.
|
||||
loop {
|
||||
let former_generation = self.freshest_generation.load(Ordering::Acquire);
|
||||
if former_generation >= generation {
|
||||
break;
|
||||
}
|
||||
self.freshest_generation.compare_and_swap(former_generation, generation, Ordering::SeqCst);
|
||||
}
|
||||
self.freshest_generation.compare_and_swap(
|
||||
former_generation,
|
||||
generation,
|
||||
Ordering::SeqCst,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn generation(&self,) -> usize {
|
||||
|
||||
fn generation(&self) -> usize {
|
||||
self.freshest_generation.load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
pub fn acquire(&self,) -> LeasedItem<T> {
|
||||
pub fn acquire(&self) -> LeasedItem<T> {
|
||||
let generation = self.generation();
|
||||
loop {
|
||||
let gen_item = self.queue.pop();
|
||||
if gen_item.generation >= generation {
|
||||
return LeasedItem {
|
||||
gen_item: Some(gen_item),
|
||||
recycle_queue: self.queue.clone(),
|
||||
}
|
||||
}
|
||||
else {
|
||||
recycle_queue: Arc::clone(&self.queue),
|
||||
};
|
||||
} else {
|
||||
// this searcher is obsolete,
|
||||
// removing it from the pool.
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
pub struct LeasedItem<T> {
|
||||
@@ -84,29 +84,33 @@ pub struct LeasedItem<T> {
|
||||
}
|
||||
|
||||
impl<T> Deref for LeasedItem<T> {
|
||||
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.gen_item.as_ref().expect("Unwrapping a leased item should never fail").item // unwrap is safe here
|
||||
&self.gen_item
|
||||
.as_ref()
|
||||
.expect("Unwrapping a leased item should never fail")
|
||||
.item // unwrap is safe here
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for LeasedItem<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
&mut self.gen_item.as_mut().expect("Unwrapping a mut leased item should never fail").item // unwrap is safe here
|
||||
&mut self.gen_item
|
||||
.as_mut()
|
||||
.expect("Unwrapping a mut leased item should never fail")
|
||||
.item // unwrap is safe here
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for LeasedItem<T> {
|
||||
fn drop(&mut self) {
|
||||
let gen_item: GenerationItem<T> = mem::replace(&mut self.gen_item, None).expect("Unwrapping a leased item should never fail");
|
||||
let gen_item: GenerationItem<T> = mem::replace(&mut self.gen_item, None)
|
||||
.expect("Unwrapping a leased item should never fail");
|
||||
self.recycle_queue.push(gen_item);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -127,4 +131,4 @@ mod tests {
|
||||
assert_eq!(*pool.acquire(), 11);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,68 +6,106 @@ use common::TimerTree;
|
||||
use query::Query;
|
||||
use DocId;
|
||||
use DocAddress;
|
||||
use schema::Term;
|
||||
|
||||
use schema::{Field, Term};
|
||||
use termdict::{TermDictionary, TermMerger};
|
||||
use std::sync::Arc;
|
||||
use std::fmt;
|
||||
use core::InvertedIndexReader;
|
||||
|
||||
/// Holds a list of `SegmentReader`s ready for search.
|
||||
///
|
||||
/// It guarantees that the `Segment` will not be removed before
|
||||
/// It guarantees that the `Segment` will not be removed before
|
||||
/// the destruction of the `Searcher`.
|
||||
///
|
||||
#[derive(Debug)]
|
||||
///
|
||||
pub struct Searcher {
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
}
|
||||
|
||||
impl Searcher {
|
||||
|
||||
/// Fetches a document from tantivy's store given a `DocAddress`.
|
||||
///
|
||||
/// The searcher uses the segment ordinal to route the
|
||||
/// the request to the right `Segment`.
|
||||
/// the request to the right `Segment`.
|
||||
pub fn doc(&self, doc_address: &DocAddress) -> Result<Document> {
|
||||
let DocAddress(segment_local_id, doc_id) = *doc_address;
|
||||
let segment_reader = &self.segment_readers[segment_local_id as usize];
|
||||
segment_reader.doc(doc_id)
|
||||
}
|
||||
|
||||
|
||||
/// Returns the overall number of documents in the index.
|
||||
pub fn num_docs(&self,) -> DocId {
|
||||
pub fn num_docs(&self) -> DocId {
|
||||
self.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.num_docs())
|
||||
.fold(0u32, |acc, val| acc + val)
|
||||
}
|
||||
|
||||
|
||||
/// Return the overall number of documents containing
|
||||
/// the given term.
|
||||
/// the given term.
|
||||
pub fn doc_freq(&self, term: &Term) -> u32 {
|
||||
self.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.doc_freq(term))
|
||||
.map(|segment_reader| segment_reader.inverted_index(term.field()).doc_freq(term))
|
||||
.fold(0u32, |acc, val| acc + val)
|
||||
}
|
||||
|
||||
|
||||
/// Return the list of segment readers
|
||||
pub fn segment_readers(&self,) -> &Vec<SegmentReader> {
|
||||
pub fn segment_readers(&self) -> &[SegmentReader] {
|
||||
&self.segment_readers
|
||||
}
|
||||
|
||||
|
||||
/// Returns the segment_reader associated with the given segment_ordinal
|
||||
pub fn segment_reader(&self, segment_ord: usize) -> &SegmentReader {
|
||||
&self.segment_readers[segment_ord]
|
||||
pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
|
||||
&self.segment_readers[segment_ord as usize]
|
||||
}
|
||||
|
||||
|
||||
/// Runs a query on the segment readers wrapped by the searcher
|
||||
pub fn search<C: Collector>(&self, query: &Query, collector: &mut C) -> Result<TimerTree> {
|
||||
query.search(self, collector)
|
||||
}
|
||||
|
||||
/// Return the field searcher associated to a `Field`.
|
||||
pub fn field(&self, field: Field) -> FieldSearcher {
|
||||
let inv_index_readers = self.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.inverted_index(field))
|
||||
.collect::<Vec<_>>();
|
||||
FieldSearcher::new(inv_index_readers)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FieldSearcher {
|
||||
inv_index_readers: Vec<Arc<InvertedIndexReader>>,
|
||||
}
|
||||
|
||||
impl FieldSearcher {
|
||||
fn new(inv_index_readers: Vec<Arc<InvertedIndexReader>>) -> FieldSearcher {
|
||||
FieldSearcher { inv_index_readers }
|
||||
}
|
||||
|
||||
/// Returns a Stream over all of the sorted unique terms of
|
||||
/// for the given field.
|
||||
pub fn terms(&self) -> TermMerger {
|
||||
let term_streamers: Vec<_> = self.inv_index_readers
|
||||
.iter()
|
||||
.map(|inverted_index| inverted_index.terms().stream())
|
||||
.collect();
|
||||
TermMerger::new(term_streamers)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<SegmentReader>> for Searcher {
|
||||
fn from(segment_readers: Vec<SegmentReader>) -> Searcher {
|
||||
Searcher {
|
||||
segment_readers: segment_readers,
|
||||
}
|
||||
Searcher { segment_readers }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Searcher {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let segment_ids = self.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.segment_id())
|
||||
.collect::<Vec<_>>();
|
||||
write!(f, "Searcher({:?})", segment_ids)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,100 +1,98 @@
|
||||
use Result;
|
||||
use std::path::PathBuf;
|
||||
use schema::Schema;
|
||||
use DocId;
|
||||
use std::fmt;
|
||||
use core::SegmentId;
|
||||
use directory::{ReadOnlySource, WritePtr};
|
||||
use directory::{FileProtection, ReadOnlySource, WritePtr};
|
||||
use indexer::segment_serializer::SegmentSerializer;
|
||||
use super::SegmentComponent;
|
||||
use core::Index;
|
||||
use std::result;
|
||||
use directory::error::{FileError, OpenWriteError};
|
||||
|
||||
|
||||
use directory::Directory;
|
||||
use core::SegmentMeta;
|
||||
use directory::error::{OpenReadError, OpenWriteError};
|
||||
|
||||
/// A segment is a piece of the index.
|
||||
#[derive(Clone)]
|
||||
pub struct Segment {
|
||||
index: Index,
|
||||
segment_id: SegmentId,
|
||||
meta: SegmentMeta,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Segment {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Segment({:?})", self.segment_id.uuid_string())
|
||||
write!(f, "Segment({:?})", self.id().uuid_string())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Creates a new segment given an `Index` and a `SegmentId`
|
||||
///
|
||||
/// The function is here to make it private outside `tantivy`.
|
||||
pub fn create_segment(index: Index, segment_id: SegmentId) -> Segment {
|
||||
Segment {
|
||||
index: index,
|
||||
segment_id: segment_id,
|
||||
}
|
||||
///
|
||||
/// The function is here to make it private outside `tantivy`.
|
||||
pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
|
||||
Segment { index, meta }
|
||||
}
|
||||
|
||||
impl Segment {
|
||||
|
||||
/// Returns the index the segment belongs to.
|
||||
pub fn index(&self) -> &Index {
|
||||
&self.index
|
||||
}
|
||||
|
||||
/// Returns our index's schema.
|
||||
pub fn schema(&self,) -> Schema {
|
||||
pub fn schema(&self) -> Schema {
|
||||
self.index.schema()
|
||||
}
|
||||
|
||||
/// Returns the segment's id.
|
||||
pub fn id(&self,) -> SegmentId {
|
||||
self.segment_id
|
||||
/// Returns the segment meta-information
|
||||
pub fn meta(&self) -> &SegmentMeta {
|
||||
&self.meta
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn set_delete_meta(&mut self, num_deleted_docs: u32, opstamp: u64) {
|
||||
self.meta.set_delete_meta(num_deleted_docs, opstamp);
|
||||
}
|
||||
|
||||
/// Returns the segment's id.
|
||||
pub fn id(&self) -> SegmentId {
|
||||
self.meta.id()
|
||||
}
|
||||
|
||||
|
||||
/// Returns the relative path of a component of our segment.
|
||||
///
|
||||
/// It just joins the segment id with the extension
|
||||
///
|
||||
/// It just joins the segment id with the extension
|
||||
/// associated to a segment component.
|
||||
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
|
||||
self.segment_id.relative_path(component)
|
||||
self.meta.relative_path(component)
|
||||
}
|
||||
|
||||
/// Deletes all of the document of the segment.
|
||||
/// This is called when there is a merge or a rollback.
|
||||
/// Protects a specific component file from being deleted.
|
||||
///
|
||||
/// # Disclaimer
|
||||
/// If deletion of a file fails (e.g. a file
|
||||
/// was read-only.), the method does not
|
||||
/// fail and just logs an error
|
||||
pub fn delete(&self,) {
|
||||
for component in SegmentComponent::values() {
|
||||
let rel_path = self.relative_path(component);
|
||||
if let Err(err) = self.index.directory().delete(&rel_path) {
|
||||
match err {
|
||||
FileError::FileDoesNotExist(_) => {
|
||||
// this is normal behavior.
|
||||
// the position file for instance may not exists.
|
||||
}
|
||||
FileError::IOError(err) => {
|
||||
error!("Failed to remove {:?} : {:?}", self.segment_id, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Returns a FileProtection object. The file is guaranteed
|
||||
/// to not be garbage collected as long as this `FileProtection` object
|
||||
/// lives.
|
||||
pub fn protect_from_delete(&self, component: SegmentComponent) -> FileProtection {
|
||||
let path = self.relative_path(component);
|
||||
self.index.directory().protect_file_from_delete(&path)
|
||||
}
|
||||
|
||||
|
||||
/// Open one of the component file for read.
|
||||
pub fn open_read(&self, component: SegmentComponent) -> result::Result<ReadOnlySource, FileError> {
|
||||
/// Open one of the component file for a *regular* read.
|
||||
pub fn open_read(
|
||||
&self,
|
||||
component: SegmentComponent,
|
||||
) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
let path = self.relative_path(component);
|
||||
let source = try!(self.index.directory().open_read(&path));
|
||||
let source = self.index.directory().open_read(&path)?;
|
||||
Ok(source)
|
||||
}
|
||||
|
||||
/// Open one of the component file for write.
|
||||
pub fn open_write(&mut self, component: SegmentComponent) -> result::Result<WritePtr, OpenWriteError> {
|
||||
/// Open one of the component file for *regular* write.
|
||||
pub fn open_write(
|
||||
&mut self,
|
||||
component: SegmentComponent,
|
||||
) -> result::Result<WritePtr, OpenWriteError> {
|
||||
let path = self.relative_path(component);
|
||||
let write = try!(self.index.directory_mut().open_write(&path));
|
||||
let write = self.index.directory_mut().open_write(&path)?;
|
||||
Ok(write)
|
||||
}
|
||||
}
|
||||
@@ -108,7 +106,34 @@ pub trait SerializableSegment {
|
||||
fn write(&self, serializer: SegmentSerializer) -> Result<u32>;
|
||||
}
|
||||
|
||||
#[derive(Clone,Debug,RustcDecodable,RustcEncodable)]
|
||||
pub struct SegmentInfo {
|
||||
pub max_doc: DocId,
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use core::SegmentComponent;
|
||||
use directory::Directory;
|
||||
use std::collections::HashSet;
|
||||
use schema::SchemaBuilder;
|
||||
use Index;
|
||||
|
||||
#[test]
|
||||
fn test_segment_protect_component() {
|
||||
let mut index = Index::create_in_ram(SchemaBuilder::new().build());
|
||||
let segment = index.new_segment();
|
||||
let path = segment.relative_path(SegmentComponent::POSTINGS);
|
||||
|
||||
let directory = index.directory_mut();
|
||||
directory.atomic_write(&*path, &vec![0u8]).unwrap();
|
||||
|
||||
let living_files = HashSet::new();
|
||||
{
|
||||
let _file_protection = segment.protect_from_delete(SegmentComponent::POSTINGS);
|
||||
assert!(directory.exists(&*path));
|
||||
directory.garbage_collect(|| living_files.clone());
|
||||
assert!(directory.exists(&*path));
|
||||
}
|
||||
|
||||
directory.garbage_collect(|| living_files);
|
||||
assert!(!directory.exists(&*path));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,41 +1,41 @@
|
||||
use std::vec::IntoIter;
|
||||
|
||||
/// Enum describing each component of a tantivy segment.
|
||||
/// Each component is stored in its own file,
|
||||
/// using the pattern `segment_uuid`.`component_extension`,
|
||||
/// except the delete component that takes an `segment_uuid`.`delete_opstamp`.`component_extension`
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum SegmentComponent {
|
||||
INFO,
|
||||
/// Postings (or inverted list). Sorted lists of document ids, associated to terms
|
||||
POSTINGS,
|
||||
/// Positions of terms in each document.
|
||||
POSITIONS,
|
||||
/// Column-oriented random-access storage of fields.
|
||||
FASTFIELDS,
|
||||
/// Stores the sum of the length (in terms) of each field for each document.
|
||||
/// Field norms are stored as a special u64 fast field.
|
||||
FIELDNORMS,
|
||||
/// Dictionary associating `Term`s to `TermInfo`s which is
|
||||
/// simply an address into the `postings` file and the `positions` file.
|
||||
TERMS,
|
||||
/// Row-oriented, LZ4-compressed storage of the documents.
|
||||
/// Accessing a document from the store is relatively slow, as it
|
||||
/// requires to decompress the entire block it belongs to.
|
||||
STORE,
|
||||
/// Bitset describing which document of the segment is deleted.
|
||||
DELETE,
|
||||
}
|
||||
|
||||
impl SegmentComponent {
|
||||
pub fn values() -> IntoIter<SegmentComponent> {
|
||||
vec!(
|
||||
SegmentComponent::INFO,
|
||||
/// Iterates through the components.
|
||||
pub fn iterator() -> impl Iterator<Item = &'static SegmentComponent> {
|
||||
static SEGMENT_COMPONENTS: [SegmentComponent; 7] = [
|
||||
SegmentComponent::POSTINGS,
|
||||
SegmentComponent::POSITIONS,
|
||||
SegmentComponent::FASTFIELDS,
|
||||
SegmentComponent::FIELDNORMS,
|
||||
SegmentComponent::TERMS,
|
||||
SegmentComponent::STORE,
|
||||
).into_iter()
|
||||
}
|
||||
|
||||
pub fn path_suffix(&self)-> &'static str {
|
||||
match *self {
|
||||
SegmentComponent::POSITIONS => ".pos",
|
||||
SegmentComponent::INFO => ".info",
|
||||
SegmentComponent::POSTINGS => ".idx",
|
||||
SegmentComponent::TERMS => ".term",
|
||||
SegmentComponent::STORE => ".store",
|
||||
SegmentComponent::FASTFIELDS => ".fast",
|
||||
SegmentComponent::FIELDNORMS => ".fieldnorm",
|
||||
}
|
||||
SegmentComponent::DELETE,
|
||||
];
|
||||
SEGMENT_COMPONENTS.into_iter()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,34 +1,36 @@
|
||||
use uuid::Uuid;
|
||||
use std::fmt;
|
||||
use rustc_serialize::{Encoder, Decoder, Encodable, Decodable};
|
||||
use core::SegmentComponent;
|
||||
use std::path::PathBuf;
|
||||
use std::cmp::{Ordering, Ord};
|
||||
|
||||
use std::cmp::{Ord, Ordering};
|
||||
|
||||
#[cfg(test)]
|
||||
use std::sync::atomic;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
|
||||
/// Uuid identifying a segment.
|
||||
///
|
||||
/// Tantivy's segment are identified
|
||||
/// by a UUID which is used to prefix the filenames
|
||||
/// of all of the file associated with the segment.
|
||||
///
|
||||
/// In unit test, for reproducability, the `SegmentId` are
|
||||
/// simply generated in an autoincrement fashion.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct SegmentId(Uuid);
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
lazy_static! {
|
||||
static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default();
|
||||
static ref EMPTY_ARR: [u8; 8] = [0u8; 8];
|
||||
}
|
||||
|
||||
|
||||
// During tests, we generate the segment id in a autoincrement manner
|
||||
// for consistency of segment id between run.
|
||||
//
|
||||
// The order of the test execution is not guaranteed, but the order
|
||||
// The order of the test execution is not guaranteed, but the order
|
||||
// of segments within a single test is guaranteed.
|
||||
#[cfg(test)]
|
||||
fn create_uuid() -> Uuid {
|
||||
let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
|
||||
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*EMPTY_ARR)
|
||||
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*EMPTY_ARR).unwrap()
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
@@ -37,43 +39,34 @@ fn create_uuid() -> Uuid {
|
||||
}
|
||||
|
||||
impl SegmentId {
|
||||
#[doc(hidden)]
|
||||
pub fn generate_random() -> SegmentId {
|
||||
SegmentId(create_uuid())
|
||||
}
|
||||
|
||||
pub fn short_uuid_string(&self,) -> String {
|
||||
(&self.0.to_simple_string()[..8]).to_string()
|
||||
}
|
||||
|
||||
pub fn uuid_string(&self,) -> String {
|
||||
self.0.to_simple_string()
|
||||
}
|
||||
|
||||
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
|
||||
let filename = self.uuid_string() + component.path_suffix();
|
||||
PathBuf::from(filename)
|
||||
}
|
||||
}
|
||||
|
||||
impl Encodable for SegmentId {
|
||||
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
|
||||
self.0.encode(s)
|
||||
/// Returns a shorter identifier of the segment.
|
||||
///
|
||||
/// We are using UUID4, so only 6 bits are fixed,
|
||||
/// and the rest is random.
|
||||
///
|
||||
/// Picking the first 8 chars is ok to identify
|
||||
/// segments in a display message.
|
||||
pub fn short_uuid_string(&self) -> String {
|
||||
(&self.0.simple().to_string()[..8]).to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl Decodable for SegmentId {
|
||||
fn decode<D: Decoder>(d: &mut D) -> Result<Self, D::Error> {
|
||||
Uuid::decode(d).map(SegmentId)
|
||||
/// Returns a segment uuid string.
|
||||
pub fn uuid_string(&self) -> String {
|
||||
self.0.simple().to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SegmentId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "SegmentId({:?})", self.uuid_string())
|
||||
write!(f, "Seg({:?})", self.short_uuid_string())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl PartialOrd for SegmentId {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
|
||||
115
src/core/segment_meta.rs
Normal file
115
src/core/segment_meta.rs
Normal file
@@ -0,0 +1,115 @@
|
||||
use core::SegmentId;
|
||||
use super::SegmentComponent;
|
||||
use std::path::PathBuf;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
struct DeleteMeta {
|
||||
num_deleted_docs: u32,
|
||||
opstamp: u64,
|
||||
}
|
||||
|
||||
/// `SegmentMeta` contains simple meta information about a segment.
|
||||
///
|
||||
/// For instance the number of docs it contains,
|
||||
/// how many are deleted, etc.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct SegmentMeta {
|
||||
segment_id: SegmentId,
|
||||
max_doc: u32,
|
||||
deletes: Option<DeleteMeta>,
|
||||
}
|
||||
|
||||
impl SegmentMeta {
|
||||
/// Creates a new segment meta for
|
||||
/// a segment with no deletes and no documents.
|
||||
pub fn new(segment_id: SegmentId) -> SegmentMeta {
|
||||
SegmentMeta {
|
||||
segment_id,
|
||||
max_doc: 0,
|
||||
deletes: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the segment id.
|
||||
pub fn id(&self) -> SegmentId {
|
||||
self.segment_id
|
||||
}
|
||||
|
||||
/// Returns the number of deleted documents.
|
||||
pub fn num_deleted_docs(&self) -> u32 {
|
||||
self.deletes
|
||||
.as_ref()
|
||||
.map(|delete_meta| delete_meta.num_deleted_docs)
|
||||
.unwrap_or(0u32)
|
||||
}
|
||||
|
||||
/// Returns the list of files that
|
||||
/// are required for the segment meta.
|
||||
///
|
||||
/// This is useful as the way tantivy removes files
|
||||
/// is by removing all files that have been created by tantivy
|
||||
/// and are not used by any segment anymore.
|
||||
pub fn list_files(&self) -> HashSet<PathBuf> {
|
||||
SegmentComponent::iterator()
|
||||
.map(|component| self.relative_path(*component))
|
||||
.collect::<HashSet<PathBuf>>()
|
||||
}
|
||||
|
||||
/// Returns the relative path of a component of our segment.
|
||||
///
|
||||
/// It just joins the segment id with the extension
|
||||
/// associated to a segment component.
|
||||
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
|
||||
let mut path = self.id().uuid_string();
|
||||
path.push_str(&*match component {
|
||||
SegmentComponent::POSITIONS => ".pos".to_string(),
|
||||
SegmentComponent::POSTINGS => ".idx".to_string(),
|
||||
SegmentComponent::TERMS => ".term".to_string(),
|
||||
SegmentComponent::STORE => ".store".to_string(),
|
||||
SegmentComponent::FASTFIELDS => ".fast".to_string(),
|
||||
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
|
||||
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
|
||||
});
|
||||
PathBuf::from(path)
|
||||
}
|
||||
|
||||
/// Return the highest doc id + 1
|
||||
///
|
||||
/// If there are no deletes, then num_docs = max_docs
|
||||
/// and all the doc ids contains in this segment
|
||||
/// are exactly (0..max_doc).
|
||||
pub fn max_doc(&self) -> u32 {
|
||||
self.max_doc
|
||||
}
|
||||
|
||||
/// Return the number of documents in the segment.
|
||||
pub fn num_docs(&self) -> u32 {
|
||||
self.max_doc() - self.num_deleted_docs()
|
||||
}
|
||||
|
||||
/// Returns the opstamp of the last delete operation
|
||||
/// taken in account in this segment.
|
||||
pub fn delete_opstamp(&self) -> Option<u64> {
|
||||
self.deletes.as_ref().map(|delete_meta| delete_meta.opstamp)
|
||||
}
|
||||
|
||||
/// Returns true iff the segment meta contains
|
||||
/// delete information.
|
||||
pub fn has_deletes(&self) -> bool {
|
||||
self.deletes.is_some()
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn set_max_doc(&mut self, max_doc: u32) {
|
||||
self.max_doc = max_doc;
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn set_delete_meta(&mut self, num_deleted_docs: u32, opstamp: u64) {
|
||||
self.deletes = Some(DeleteMeta {
|
||||
num_deleted_docs,
|
||||
opstamp,
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -2,28 +2,23 @@ use Result;
|
||||
use core::Segment;
|
||||
use core::SegmentId;
|
||||
use core::SegmentComponent;
|
||||
use schema::Term;
|
||||
use std::sync::RwLock;
|
||||
use common::HasLen;
|
||||
use core::SegmentMeta;
|
||||
use fastfield::{self, FastFieldNotAvailableError};
|
||||
use fastfield::DeleteBitSet;
|
||||
use store::StoreReader;
|
||||
use schema::Document;
|
||||
use directory::ReadOnlySource;
|
||||
use schema::Document;
|
||||
use DocId;
|
||||
use std::io;
|
||||
use std::str;
|
||||
use postings::TermInfo;
|
||||
use datastruct::FstMap;
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
use common::CompositeFile;
|
||||
use std::fmt;
|
||||
use rustc_serialize::json;
|
||||
use core::SegmentInfo;
|
||||
use core::InvertedIndexReader;
|
||||
use schema::Field;
|
||||
use postings::SegmentPostingsOption;
|
||||
use postings::SegmentPostings;
|
||||
use fastfield::{U32FastFieldsReader, U32FastFieldReader};
|
||||
use fastfield::{FastFieldReader, U64FastFieldReader};
|
||||
use schema::Schema;
|
||||
use schema::FieldType;
|
||||
use postings::FreqHandler;
|
||||
use schema::TextIndexingOptions;
|
||||
use error::Error;
|
||||
|
||||
|
||||
/// Entry point to access all of the datastructures of the `Segment`
|
||||
///
|
||||
@@ -36,15 +31,21 @@ use error::Error;
|
||||
/// The segment reader has a very low memory footprint,
|
||||
/// as close to all of the memory data is mmapped.
|
||||
///
|
||||
#[derive(Clone)]
|
||||
pub struct SegmentReader {
|
||||
segment_info: SegmentInfo,
|
||||
inv_idx_reader_cache: Arc<RwLock<HashMap<Field, Arc<InvertedIndexReader>>>>,
|
||||
|
||||
segment_id: SegmentId,
|
||||
term_infos: FstMap<TermInfo>,
|
||||
postings_data: ReadOnlySource,
|
||||
segment_meta: SegmentMeta,
|
||||
|
||||
termdict_composite: CompositeFile,
|
||||
postings_composite: CompositeFile,
|
||||
positions_composite: CompositeFile,
|
||||
fast_fields_composite: CompositeFile,
|
||||
fieldnorms_composite: CompositeFile,
|
||||
|
||||
store_reader: StoreReader,
|
||||
fast_fields_reader: U32FastFieldsReader,
|
||||
fieldnorms_reader: U32FastFieldsReader,
|
||||
positions_data: ReadOnlySource,
|
||||
delete_bitset: DeleteBitSet,
|
||||
schema: Schema,
|
||||
}
|
||||
|
||||
@@ -54,108 +55,160 @@ impl SegmentReader {
|
||||
/// Today, `tantivy` does not handle deletes, so it happens
|
||||
/// to also be the number of documents in the index.
|
||||
pub fn max_doc(&self) -> DocId {
|
||||
self.segment_info.max_doc
|
||||
self.segment_meta.max_doc()
|
||||
}
|
||||
|
||||
|
||||
/// Returns the number of documents.
|
||||
/// Deleted documents are not counted.
|
||||
///
|
||||
/// Today, `tantivy` does not handle deletes so max doc and
|
||||
/// num_docs are the same.
|
||||
pub fn num_docs(&self) -> DocId {
|
||||
self.segment_info.max_doc
|
||||
self.segment_meta.num_docs()
|
||||
}
|
||||
|
||||
|
||||
/// Return the number of documents that have been
|
||||
/// deleted in the segment.
|
||||
pub fn num_deleted_docs(&self) -> DocId {
|
||||
self.delete_bitset.len() as DocId
|
||||
}
|
||||
|
||||
/// Accessor to a segment's fast field reader given a field.
|
||||
pub fn get_fast_field_reader(&self, field: Field) -> io::Result<U32FastFieldReader> {
|
||||
///
|
||||
/// Returns the u64 fast value reader if the field
|
||||
/// is a u64 field indexed as "fast".
|
||||
///
|
||||
/// Return a FastFieldNotAvailableError if the field is not
|
||||
/// declared as a fast field in the schema.
|
||||
///
|
||||
/// # Panics
|
||||
/// May panic if the index is corrupted.
|
||||
pub fn get_fast_field_reader<TFastFieldReader: FastFieldReader>(
|
||||
&self,
|
||||
field: Field,
|
||||
) -> fastfield::Result<TFastFieldReader> {
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
match *field_entry.field_type() {
|
||||
FieldType::Str(_) => {
|
||||
Err(io::Error::new(io::ErrorKind::Other, "fast field are not yet supported for text fields."))
|
||||
},
|
||||
FieldType::U32(_) => {
|
||||
// TODO check that the schema allows that
|
||||
//Err(io::Error::new(io::ErrorKind::Other, "fast field are not yet supported for text fields."))
|
||||
self.fast_fields_reader.get_field(field)
|
||||
},
|
||||
if !TFastFieldReader::is_enabled(field_entry.field_type()) {
|
||||
Err(FastFieldNotAvailableError::new(field_entry))
|
||||
} else {
|
||||
self.fast_fields_composite
|
||||
.open_read(field)
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
|
||||
.map(TFastFieldReader::open)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Accessor to the segment's `Field norms`'s reader.
|
||||
///
|
||||
/// Field norms are the length (in tokens) of the fields.
|
||||
/// It is used in the computation of the [TfIdf](https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
|
||||
/// It is used in the computation of the [TfIdf]
|
||||
/// (https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
|
||||
///
|
||||
/// They are simply stored as a fast field, serialized in
|
||||
/// the `.fieldnorm` file of the segment.
|
||||
pub fn get_fieldnorms_reader(&self, field: Field) -> io::Result<U32FastFieldReader> {
|
||||
self.fieldnorms_reader.get_field(field)
|
||||
/// They are simply stored as a fast field, serialized in
|
||||
/// the `.fieldnorm` file of the segment.
|
||||
pub fn get_fieldnorms_reader(&self, field: Field) -> Option<U64FastFieldReader> {
|
||||
self.fieldnorms_composite
|
||||
.open_read(field)
|
||||
.map(U64FastFieldReader::open)
|
||||
}
|
||||
|
||||
/// Returns the number of documents containing the term.
|
||||
pub fn doc_freq(&self, term: &Term) -> u32 {
|
||||
match self.get_term_info(term) {
|
||||
Some(term_info) => term_info.doc_freq,
|
||||
None => 0,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Accessor to the segment's `StoreReader`.
|
||||
pub fn get_store_reader(&self) -> &StoreReader {
|
||||
&self.store_reader
|
||||
}
|
||||
|
||||
/// Open a new segment for reading.
|
||||
pub fn open(segment: Segment) -> Result<SegmentReader> {
|
||||
let segment_info_reader = try!(segment.open_read(SegmentComponent::INFO));
|
||||
let segment_info_data = try!(
|
||||
str::from_utf8(&*segment_info_reader)
|
||||
.map_err(|err| {
|
||||
let segment_info_filepath = segment.relative_path(SegmentComponent::INFO);
|
||||
Error::CorruptedFile(segment_info_filepath, Box::new(err))
|
||||
})
|
||||
);
|
||||
let segment_info: SegmentInfo = try!(
|
||||
json::decode(&segment_info_data)
|
||||
.map_err(|err| {
|
||||
let file_path = segment.relative_path(SegmentComponent::INFO);
|
||||
Error::CorruptedFile(file_path, Box::new(err))
|
||||
})
|
||||
);
|
||||
let source = try!(segment.open_read(SegmentComponent::TERMS));
|
||||
let term_infos = try!(FstMap::from_source(source));
|
||||
let store_reader = StoreReader::from(try!(segment.open_read(SegmentComponent::STORE)));
|
||||
let postings_shared_mmap = try!(segment.open_read(SegmentComponent::POSTINGS));
|
||||
|
||||
let fast_field_data = try!(segment.open_read(SegmentComponent::FASTFIELDS));
|
||||
let fast_fields_reader = try!(U32FastFieldsReader::open(fast_field_data));
|
||||
|
||||
let fieldnorms_data = try!(segment.open_read(SegmentComponent::FIELDNORMS));
|
||||
let fieldnorms_reader = try!(U32FastFieldsReader::open(fieldnorms_data));
|
||||
|
||||
let positions_data = segment
|
||||
.open_read(SegmentComponent::POSITIONS)
|
||||
.unwrap_or_else(|_| ReadOnlySource::empty());
|
||||
|
||||
pub fn open(segment: &Segment) -> Result<SegmentReader> {
|
||||
let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
|
||||
let termdict_composite = CompositeFile::open(&termdict_source)?;
|
||||
|
||||
let store_source = segment.open_read(SegmentComponent::STORE)?;
|
||||
let store_reader = StoreReader::from_source(store_source);
|
||||
|
||||
let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
|
||||
let postings_composite = CompositeFile::open(&postings_source)?;
|
||||
|
||||
let positions_composite = {
|
||||
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
|
||||
CompositeFile::open(&source)?
|
||||
} else {
|
||||
CompositeFile::empty()
|
||||
}
|
||||
};
|
||||
|
||||
let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
|
||||
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
|
||||
|
||||
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
|
||||
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
|
||||
|
||||
let delete_bitset = if segment.meta().has_deletes() {
|
||||
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
|
||||
DeleteBitSet::open(delete_data)
|
||||
} else {
|
||||
DeleteBitSet::empty()
|
||||
};
|
||||
|
||||
let schema = segment.schema();
|
||||
Ok(SegmentReader {
|
||||
segment_info: segment_info,
|
||||
postings_data: postings_shared_mmap,
|
||||
term_infos: term_infos,
|
||||
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
|
||||
segment_meta: segment.meta().clone(),
|
||||
termdict_composite,
|
||||
postings_composite,
|
||||
fast_fields_composite,
|
||||
fieldnorms_composite,
|
||||
segment_id: segment.id(),
|
||||
store_reader: store_reader,
|
||||
fast_fields_reader: fast_fields_reader,
|
||||
fieldnorms_reader: fieldnorms_reader,
|
||||
positions_data: positions_data,
|
||||
schema: schema,
|
||||
store_reader,
|
||||
delete_bitset,
|
||||
positions_composite,
|
||||
schema,
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the term dictionary datastructure.
|
||||
pub fn term_infos(&self) -> &FstMap<TermInfo> {
|
||||
&self.term_infos
|
||||
|
||||
/// Returns a field reader associated to the field given in argument.
|
||||
///
|
||||
/// The field reader is in charge of iterating through the
|
||||
/// term dictionary associated to a specific field,
|
||||
/// and opening the posting list associated to any term.
|
||||
pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
|
||||
if let Some(inv_idx_reader) = self.inv_idx_reader_cache
|
||||
.read()
|
||||
.expect("Lock poisoned. This should never happen")
|
||||
.get(&field)
|
||||
{
|
||||
Arc::clone(inv_idx_reader);
|
||||
}
|
||||
|
||||
let termdict_source: ReadOnlySource = self.termdict_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field term dictionary in composite file.");
|
||||
|
||||
let postings_source = self.postings_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field postings in composite file.");
|
||||
|
||||
let positions_source = self.positions_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field positions in composite file.");
|
||||
|
||||
let inv_idx_reader = Arc::new(InvertedIndexReader::new(
|
||||
termdict_source,
|
||||
postings_source,
|
||||
positions_source,
|
||||
self.delete_bitset.clone(),
|
||||
self.schema.clone(),
|
||||
));
|
||||
|
||||
// by releasing the lock in between, we may end up opening the inverting index
|
||||
// twice, but this is fine.
|
||||
self.inv_idx_reader_cache
|
||||
.write()
|
||||
.expect("Field reader cache lock poisoned. This should never happen.")
|
||||
.insert(field, Arc::clone(&inv_idx_reader));
|
||||
|
||||
inv_idx_reader
|
||||
}
|
||||
|
||||
|
||||
/// Returns the document (or to be accurate, its stored field)
|
||||
/// bearing the given doc id.
|
||||
/// This method is slow and should seldom be called from
|
||||
@@ -164,82 +217,24 @@ impl SegmentReader {
|
||||
self.store_reader.get(doc_id)
|
||||
}
|
||||
|
||||
/// Returns the segment id
|
||||
pub fn segment_id(&self) -> SegmentId {
|
||||
self.segment_id
|
||||
}
|
||||
|
||||
/// Returns the segment postings associated with the term, and with the given option,
|
||||
/// or `None` if the term has never been encounterred and indexed.
|
||||
///
|
||||
/// If the field was not indexed with the indexing options that cover
|
||||
/// the requested options, the returned `SegmentPostings` the method does not fail
|
||||
/// and returns a `SegmentPostings` with as much information as possible.
|
||||
///
|
||||
/// For instance, requesting `SegmentPostingsOption::FreqAndPositions` for a `TextIndexingOptions`
|
||||
/// that does not index position will return a `SegmentPostings` with `DocId`s and frequencies.
|
||||
pub fn read_postings(&self, term: &Term, option: SegmentPostingsOption) -> Option<SegmentPostings> {
|
||||
let field = term.field();
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let term_info = get!(self.get_term_info(&term));
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let postings_data = &self.postings_data[offset..];
|
||||
let freq_handler = match *field_entry.field_type() {
|
||||
FieldType::Str(ref options) => {
|
||||
let indexing_options = options.get_indexing_options();
|
||||
match option {
|
||||
SegmentPostingsOption::NoFreq => {
|
||||
FreqHandler::new_without_freq()
|
||||
}
|
||||
SegmentPostingsOption::Freq => {
|
||||
if indexing_options.is_termfreq_enabled() {
|
||||
FreqHandler::new_with_freq()
|
||||
}
|
||||
else {
|
||||
FreqHandler::new_without_freq()
|
||||
}
|
||||
}
|
||||
SegmentPostingsOption::FreqAndPositions => {
|
||||
if indexing_options == TextIndexingOptions::TokenizedWithFreqAndPosition {
|
||||
let offseted_position_data = &self.positions_data[term_info.positions_offset as usize ..];
|
||||
FreqHandler::new_with_freq_and_position(offseted_position_data)
|
||||
}
|
||||
else if indexing_options.is_termfreq_enabled()
|
||||
{
|
||||
FreqHandler::new_with_freq()
|
||||
}
|
||||
else {
|
||||
FreqHandler::new_without_freq()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
FreqHandler::new_without_freq()
|
||||
}
|
||||
};
|
||||
Some(SegmentPostings::from_data(term_info.doc_freq, postings_data, freq_handler))
|
||||
/// Returns the bitset representing
|
||||
/// the documents that have been deleted.
|
||||
pub fn delete_bitset(&self) -> &DeleteBitSet {
|
||||
&self.delete_bitset
|
||||
}
|
||||
|
||||
/// Returns the posting list associated with a term.
|
||||
pub fn read_postings_all_info(&self, term: &Term) -> Option<SegmentPostings> {
|
||||
let field_entry = self.schema.get_field_entry(term.field());
|
||||
let segment_posting_option = match *field_entry.field_type() {
|
||||
FieldType::Str(ref text_options) => {
|
||||
match text_options.get_indexing_options() {
|
||||
TextIndexingOptions::TokenizedWithFreq => SegmentPostingsOption::Freq,
|
||||
TextIndexingOptions::TokenizedWithFreqAndPosition => SegmentPostingsOption::FreqAndPositions,
|
||||
_ => SegmentPostingsOption::NoFreq,
|
||||
}
|
||||
}
|
||||
FieldType::U32(_) => SegmentPostingsOption::NoFreq
|
||||
};
|
||||
self.read_postings(term, segment_posting_option)
|
||||
}
|
||||
|
||||
/// Returns the term info associated with the term.
|
||||
pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
|
||||
self.term_infos.get(term.as_slice())
|
||||
|
||||
/// Returns true iff the `doc` is marked
|
||||
/// as deleted.
|
||||
pub fn is_deleted(&self, doc: DocId) -> bool {
|
||||
self.delete_bitset.is_deleted(doc)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl fmt::Debug for SegmentReader {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "SegmentReader({:?})", self.segment_id)
|
||||
|
||||
@@ -1,149 +0,0 @@
|
||||
#![allow(should_implement_trait)]
|
||||
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use fst;
|
||||
use fst::raw::Fst;
|
||||
use fst::Streamer;
|
||||
|
||||
use directory::ReadOnlySource;
|
||||
use common::BinarySerializable;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
fn convert_fst_error(e: fst::Error) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, e)
|
||||
}
|
||||
|
||||
pub struct FstMapBuilder<W: Write, V: BinarySerializable> {
|
||||
fst_builder: fst::MapBuilder<W>,
|
||||
data: Vec<u8>,
|
||||
_phantom_: PhantomData<V>,
|
||||
}
|
||||
|
||||
impl<W: Write, V: BinarySerializable> FstMapBuilder<W, V> {
|
||||
|
||||
pub fn new(w: W) -> io::Result<FstMapBuilder<W, V>> {
|
||||
let fst_builder = try!(fst::MapBuilder::new(w).map_err(convert_fst_error));
|
||||
Ok(FstMapBuilder {
|
||||
fst_builder: fst_builder,
|
||||
data: Vec::new(),
|
||||
_phantom_: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, key: &[u8], value: &V) -> io::Result<()>{
|
||||
try!(self.fst_builder
|
||||
.insert(key, self.data.len() as u64)
|
||||
.map_err(convert_fst_error));
|
||||
try!(value.serialize(&mut self.data));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn finish(self,) -> io::Result<W> {
|
||||
let mut file = try!(
|
||||
self.fst_builder
|
||||
.into_inner()
|
||||
.map_err(convert_fst_error));
|
||||
let footer_size = self.data.len() as u32;
|
||||
try!(file.write_all(&self.data));
|
||||
try!((footer_size as u32).serialize(&mut file));
|
||||
try!(file.flush());
|
||||
Ok(file)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FstMap<V: BinarySerializable> {
|
||||
fst_index: fst::Map,
|
||||
values_mmap: ReadOnlySource,
|
||||
_phantom_: PhantomData<V>,
|
||||
}
|
||||
|
||||
|
||||
fn open_fst_index(source: ReadOnlySource) -> io::Result<fst::Map> {
|
||||
Ok(fst::Map::from(match source {
|
||||
ReadOnlySource::Anonymous(data) => try!(Fst::from_shared_bytes(data.data, data.start, data.len).map_err(convert_fst_error)),
|
||||
ReadOnlySource::Mmap(mmap_readonly) => try!(Fst::from_mmap(mmap_readonly).map_err(convert_fst_error)),
|
||||
}))
|
||||
}
|
||||
|
||||
pub struct FstKeyIter<'a, V: 'static + BinarySerializable> {
|
||||
streamer: fst::map::Stream<'a>,
|
||||
__phantom__: PhantomData<V>
|
||||
}
|
||||
|
||||
impl<'a, V: 'static + BinarySerializable> FstKeyIter<'a, V> {
|
||||
pub fn next(&mut self) -> Option<(&[u8])> {
|
||||
self.streamer
|
||||
.next()
|
||||
.map(|(k, _)| k)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<V: BinarySerializable> FstMap<V> {
|
||||
|
||||
pub fn keys(&self,) -> FstKeyIter<V> {
|
||||
FstKeyIter {
|
||||
streamer: self.fst_index.stream(),
|
||||
__phantom__: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_source(source: ReadOnlySource) -> io::Result<FstMap<V>> {
|
||||
let total_len = source.len();
|
||||
let length_offset = total_len - 4;
|
||||
let mut split_len_buffer: &[u8] = &source.as_slice()[length_offset..];
|
||||
let footer_size = try!(u32::deserialize(&mut split_len_buffer)) as usize;
|
||||
let split_len = length_offset - footer_size;
|
||||
let fst_source = source.slice(0, split_len);
|
||||
let values_source = source.slice(split_len, length_offset);
|
||||
let fst_index = try!(open_fst_index(fst_source));
|
||||
Ok(FstMap {
|
||||
fst_index: fst_index,
|
||||
values_mmap: values_source,
|
||||
_phantom_: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
fn read_value(&self, offset: u64) -> V {
|
||||
let buffer = self.values_mmap.as_slice();
|
||||
let mut cursor = &buffer[(offset as usize)..];
|
||||
V::deserialize(&mut cursor).expect("Data in FST is corrupted")
|
||||
}
|
||||
|
||||
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Option<V> {
|
||||
self.fst_index
|
||||
.get(key)
|
||||
.map(|offset| self.read_value(offset))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use directory::{RAMDirectory, Directory};
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn test_fstmap() {
|
||||
let mut directory = RAMDirectory::create();
|
||||
let path = PathBuf::from("fstmap");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let mut fstmap_builder = FstMapBuilder::new(write).unwrap();
|
||||
fstmap_builder.insert("abc".as_bytes(), &34u32).unwrap();
|
||||
fstmap_builder.insert("abcd".as_bytes(), &346u32).unwrap();
|
||||
fstmap_builder.finish().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let fstmap: FstMap<u32> = FstMap::from_source(source).unwrap();
|
||||
assert_eq!(fstmap.get("abc"), Some(34u32));
|
||||
assert_eq!(fstmap.get("abcd"), Some(346u32));
|
||||
let mut keys = fstmap.keys();
|
||||
assert_eq!(keys.next().unwrap(), "abc".as_bytes());
|
||||
assert_eq!(keys.next().unwrap(), "abcd".as_bytes());
|
||||
assert_eq!(keys.next(), None);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,8 +1,4 @@
|
||||
mod fstmap;
|
||||
mod skip;
|
||||
pub mod stacker;
|
||||
|
||||
pub use self::fstmap::FstMapBuilder;
|
||||
pub use self::fstmap::FstMap;
|
||||
pub use self::fstmap::FstKeyIter;
|
||||
pub use self::skip::{SkipListBuilder, SkipList};
|
||||
pub use self::skip::{SkipList, SkipListBuilder};
|
||||
|
||||
@@ -6,8 +6,6 @@ mod skiplist;
|
||||
pub use self::skiplist_builder::SkipListBuilder;
|
||||
pub use self::skiplist::SkipList;
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -114,9 +112,9 @@ mod tests {
|
||||
let mut skip_list: SkipList<()> = SkipList::from(output.as_slice());
|
||||
assert_eq!(skip_list.next().unwrap(), (0, ()));
|
||||
skip_list.seek(431);
|
||||
assert_eq!(skip_list.next().unwrap(), (431,()) );
|
||||
assert_eq!(skip_list.next().unwrap(), (431, ()));
|
||||
skip_list.seek(1003);
|
||||
assert_eq!(skip_list.next().unwrap(), (1004,()) );
|
||||
assert_eq!(skip_list.next().unwrap(), (1004, ()));
|
||||
assert_eq!(skip_list.next(), None);
|
||||
}
|
||||
|
||||
|
||||
@@ -13,14 +13,12 @@ struct Layer<'a, T> {
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
|
||||
|
||||
type Item = (DocId, T);
|
||||
|
||||
fn next(&mut self,)-> Option<(DocId, T)> {
|
||||
fn next(&mut self) -> Option<(DocId, T)> {
|
||||
if self.next_id == u32::max_value() {
|
||||
None
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
let cur_val = T::deserialize(&mut self.cursor).unwrap();
|
||||
let cur_id = self.next_id;
|
||||
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
|
||||
@@ -31,19 +29,18 @@ impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
|
||||
|
||||
impl<'a, T: BinarySerializable> From<&'a [u8]> for Layer<'a, T> {
|
||||
fn from(data: &'a [u8]) -> Layer<'a, T> {
|
||||
let mut cursor = data;
|
||||
let mut cursor = data;
|
||||
let next_id = u32::deserialize(&mut cursor).unwrap_or(u32::max_value());
|
||||
Layer {
|
||||
data: data,
|
||||
cursor: cursor,
|
||||
next_id: next_id,
|
||||
data,
|
||||
cursor,
|
||||
next_id,
|
||||
_phantom_: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
|
||||
fn empty() -> Layer<'a, T> {
|
||||
Layer {
|
||||
data: &EMPTY,
|
||||
@@ -53,11 +50,11 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
}
|
||||
}
|
||||
|
||||
fn seek_offset(&mut self, offset: usize) {
|
||||
fn seek_offset(&mut self, offset: usize) {
|
||||
self.cursor = &self.data[offset..];
|
||||
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
|
||||
}
|
||||
|
||||
|
||||
// Returns the last element (key, val)
|
||||
// such that (key < doc_id)
|
||||
//
|
||||
@@ -67,31 +64,32 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
let mut val = None;
|
||||
while self.next_id < doc_id {
|
||||
match self.next() {
|
||||
None => { break; },
|
||||
v => { val = v; }
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
v => {
|
||||
val = v;
|
||||
}
|
||||
}
|
||||
}
|
||||
val
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct SkipList<'a, T: BinarySerializable> {
|
||||
data_layer: Layer<'a, T>,
|
||||
skip_layers: Vec<Layer<'a, u32>>,
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> Iterator for SkipList<'a, T> {
|
||||
|
||||
type Item = (DocId, T);
|
||||
|
||||
fn next(&mut self,)-> Option<(DocId, T)> {
|
||||
fn next(&mut self) -> Option<(DocId, T)> {
|
||||
self.data_layer.next()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> SkipList<'a, T> {
|
||||
|
||||
pub fn seek(&mut self, doc_id: DocId) -> Option<(DocId, T)> {
|
||||
let mut next_layer_skip: Option<(DocId, u32)> = None;
|
||||
for skip_layer in &mut self.skip_layers {
|
||||
@@ -99,39 +97,32 @@ impl<'a, T: BinarySerializable> SkipList<'a, T> {
|
||||
skip_layer.seek_offset(offset as usize);
|
||||
}
|
||||
next_layer_skip = skip_layer.seek(doc_id);
|
||||
}
|
||||
if let Some((_, offset)) = next_layer_skip {
|
||||
self.data_layer.seek_offset(offset as usize);
|
||||
}
|
||||
self.data_layer.seek(doc_id)
|
||||
}
|
||||
if let Some((_, offset)) = next_layer_skip {
|
||||
self.data_layer.seek_offset(offset as usize);
|
||||
}
|
||||
self.data_layer.seek(doc_id)
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
impl<'a, T: BinarySerializable> From<&'a [u8]> for SkipList<'a, T> {
|
||||
|
||||
fn from(mut data: &'a [u8]) -> SkipList<'a, T> {
|
||||
let offsets: Vec<u32> = Vec::deserialize(&mut data).unwrap();
|
||||
let num_layers = offsets.len();
|
||||
let layers_data: &[u8] = data;
|
||||
let data_layer: Layer<'a, T> =
|
||||
if num_layers == 0 { Layer::empty() }
|
||||
else {
|
||||
let first_layer_data: &[u8] = &layers_data[..offsets[0] as usize];
|
||||
Layer::from(first_layer_data)
|
||||
};
|
||||
let data_layer: Layer<'a, T> = if num_layers == 0 {
|
||||
Layer::empty()
|
||||
} else {
|
||||
let first_layer_data: &[u8] = &layers_data[..offsets[0] as usize];
|
||||
Layer::from(first_layer_data)
|
||||
};
|
||||
let skip_layers = (0..max(1, num_layers) - 1)
|
||||
.map(|i| (offsets[i] as usize, offsets[i + 1] as usize))
|
||||
.map(|(start, stop)| {
|
||||
Layer::from(&layers_data[start..stop])
|
||||
})
|
||||
.map(|(start, stop)| Layer::from(&layers_data[start..stop]))
|
||||
.collect();
|
||||
SkipList {
|
||||
skip_layers: skip_layers,
|
||||
data_layer: data_layer,
|
||||
skip_layers,
|
||||
data_layer,
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -13,19 +13,18 @@ struct LayerBuilder<T: BinarySerializable> {
|
||||
}
|
||||
|
||||
impl<T: BinarySerializable> LayerBuilder<T> {
|
||||
|
||||
fn written_size(&self,) -> usize {
|
||||
fn written_size(&self) -> usize {
|
||||
self.buffer.len()
|
||||
}
|
||||
|
||||
fn write(&self, output: &mut Write) -> Result<(), io::Error> {
|
||||
try!(output.write_all(&self.buffer));
|
||||
output.write_all(&self.buffer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn with_period(period: usize) -> LayerBuilder<T> {
|
||||
LayerBuilder {
|
||||
period: period,
|
||||
period,
|
||||
buffer: Vec::new(),
|
||||
remaining: period,
|
||||
len: 0,
|
||||
@@ -36,30 +35,28 @@ impl<T: BinarySerializable> LayerBuilder<T> {
|
||||
fn insert(&mut self, doc_id: DocId, value: &T) -> io::Result<Option<(DocId, u32)>> {
|
||||
self.remaining -= 1;
|
||||
self.len += 1;
|
||||
let offset = self.written_size() as u32; // TODO not sure if we want after or here
|
||||
try!(doc_id.serialize(&mut self.buffer));
|
||||
try!(value.serialize(&mut self.buffer));
|
||||
let offset = self.written_size() as u32;
|
||||
doc_id.serialize(&mut self.buffer)?;
|
||||
value.serialize(&mut self.buffer)?;
|
||||
Ok(if self.remaining == 0 {
|
||||
self.remaining = self.period;
|
||||
Some((doc_id, offset))
|
||||
}
|
||||
else { None })
|
||||
self.remaining = self.period;
|
||||
Some((doc_id, offset))
|
||||
} else {
|
||||
None
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct SkipListBuilder<T: BinarySerializable> {
|
||||
period: usize,
|
||||
data_layer: LayerBuilder<T>,
|
||||
skip_layers: Vec<LayerBuilder<u32>>,
|
||||
}
|
||||
|
||||
|
||||
impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
|
||||
pub fn new(period: usize) -> SkipListBuilder<T> {
|
||||
SkipListBuilder {
|
||||
period: period,
|
||||
period,
|
||||
data_layer: LayerBuilder::with_period(period),
|
||||
skip_layers: Vec::new(),
|
||||
}
|
||||
@@ -75,20 +72,20 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
|
||||
pub fn insert(&mut self, doc_id: DocId, dest: &T) -> io::Result<()> {
|
||||
let mut layer_id = 0;
|
||||
let mut skip_pointer = try!(self.data_layer.insert(doc_id, dest));
|
||||
let mut skip_pointer = self.data_layer.insert(doc_id, dest)?;
|
||||
loop {
|
||||
skip_pointer = match skip_pointer {
|
||||
Some((skip_doc_id, skip_offset)) =>
|
||||
try!(self
|
||||
.get_skip_layer(layer_id)
|
||||
.insert(skip_doc_id, &skip_offset)),
|
||||
None => { return Ok(()); }
|
||||
Some((skip_doc_id, skip_offset)) => self.get_skip_layer(layer_id)
|
||||
.insert(skip_doc_id, &skip_offset)?,
|
||||
None => {
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
layer_id += 1;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write<W: Write>(self, output: &mut Write) -> io::Result<()> {
|
||||
pub fn write<W: Write>(self, output: &mut W) -> io::Result<()> {
|
||||
let mut size: u32 = 0;
|
||||
let mut layer_sizes: Vec<u32> = Vec::new();
|
||||
size += self.data_layer.buffer.len() as u32;
|
||||
@@ -97,10 +94,10 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
size += layer.buffer.len() as u32;
|
||||
layer_sizes.push(size);
|
||||
}
|
||||
try!(layer_sizes.serialize(output));
|
||||
try!(self.data_layer.write(output));
|
||||
layer_sizes.serialize(output)?;
|
||||
self.data_layer.write(output)?;
|
||||
for layer in self.skip_layers.iter().rev() {
|
||||
try!(layer.write(output));
|
||||
layer.write(output)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::mem;
|
||||
use super::heap::{Heap, HeapAllocable};
|
||||
|
||||
|
||||
#[inline]
|
||||
pub fn is_power_of_2(val: u32) -> bool {
|
||||
val & (val - 1) == 0
|
||||
@@ -9,11 +8,10 @@ pub fn is_power_of_2(val: u32) -> bool {
|
||||
|
||||
#[inline]
|
||||
pub fn jump_needed(val: u32) -> bool {
|
||||
val > 3 && is_power_of_2(val)
|
||||
val > 3 && is_power_of_2(val)
|
||||
}
|
||||
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ExpUnrolledLinkedList {
|
||||
len: u32,
|
||||
end: u32,
|
||||
@@ -24,10 +22,9 @@ pub struct ExpUnrolledLinkedList {
|
||||
}
|
||||
|
||||
impl ExpUnrolledLinkedList {
|
||||
|
||||
pub fn iter<'a>(&self, addr: u32, heap: &'a Heap) -> ExpUnrolledLinkedListIterator<'a> {
|
||||
ExpUnrolledLinkedListIterator {
|
||||
heap: heap,
|
||||
heap,
|
||||
addr: addr + 2u32 * (mem::size_of::<u32>() as u32),
|
||||
len: self.len,
|
||||
consumed: 0,
|
||||
@@ -42,16 +39,21 @@ impl ExpUnrolledLinkedList {
|
||||
// the next block as a size of (length so far),
|
||||
// and we need to add 1u32 to store the pointer
|
||||
// to the next element.
|
||||
let new_block_size: usize = (self.len as usize + 1) * mem::size_of::<u32>();
|
||||
let new_block_size: usize = (self.len as usize + 1) * mem::size_of::<u32>();
|
||||
let new_block_addr: u32 = heap.allocate_space(new_block_size);
|
||||
heap.set(self.end, &new_block_addr);
|
||||
self.end = new_block_addr;
|
||||
self.end = new_block_addr;
|
||||
}
|
||||
heap.set(self.end, &val);
|
||||
self.end += mem::size_of::<u32>() as u32;
|
||||
}
|
||||
}
|
||||
|
||||
impl HeapAllocable for u32 {
|
||||
fn with_addr(_addr: u32) -> u32 {
|
||||
0u32
|
||||
}
|
||||
}
|
||||
|
||||
impl HeapAllocable for ExpUnrolledLinkedList {
|
||||
fn with_addr(addr: u32) -> ExpUnrolledLinkedList {
|
||||
@@ -77,33 +79,26 @@ pub struct ExpUnrolledLinkedListIterator<'a> {
|
||||
impl<'a> Iterator for ExpUnrolledLinkedListIterator<'a> {
|
||||
type Item = u32;
|
||||
|
||||
fn next(&mut self,) -> Option<u32> {
|
||||
fn next(&mut self) -> Option<u32> {
|
||||
if self.consumed == self.len {
|
||||
None
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
let addr: u32;
|
||||
self.consumed += 1;
|
||||
if jump_needed(self.consumed) {
|
||||
addr = *self.heap.get_mut_ref(self.addr);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
addr = self.addr;
|
||||
}
|
||||
self.addr = addr + mem::size_of::<u32>() as u32;
|
||||
Some(*self.heap.get_mut_ref(addr))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
|
||||
use super::*;
|
||||
use super::super::heap::Heap;
|
||||
use test::Bencher;
|
||||
@@ -147,7 +142,7 @@ mod tests {
|
||||
|
||||
#[bench]
|
||||
fn bench_push_stack(bench: &mut Bencher) {
|
||||
let heap = Heap::with_capacity(64_000_000);
|
||||
let heap = Heap::with_capacity(64_000_000);
|
||||
bench.iter(|| {
|
||||
let mut stacks = Vec::with_capacity(100);
|
||||
for _ in 0..NUM_STACK {
|
||||
@@ -163,4 +158,4 @@ mod tests {
|
||||
heap.clear();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,26 +1,76 @@
|
||||
use std::iter;
|
||||
use std::marker::PhantomData;
|
||||
use super::heap::{Heap, HeapAllocable, BytesRef};
|
||||
use std::mem;
|
||||
use super::heap::{BytesRef, Heap, HeapAllocable};
|
||||
|
||||
mod murmurhash2 {
|
||||
|
||||
const SEED: u32 = 3_242_157_231u32;
|
||||
|
||||
#[inline(always)]
|
||||
pub fn murmurhash2(key: &[u8]) -> u32 {
|
||||
let mut key_ptr: *const u32 = key.as_ptr() as *const u32;
|
||||
let m: u32 = 0x5bd1_e995;
|
||||
let r = 24;
|
||||
let len = key.len() as u32;
|
||||
|
||||
/// dbj2 hash function
|
||||
fn djb2(key: &[u8]) -> u64 {
|
||||
let mut state: u64 = 5381;
|
||||
for &b in key {
|
||||
state = (state << 5).wrapping_add(state).wrapping_add(b as u64);
|
||||
let mut h: u32 = SEED ^ len;
|
||||
let num_blocks = len >> 2;
|
||||
for _ in 0..num_blocks {
|
||||
let mut k: u32 = unsafe { *key_ptr };
|
||||
k = k.wrapping_mul(m);
|
||||
k ^= k >> r;
|
||||
k = k.wrapping_mul(m);
|
||||
k = k.wrapping_mul(m);
|
||||
h ^= k;
|
||||
key_ptr = key_ptr.wrapping_offset(1);
|
||||
}
|
||||
|
||||
// Handle the last few bytes of the input array
|
||||
let remaining = len & 3;
|
||||
let key_ptr_u8: *const u8 = key_ptr as *const u8;
|
||||
match remaining {
|
||||
3 => {
|
||||
h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(2)) } << 16;
|
||||
h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(1)) } << 8;
|
||||
h ^= unsafe { u32::from(*key_ptr_u8) };
|
||||
h = h.wrapping_mul(m);
|
||||
}
|
||||
2 => {
|
||||
h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(1)) } << 8;
|
||||
h ^= unsafe { u32::from(*key_ptr_u8) };
|
||||
h = h.wrapping_mul(m);
|
||||
}
|
||||
1 => {
|
||||
h ^= unsafe { u32::from(*key_ptr_u8) };
|
||||
h = h.wrapping_mul(m);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
h ^= h >> 13;
|
||||
h = h.wrapping_mul(m);
|
||||
h ^ (h >> 15)
|
||||
}
|
||||
state
|
||||
}
|
||||
|
||||
impl Default for BytesRef {
|
||||
fn default() -> BytesRef {
|
||||
BytesRef {
|
||||
start: 0u32,
|
||||
stop: 0u32,
|
||||
}
|
||||
}
|
||||
/// Split the thread memory budget into
|
||||
/// - the heap size
|
||||
/// - the hash table "table" itself.
|
||||
///
|
||||
/// Returns (the heap size in bytes, the hash table size in number of bits)
|
||||
pub(crate) fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
|
||||
let table_size_limit: usize = per_thread_memory_budget / 3;
|
||||
let compute_table_size = |num_bits: usize| (1 << num_bits) * mem::size_of::<KeyValue>();
|
||||
let table_num_bits: usize = (1..)
|
||||
.into_iter()
|
||||
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_size_limit)
|
||||
.last()
|
||||
.expect(&format!(
|
||||
"Per thread memory is too small: {}",
|
||||
per_thread_memory_budget
|
||||
));
|
||||
let table_size = compute_table_size(table_num_bits);
|
||||
let heap_size = per_thread_memory_budget - table_size;
|
||||
(heap_size, table_num_bits)
|
||||
}
|
||||
|
||||
/// `KeyValue` is the item stored in the hash table.
|
||||
@@ -29,27 +79,21 @@ impl Default for BytesRef {
|
||||
///
|
||||
/// The key and the value are actually stored contiguously.
|
||||
/// For this reason, the (start, stop) information is actually redundant
|
||||
/// and can be simplified in the future
|
||||
/// and can be simplified in the future
|
||||
#[derive(Copy, Clone, Default)]
|
||||
struct KeyValue {
|
||||
key: BytesRef,
|
||||
value_addr: u32,
|
||||
key_value_addr: BytesRef,
|
||||
hash: u32,
|
||||
}
|
||||
|
||||
impl KeyValue {
|
||||
fn is_empty(&self,) -> bool {
|
||||
self.key.stop == 0u32
|
||||
fn is_empty(&self) -> bool {
|
||||
self.key_value_addr.is_null()
|
||||
}
|
||||
}
|
||||
|
||||
pub enum Entry {
|
||||
Vacant(usize),
|
||||
Occupied(u32),
|
||||
}
|
||||
|
||||
|
||||
/// Customized `HashMap` with string keys
|
||||
///
|
||||
///
|
||||
/// This `HashMap` takes String as keys. Keys are
|
||||
/// stored in a user defined heap.
|
||||
///
|
||||
@@ -57,114 +101,105 @@ pub enum Entry {
|
||||
/// the computation of the hash of the key twice,
|
||||
/// or copying the key as long as there is no insert.
|
||||
///
|
||||
pub struct HashMap<'a, V> where V: HeapAllocable {
|
||||
pub struct HashMap<'a> {
|
||||
table: Box<[KeyValue]>,
|
||||
heap: &'a Heap,
|
||||
_phantom: PhantomData<V>,
|
||||
mask: usize,
|
||||
occupied: Vec<usize>,
|
||||
}
|
||||
|
||||
impl<'a, V> HashMap<'a, V> where V: HeapAllocable {
|
||||
struct QuadraticProbing {
|
||||
hash: usize,
|
||||
i: usize,
|
||||
mask: usize,
|
||||
}
|
||||
|
||||
pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> HashMap<'a, V> {
|
||||
impl QuadraticProbing {
|
||||
fn compute(hash: usize, mask: usize) -> QuadraticProbing {
|
||||
QuadraticProbing { hash, i: 0, mask }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn next_probe(&mut self) -> usize {
|
||||
self.i += 1;
|
||||
(self.hash + self.i * self.i) & self.mask
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> HashMap<'a> {
|
||||
pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> HashMap<'a> {
|
||||
let table_size = 1 << num_bucket_power_of_2;
|
||||
let table: Vec<KeyValue> = iter::repeat(KeyValue::default())
|
||||
.take(table_size)
|
||||
.collect();
|
||||
let table: Vec<KeyValue> = iter::repeat(KeyValue::default()).take(table_size).collect();
|
||||
HashMap {
|
||||
table: table.into_boxed_slice(),
|
||||
heap: heap,
|
||||
_phantom: PhantomData,
|
||||
heap,
|
||||
mask: table_size - 1,
|
||||
occupied: Vec::with_capacity(table_size / 2),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn bucket(&self, key: &[u8]) -> usize {
|
||||
let hash: u64 = djb2(key);
|
||||
(hash as usize) & self.mask
|
||||
fn probe(&self, hash: u32) -> QuadraticProbing {
|
||||
QuadraticProbing::compute(hash as usize, self.mask)
|
||||
}
|
||||
|
||||
fn get_key(&self, bytes_ref: BytesRef) -> &[u8] {
|
||||
self.heap.get_slice(bytes_ref)
|
||||
pub fn is_saturated(&self) -> bool {
|
||||
self.table.len() < self.occupied.len() * 3
|
||||
}
|
||||
|
||||
pub fn set_bucket(&mut self, key_bytes: &[u8], bucket: usize, addr: u32) -> u32 {
|
||||
#[inline(never)]
|
||||
fn get_key_value(&self, bytes_ref: BytesRef) -> (&[u8], u32) {
|
||||
let key_bytes: &[u8] = self.heap.get_slice(bytes_ref);
|
||||
let expull_addr: u32 = bytes_ref.addr() + 2 + key_bytes.len() as u32;
|
||||
(key_bytes, expull_addr)
|
||||
}
|
||||
|
||||
pub fn set_bucket(&mut self, hash: u32, key_bytes_ref: BytesRef, bucket: usize) {
|
||||
self.occupied.push(bucket);
|
||||
self.table[bucket] = KeyValue {
|
||||
key: self.heap.allocate_and_set(key_bytes),
|
||||
value_addr: addr,
|
||||
key_value_addr: key_bytes_ref,
|
||||
hash,
|
||||
};
|
||||
addr
|
||||
}
|
||||
|
||||
pub fn iter<'b: 'a>(&'b self,) -> impl Iterator<Item=(&'a [u8], (u32, &'a V))> + 'b {
|
||||
let heap: &'a Heap = self.heap;
|
||||
let table: &'b [KeyValue] = &self.table;
|
||||
self.occupied
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(move |bucket: usize| {
|
||||
let kv = table[bucket];
|
||||
let addr = kv.value_addr;
|
||||
let v: &V = heap.get_mut_ref::<V>(addr);
|
||||
(heap.get_slice(kv.key), (addr, v))
|
||||
})
|
||||
// .map(move |addr: u32| (heap.get_mut_ref::<V>(addr)) )
|
||||
}
|
||||
|
||||
pub fn values_mut<'b: 'a>(&'b self,) -> impl Iterator<Item=&'a mut V> + 'b {
|
||||
let heap: &'a Heap = self.heap;
|
||||
let table: &'b [KeyValue] = &self.table;
|
||||
self.occupied
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(move |bucket: usize| table[bucket].value_addr)
|
||||
.map(move |addr: u32| heap.get_mut_ref::<V>(addr))
|
||||
pub fn iter<'b: 'a>(&'b self) -> impl Iterator<Item = (&'a [u8], u32)> + 'b {
|
||||
self.occupied.iter().cloned().map(move |bucket: usize| {
|
||||
let kv = self.table[bucket];
|
||||
self.get_key_value(kv.key_value_addr)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_or_create<S: AsRef<[u8]>>(&mut self, key: S) -> &mut V {
|
||||
let entry = self.lookup(key.as_ref());
|
||||
match entry {
|
||||
Entry::Occupied(addr) => {
|
||||
self.heap.get_mut_ref(addr)
|
||||
}
|
||||
Entry::Vacant(bucket) => {
|
||||
let (addr, val): (u32, &mut V) = self.heap.allocate_object();
|
||||
self.set_bucket(key.as_ref(), bucket, addr);
|
||||
val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn lookup<S: AsRef<[u8]>>(&self, key: S) -> Entry {
|
||||
pub fn get_or_create<S: AsRef<[u8]>, V: HeapAllocable>(&mut self, key: S) -> &mut V {
|
||||
let key_bytes: &[u8] = key.as_ref();
|
||||
let mut bucket = self.bucket(key_bytes);
|
||||
let hash = murmurhash2::murmurhash2(key.as_ref());
|
||||
let mut probe = self.probe(hash);
|
||||
loop {
|
||||
let bucket = probe.next_probe();
|
||||
let kv: KeyValue = self.table[bucket];
|
||||
if kv.is_empty() {
|
||||
return Entry::Vacant(bucket);
|
||||
let key_bytes_ref = self.heap.allocate_and_set(key_bytes);
|
||||
let (addr, val): (u32, &mut V) = self.heap.allocate_object();
|
||||
assert_eq!(addr, key_bytes_ref.addr() + 2 + key_bytes.len() as u32);
|
||||
self.set_bucket(hash, key_bytes_ref, bucket);
|
||||
return val;
|
||||
} else if kv.hash == hash {
|
||||
let (stored_key, expull_addr): (&[u8], u32) = self.get_key_value(kv.key_value_addr);
|
||||
if stored_key == key_bytes {
|
||||
return self.heap.get_mut_ref(expull_addr);
|
||||
}
|
||||
}
|
||||
if self.get_key(kv.key) == key_bytes {
|
||||
return Entry::Occupied(kv.value_addr);
|
||||
}
|
||||
bucket = (bucket + 1) & self.mask;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
|
||||
use super::*;
|
||||
use super::super::heap::{Heap, HeapAllocable};
|
||||
use super::djb2;
|
||||
use super::murmurhash2::murmurhash2;
|
||||
use test::Bencher;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::Hasher;
|
||||
use std::collections::HashSet;
|
||||
use super::split_memory;
|
||||
|
||||
struct TestValue {
|
||||
val: u32,
|
||||
@@ -180,17 +215,21 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hashmap_size() {
|
||||
assert_eq!(split_memory(100_000), (67232, 12));
|
||||
assert_eq!(split_memory(1_000_000), (737856, 15));
|
||||
assert_eq!(split_memory(10_000_000), (7902848, 18));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hash_map() {
|
||||
let heap = Heap::with_capacity(2_000_000);
|
||||
let mut hash_map: HashMap<TestValue> = HashMap::new(18, &heap);
|
||||
let mut hash_map: HashMap = HashMap::new(18, &heap);
|
||||
{
|
||||
{
|
||||
let v: &mut TestValue = hash_map.get_or_create("abc");
|
||||
assert_eq!(v.val, 0u32);
|
||||
v.val = 3u32;
|
||||
|
||||
}
|
||||
}
|
||||
{
|
||||
let v: &mut TestValue = hash_map.get_or_create("abcd");
|
||||
@@ -205,29 +244,55 @@ mod tests {
|
||||
let v: &mut TestValue = hash_map.get_or_create("abcd");
|
||||
assert_eq!(v.val, 4u32);
|
||||
}
|
||||
let mut iter_values = hash_map.values_mut();
|
||||
assert_eq!(iter_values.next().unwrap().val, 3u32);
|
||||
assert_eq!(iter_values.next().unwrap().val, 4u32);
|
||||
assert!(!iter_values.next().is_some());
|
||||
let mut iter_values = hash_map.iter();
|
||||
{
|
||||
let (_, addr) = iter_values.next().unwrap();
|
||||
let val: &TestValue = heap.get_ref(addr);
|
||||
assert_eq!(val.val, 3u32);
|
||||
}
|
||||
{
|
||||
let (_, addr) = iter_values.next().unwrap();
|
||||
let val: &TestValue = heap.get_ref(addr);
|
||||
assert_eq!(val.val, 4u32);
|
||||
}
|
||||
assert!(iter_values.next().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_murmur() {
|
||||
let s1 = "abcdef";
|
||||
let s2 = "abcdeg";
|
||||
for i in 0..5 {
|
||||
assert_eq!(
|
||||
murmurhash2(&s1[i..5].as_bytes()),
|
||||
murmurhash2(&s2[i..5].as_bytes())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_murmur_collisions() {
|
||||
let mut set: HashSet<u32> = HashSet::default();
|
||||
for i in 0..10_000 {
|
||||
let s = format!("hash{}", i);
|
||||
let hash = murmurhash2(s.as_bytes());
|
||||
set.insert(hash);
|
||||
}
|
||||
assert_eq!(set.len(), 10_000);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_djb2(bench: &mut Bencher) {
|
||||
let v = String::from("abwer");
|
||||
bench.iter(|| {
|
||||
djb2(v.as_bytes())
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_siphasher(bench: &mut Bencher) {
|
||||
let v = String::from("abwer");
|
||||
bench.iter(|| {
|
||||
let mut h = DefaultHasher::new();
|
||||
h.write(v.as_bytes());
|
||||
h.finish()
|
||||
fn bench_murmurhash_2(b: &mut Bencher) {
|
||||
let keys: Vec<&'static str> =
|
||||
vec!["wer qwe qwe qwe ", "werbq weqweqwe2 ", "weraq weqweqwe3 "];
|
||||
b.iter(|| {
|
||||
keys.iter()
|
||||
.map(|&s| s.as_bytes())
|
||||
.map(murmurhash2::murmurhash2)
|
||||
.map(|h| h as u64)
|
||||
.last()
|
||||
.unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,29 @@
|
||||
use std::cell::UnsafeCell;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use byteorder::{ByteOrder, NativeEndian};
|
||||
|
||||
/// `BytesRef` refers to a slice in tantivy's custom `Heap`.
|
||||
///
|
||||
/// The slice will encode the length of the `&[u8]` slice
|
||||
/// on 16-bits, and then the data is encoded.
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct BytesRef {
|
||||
pub start: u32,
|
||||
pub stop: u32,
|
||||
pub struct BytesRef(u32);
|
||||
|
||||
impl BytesRef {
|
||||
pub fn is_null(&self) -> bool {
|
||||
self.0 == u32::max_value()
|
||||
}
|
||||
|
||||
pub fn addr(&self) -> u32 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BytesRef {
|
||||
fn default() -> BytesRef {
|
||||
BytesRef(u32::max_value())
|
||||
}
|
||||
}
|
||||
|
||||
/// Object that can be allocated in tantivy's custom `Heap`.
|
||||
@@ -19,20 +36,19 @@ pub struct Heap {
|
||||
inner: UnsafeCell<InnerHeap>,
|
||||
}
|
||||
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(mut_from_ref))]
|
||||
impl Heap {
|
||||
/// Creates a new heap with a given capacity
|
||||
pub fn with_capacity(num_bytes: usize) -> Heap {
|
||||
Heap {
|
||||
inner: UnsafeCell::new(
|
||||
InnerHeap::with_capacity(num_bytes)
|
||||
),
|
||||
inner: UnsafeCell::new(InnerHeap::with_capacity(num_bytes)),
|
||||
}
|
||||
}
|
||||
|
||||
fn inner(&self,) -> &mut InnerHeap {
|
||||
unsafe { &mut *self.inner.get() }
|
||||
fn inner(&self) -> &mut InnerHeap {
|
||||
unsafe { &mut *self.inner.get() }
|
||||
}
|
||||
|
||||
|
||||
/// Clears the heap. All the underlying data is lost.
|
||||
///
|
||||
/// This heap does not support deallocation.
|
||||
@@ -40,20 +56,9 @@ impl Heap {
|
||||
pub fn clear(&self) {
|
||||
self.inner().clear();
|
||||
}
|
||||
|
||||
|
||||
/// Return the heap capacity.
|
||||
pub fn capacity(&self,) -> u32 {
|
||||
self.inner().capacity()
|
||||
}
|
||||
|
||||
/// Return the amount of memory that has been allocated so far.
|
||||
pub fn len(&self,) -> u32 {
|
||||
self.inner().len()
|
||||
}
|
||||
|
||||
|
||||
/// Return amount of free space, in bytes.
|
||||
pub fn num_free_bytes(&self,) -> u32 {
|
||||
pub fn num_free_bytes(&self) -> u32 {
|
||||
self.inner().num_free_bytes()
|
||||
}
|
||||
|
||||
@@ -62,37 +67,42 @@ impl Heap {
|
||||
pub fn allocate_space(&self, num_bytes: usize) -> u32 {
|
||||
self.inner().allocate_space(num_bytes)
|
||||
}
|
||||
|
||||
|
||||
/// Allocate an object in the heap
|
||||
pub fn allocate_object<V: HeapAllocable>(&self,) -> (u32, &mut V) {
|
||||
pub fn allocate_object<V: HeapAllocable>(&self) -> (u32, &mut V) {
|
||||
let addr = self.inner().allocate_space(mem::size_of::<V>());
|
||||
let v: V = V::with_addr(addr);
|
||||
self.inner().set(addr, &v);
|
||||
(addr, self.inner().get_mut_ref(addr))
|
||||
}
|
||||
|
||||
|
||||
/// Stores a `&[u8]` in the heap and returns the destination BytesRef.
|
||||
pub fn allocate_and_set(&self, data: &[u8]) -> BytesRef {
|
||||
self.inner().allocate_and_set(data)
|
||||
}
|
||||
|
||||
|
||||
/// Fetches the `&[u8]` stored on the slice defined by the `BytesRef`
|
||||
/// given as argumetn
|
||||
pub fn get_slice(&self, bytes_ref: BytesRef) -> &[u8] {
|
||||
self.inner().get_slice(bytes_ref.start, bytes_ref.stop)
|
||||
self.inner().get_slice(bytes_ref)
|
||||
}
|
||||
|
||||
|
||||
/// Stores an item's data in the heap, at the given `address`.
|
||||
pub fn set<Item>(&self, addr: u32, val: &Item) {
|
||||
self.inner().set(addr, val);
|
||||
}
|
||||
|
||||
|
||||
/// Returns a mutable reference for an object at a given Item.
|
||||
pub fn get_mut_ref<Item>(&self, addr: u32) -> &mut Item {
|
||||
self.inner().get_mut_ref(addr)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to an `Item` at a given `addr`.
|
||||
#[cfg(test)]
|
||||
pub fn get_ref<Item>(&self, addr: u32) -> &mut Item {
|
||||
self.get_mut_ref(addr)
|
||||
}
|
||||
}
|
||||
|
||||
struct InnerHeap {
|
||||
buffer: Vec<u8>,
|
||||
@@ -101,24 +111,11 @@ struct InnerHeap {
|
||||
next_heap: Option<Box<InnerHeap>>,
|
||||
}
|
||||
|
||||
/// initializing a long Vec<u8> is crazy slow in
|
||||
/// debug mode.
|
||||
/// We use this unsafe trick to make unit test
|
||||
/// way faster.
|
||||
fn allocate_fast(num_bytes: usize) -> Vec<u8> {
|
||||
let mut buffer = Vec::with_capacity(num_bytes);
|
||||
unsafe {
|
||||
buffer.set_len(num_bytes);
|
||||
}
|
||||
buffer
|
||||
}
|
||||
|
||||
impl InnerHeap {
|
||||
|
||||
pub fn with_capacity(num_bytes: usize) -> InnerHeap {
|
||||
let buffer: Vec<u8> = allocate_fast(num_bytes);
|
||||
let buffer: Vec<u8> = vec![0u8; num_bytes];
|
||||
InnerHeap {
|
||||
buffer: buffer,
|
||||
buffer,
|
||||
buffer_len: num_bytes as u32,
|
||||
next_heap: None,
|
||||
used: 0u32,
|
||||
@@ -130,23 +127,14 @@ impl InnerHeap {
|
||||
self.next_heap = None;
|
||||
}
|
||||
|
||||
pub fn capacity(&self,) -> u32 {
|
||||
self.buffer.len() as u32
|
||||
}
|
||||
|
||||
pub fn len(&self,) -> u32 {
|
||||
self.used
|
||||
}
|
||||
|
||||
// Returns the number of free bytes. If the buffer
|
||||
// has reached it's capacity and overflowed to another buffer, return 0.
|
||||
pub fn num_free_bytes(&self,) -> u32 {
|
||||
pub fn num_free_bytes(&self) -> u32 {
|
||||
if self.next_heap.is_some() {
|
||||
0u32
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
self.buffer_len - self.used
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn allocate_space(&mut self, num_bytes: usize) -> u32 {
|
||||
@@ -154,74 +142,85 @@ impl InnerHeap {
|
||||
self.used += num_bytes as u32;
|
||||
if self.used <= self.buffer_len {
|
||||
addr
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
if self.next_heap.is_none() {
|
||||
warn!("Exceeded heap size. The margin was apparently unsufficient. The segment will be committed right after indexing this very last document.");
|
||||
info!(
|
||||
r#"Exceeded heap size. The segment will be committed right
|
||||
after indexing this document."#,
|
||||
);
|
||||
self.next_heap = Some(Box::new(InnerHeap::with_capacity(self.buffer_len as usize)));
|
||||
}
|
||||
self.next_heap.as_mut().unwrap().allocate_space(num_bytes) + self.buffer_len
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
fn get_slice(&self, start: u32, stop: u32) -> &[u8] {
|
||||
|
||||
fn get_slice(&self, bytes_ref: BytesRef) -> &[u8] {
|
||||
let start = bytes_ref.0;
|
||||
if start >= self.buffer_len {
|
||||
self.next_heap.as_ref().unwrap().get_slice(start - self.buffer_len, stop - self.buffer_len)
|
||||
}
|
||||
else {
|
||||
&self.buffer[start as usize..stop as usize]
|
||||
self.next_heap
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.get_slice(BytesRef(start - self.buffer_len))
|
||||
} else {
|
||||
let start = start as usize;
|
||||
let len = NativeEndian::read_u16(&self.buffer[start..start + 2]) as usize;
|
||||
&self.buffer[start + 2..start + 2 + len]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn get_mut_slice(&mut self, start: u32, stop: u32) -> &mut [u8] {
|
||||
if start >= self.buffer_len {
|
||||
self.next_heap.as_mut().unwrap().get_mut_slice(start - self.buffer_len, stop - self.buffer_len)
|
||||
}
|
||||
else {
|
||||
self.next_heap
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.get_mut_slice(start - self.buffer_len, stop - self.buffer_len)
|
||||
} else {
|
||||
&mut self.buffer[start as usize..stop as usize]
|
||||
}
|
||||
}
|
||||
|
||||
fn allocate_and_set(&mut self, data: &[u8]) -> BytesRef {
|
||||
let start = self.allocate_space(data.len());
|
||||
let stop = start + data.len() as u32;
|
||||
self.get_mut_slice(start, stop).clone_from_slice(data);
|
||||
BytesRef {
|
||||
start: start as u32,
|
||||
stop: stop as u32,
|
||||
}
|
||||
assert!(data.len() < u16::max_value() as usize);
|
||||
let total_len = 2 + data.len();
|
||||
let start = self.allocate_space(total_len);
|
||||
let total_buff = self.get_mut_slice(start, start + total_len as u32);
|
||||
NativeEndian::write_u16(&mut total_buff[0..2], data.len() as u16);
|
||||
total_buff[2..].clone_from_slice(data);
|
||||
BytesRef(start)
|
||||
}
|
||||
|
||||
fn get_mut(&mut self, addr: u32) -> *mut u8 {
|
||||
if addr >= self.buffer_len {
|
||||
self.next_heap.as_mut().unwrap().get_mut(addr - self.buffer_len)
|
||||
}
|
||||
else {
|
||||
self.next_heap
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.get_mut(addr - self.buffer_len)
|
||||
} else {
|
||||
let addr_isize = addr as isize;
|
||||
unsafe { self.buffer.as_mut_ptr().offset(addr_isize) }
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
fn get_mut_ref<Item>(&mut self, addr: u32) -> &mut Item {
|
||||
if addr >= self.buffer_len {
|
||||
self.next_heap.as_mut().unwrap().get_mut_ref(addr - self.buffer_len)
|
||||
}
|
||||
else {
|
||||
self.next_heap
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.get_mut_ref(addr - self.buffer_len)
|
||||
} else {
|
||||
let v_ptr_u8 = self.get_mut(addr) as *mut u8;
|
||||
let v_ptr = v_ptr_u8 as *mut Item;
|
||||
unsafe { &mut *v_ptr }
|
||||
}
|
||||
}
|
||||
|
||||
fn set<Item>(&mut self, addr: u32, val: &Item) {
|
||||
pub fn set<Item>(&mut self, addr: u32, val: &Item) {
|
||||
if addr >= self.buffer_len {
|
||||
self.next_heap.as_mut().unwrap().set(addr - self.buffer_len, val);
|
||||
}
|
||||
else {
|
||||
self.next_heap
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.set(addr - self.buffer_len, val);
|
||||
} else {
|
||||
let v_ptr: *const Item = val as *const Item;
|
||||
let v_ptr_u8: *const u8 = v_ptr as *const u8;
|
||||
debug_assert!(addr + mem::size_of::<Item>() as u32 <= self.used);
|
||||
@@ -231,4 +230,4 @@ impl InnerHeap {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,46 +1,43 @@
|
||||
mod hashmap;
|
||||
pub(crate) mod hashmap;
|
||||
mod heap;
|
||||
mod expull;
|
||||
|
||||
pub use self::heap::{Heap, HeapAllocable};
|
||||
pub use self::expull::ExpUnrolledLinkedList;
|
||||
pub use self::hashmap::{HashMap, Entry};
|
||||
|
||||
|
||||
|
||||
pub use self::hashmap::HashMap;
|
||||
|
||||
#[test]
|
||||
fn test_unrolled_linked_list() {
|
||||
use std::collections;
|
||||
let heap = Heap::with_capacity(30_000_000);
|
||||
{
|
||||
heap.clear();
|
||||
let mut ks: Vec<usize> = (1..5).map(|k| k * 100).collect();
|
||||
ks.push(2);
|
||||
ks.push(3);
|
||||
for k in (1..5).map(|k| k * 100) {
|
||||
let mut hashmap: HashMap<ExpUnrolledLinkedList> = HashMap::new(10, &heap);
|
||||
for k in (1..5).map(|k| k * 100) {
|
||||
let mut hashmap: HashMap = HashMap::new(10, &heap);
|
||||
for j in 0..k {
|
||||
for i in 0..500 {
|
||||
let mut list = hashmap.get_or_create(i.to_string());
|
||||
list.push(i*j, &heap);
|
||||
let v: &mut ExpUnrolledLinkedList = hashmap.get_or_create(i.to_string());
|
||||
v.push(i * j, &heap);
|
||||
}
|
||||
}
|
||||
let mut map_addr: collections::HashMap<Vec<u8>, u32> = collections::HashMap::new();
|
||||
for (key, addr) in hashmap.iter() {
|
||||
map_addr.insert(Vec::from(key), addr);
|
||||
}
|
||||
|
||||
for i in 0..500 {
|
||||
match hashmap.lookup(i.to_string()) {
|
||||
Entry::Occupied(addr) => {
|
||||
let v: &mut ExpUnrolledLinkedList = heap.get_mut_ref(addr);
|
||||
let mut it = v.iter(addr, &heap);
|
||||
for j in 0..k {
|
||||
assert_eq!(it.next().unwrap(), i*j);
|
||||
}
|
||||
assert!(!it.next().is_some());
|
||||
}
|
||||
_ => {
|
||||
panic!("should never happen");
|
||||
}
|
||||
let key: String = i.to_string();
|
||||
let addr: u32 = *map_addr.get(key.as_bytes()).unwrap();
|
||||
let exp_pull: &ExpUnrolledLinkedList = heap.get_ref(addr);
|
||||
let mut it = exp_pull.iter(addr, &heap);
|
||||
for j in 0..k {
|
||||
assert_eq!(it.next().unwrap(), i * j);
|
||||
}
|
||||
assert!(!it.next().is_some());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,56 +1,56 @@
|
||||
use std::marker::Send;
|
||||
use std::fmt;
|
||||
use std::path::Path;
|
||||
use directory::error::{FileError, OpenWriteError};
|
||||
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
||||
use directory::{ReadOnlySource, WritePtr};
|
||||
use std::result;
|
||||
use std::io;
|
||||
use std::marker::Sync;
|
||||
|
||||
/// Write-once read many (WORM) abstraction for where tantivy's index should be stored.
|
||||
/// Write-once read many (WORM) abstraction for where
|
||||
/// tantivy's data should be stored.
|
||||
///
|
||||
/// There are currently two implementations of `Directory`
|
||||
///
|
||||
///
|
||||
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
|
||||
/// should be your default choice.
|
||||
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
|
||||
/// should be your default choice.
|
||||
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
|
||||
/// should be used mostly for tests.
|
||||
///
|
||||
///
|
||||
pub trait Directory: fmt::Debug + Send + Sync + 'static {
|
||||
|
||||
/// Opens a virtual file for read.
|
||||
///
|
||||
///
|
||||
/// Once a virtual file is open, its data may not
|
||||
/// change.
|
||||
///
|
||||
/// Specifically, subsequent writes or flushes should
|
||||
/// have no effect on the returned `ReadOnlySource` object.
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, FileError>;
|
||||
|
||||
/// have no effect on the returned `ReadOnlySource` object.
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
|
||||
|
||||
/// Removes a file
|
||||
///
|
||||
/// Removing a file will not affect an eventual
|
||||
/// existing ReadOnlySource pointing to it.
|
||||
///
|
||||
///
|
||||
/// Removing a nonexistent file, yields a
|
||||
/// `FileError::DoesNotExist`.
|
||||
fn delete(&self, path: &Path) -> result::Result<(), FileError>;
|
||||
/// `DeleteError::DoesNotExist`.
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
|
||||
|
||||
/// Returns true iff the file exists
|
||||
fn exists(&self, path: &Path) -> bool;
|
||||
|
||||
/// Opens a writer for the *virtual file* associated with
|
||||
/// Opens a writer for the *virtual file* associated with
|
||||
/// a Path.
|
||||
///
|
||||
/// Right after this call, the file should be created
|
||||
/// and any subsequent call to `open_read` for the
|
||||
/// and any subsequent call to `open_read` for the
|
||||
/// same path should return a `ReadOnlySource`.
|
||||
///
|
||||
///
|
||||
/// Write operations may be aggressively buffered.
|
||||
/// The client of this trait is responsible for calling flush
|
||||
/// to ensure that subsequent `read` operations
|
||||
/// to ensure that subsequent `read` operations
|
||||
/// will take into account preceding `write` operations.
|
||||
///
|
||||
///
|
||||
/// Flush operation should also be persistent.
|
||||
///
|
||||
/// The user shall not rely on `Drop` triggering `flush`.
|
||||
@@ -59,18 +59,21 @@ pub trait Directory: fmt::Debug + Send + Sync + 'static {
|
||||
///
|
||||
/// The file may not previously exist.
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
|
||||
|
||||
|
||||
/// Reads the full content file that has been written using
|
||||
/// atomic_write.
|
||||
///
|
||||
/// This should only be used for small files.
|
||||
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
|
||||
|
||||
/// Atomically replace the content of a file with data.
|
||||
///
|
||||
///
|
||||
/// This calls ensure that reads can never *observe*
|
||||
/// a partially written file.
|
||||
///
|
||||
///
|
||||
/// The file may or may not previously exist.
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
|
||||
|
||||
/// Clones the directory and boxes the clone
|
||||
|
||||
/// Clones the directory and boxes the clone
|
||||
fn box_clone(&self) -> Box<Directory>;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user