Compare commits


195 Commits
0.3.1 ... 0.4.0

Author SHA1 Message Date
Paul Masurel
76e07b9705 NOBUG Small fixes. 2017-07-14 18:09:54 +09:00
Paul Masurel
ea4e9fdaf1 NOBUG updated README 2017-07-14 14:09:13 +09:00
Paul Masurel
e418bee693 NOBUG Garbage collection after end merge. 2017-07-14 12:09:47 +09:00
Paul Masurel
af4f1a86bc Merge remote-tracking branch 'origin/exp/hash_intable' 2017-07-13 20:50:54 +09:00
Paul Masurel
753b639454 NOBUG splitting the per-thread memory between the table and the heap 2017-07-13 17:11:39 +09:00
Paul Masurel
5907a47547 NOBUG Added whitespaces. 2017-07-13 15:14:12 +09:00
Paul Masurel
586a6e62a2 NOBUG Added Changelog for 4.0 2017-07-13 15:06:09 +09:00
Paul Masurel
fdae0eff5a NOBUG Remove range step_by 2017-07-13 14:05:33 +09:00
Paul Masurel
6eea407f20 Removing usage of step_by 2017-06-23 17:46:39 +09:00
Paul Masurel
1ba51d4dc4 NOBUG removed using range.step_by 2017-06-22 22:10:53 +09:00
Paul Masurel
6e742d5145 NOBUG removing batch add docs 2017-06-22 11:35:22 +09:00
Paul Masurel
1843259e91 NOBUG Simplified addr definitions 2017-06-22 11:27:32 +09:00
Paul Masurel
4ebacb7297 BytesRef is now wrapping an addr 2017-06-21 22:32:05 +09:00
Paul Masurel
fb75e60c6e issue/136 Added hashmaps. 2017-06-21 15:47:55 +09:00
Paul Masurel
04b15c6c11 Merge branch 'master' into exp/hash_intable
Conflicts:
	src/datastruct/stacker/hashmap.rs
2017-06-21 11:40:49 +09:00
Paul Masurel
e51feea574 Removed cargo fmt from travis. 2017-06-14 13:45:11 +09:00
Paul Masurel
93e7f28cc0 Added unit test 2017-06-14 10:46:06 +09:00
Paul Masurel
8875b9794a Added API to get range from fastfield 2017-06-13 23:16:50 +09:00
Paul Masurel
90fcfb3f43 issue/188 Using murmurhash 2017-06-07 09:30:34 +09:00
Paul Masurel
e547e8abad Closes #184
Resizing the `Vec` was a bad idea, as for some stacker operation,
we may have a living reference to an object in the current heap.
2017-06-06 23:16:28 +09:00
Paul Masurel
5aa4565424 Tiny cleaning 2017-06-05 23:40:08 +09:00
Paul Masurel
3637620187 Merge branch 'master' of github.com:tantivy-search/tantivy 2017-06-02 21:03:37 +09:00
Laurentiu Nicola
a94679d74d Use four terms in the intersection bench 2017-05-31 08:31:33 +09:00
Laurentiu Nicola
a35a8638cc Comment nit 2017-05-31 08:31:33 +09:00
Paul Masurel
97a051996f issue 171. Hopefully bugfix? 2017-05-31 08:31:33 +09:00
Laurentiu Nicola
69525cb3c7 Add extra intersection test 2017-05-31 08:31:33 +09:00
Laurentiu Nicola
63867a7150 Fix document generation for posting benchmarks 2017-05-31 08:31:33 +09:00
Paul Masurel
19c073385a Better intersection and added size_hint 2017-05-31 08:31:33 +09:00
Paul Masurel
0521844e56 Format, small changes in VInt 2017-05-31 08:31:20 +09:00
Paul Masurel
8d4778f94d issue/181 BinarySerializable does not return the len + Generics over Read+Write 2017-05-31 08:31:20 +09:00
Paul Masurel
1d5464351d generic read 2017-05-31 08:31:20 +09:00
Paul Masurel
522ebdc674 made ResultExt public 2017-05-31 08:31:20 +09:00
Paul Masurel
4a805733db another hash 2017-05-30 15:36:48 +09:00
Paul Masurel
568d149db8 Merge branch 'master' into exp/hash_intable 2017-05-30 08:27:33 +09:00
Paul Masurel
4cfc9806c0 made ResultExt public 2017-05-30 08:22:17 +09:00
Paul Masurel
37042e3ccb Send and Sync impl now useless 2017-05-29 18:53:49 +09:00
Paul Masurel
b316cd337a Optimization in bitpacker 2017-05-29 18:53:49 +09:00
Paul Masurel
c04991e5ad Removed pointer in fastfield 2017-05-29 18:53:49 +09:00
Paul Masurel
c59b712eeb Added hash info in the table 2017-05-29 18:47:20 +09:00
Ashley Mannix
da61baed3b run fmt 2017-05-29 18:29:39 +09:00
Ashley Mannix
b6140d2962 drop some patch bounds 2017-05-29 18:29:39 +09:00
Ashley Mannix
6a9a71bb1b re-export ErrorKind 2017-05-29 18:29:39 +09:00
Ashley Mannix
e8fc4c77e2 fix delete error msg 2017-05-29 18:29:39 +09:00
Ashley Mannix
80837601ea remove error::* imports 2017-05-29 18:29:39 +09:00
Ashley Mannix
2b2703cf51 run cargo fmt 2017-05-29 18:29:39 +09:00
Ashley Mannix
d79018a7f8 fix build warnings 2017-05-29 18:29:39 +09:00
Ashley Mannix
d8a7c428f7 impl std error for directory errors 2017-05-29 18:29:39 +09:00
Ashley Mannix
45595234cc fix error match 2017-05-29 18:29:39 +09:00
Ashley Mannix
1bcebdd29e initial error-chain 2017-05-29 18:29:39 +09:00
Paul Masurel
ed0333a404 Optimized streamer 2017-05-28 19:58:28 +09:00
Paul Masurel
ac0b1a21eb Term as a wrapper
Small changes

Plastic
2017-05-25 23:49:54 +09:00
Paul Masurel
6bbc789d84 Fmt fix 2017-05-25 23:49:54 +09:00
Paul Masurel
87152daef3 issue/174 Added doc, and made field private 2017-05-25 23:49:54 +09:00
Paul Masurel
e0fce4782a Added documentation 2017-05-25 23:49:54 +09:00
Paul Masurel
a633c2a49a Avoid exposing common. Exposes u64 to i64 conversion instead. 2017-05-25 23:49:54 +09:00
Paul Masurel
51623d593e Avoid exposing schema from segment_reader 2017-05-25 23:49:54 +09:00
Paul Masurel
29bf740ddf Exposing the remaining API 2017-05-25 23:49:54 +09:00
Paul Masurel
511bd25a31 trailing whitespace 2017-05-25 18:17:37 +09:00
Paul Masurel
66e14ac1b1 clippy 2017-05-25 18:17:37 +09:00
Paul Masurel
09e94072ba Cargo fmt 2017-05-25 18:17:37 +09:00
Paul Masurel
6c68136d31 Reorganized code 2017-05-25 18:17:37 +09:00
Paul Masurel
aaf1b2c6b6 Reorganized code and added documentation. 2017-05-25 18:17:37 +09:00
Paul Masurel
8a6af2aefa Added unit test and bugfix 2017-05-25 18:17:37 +09:00
Paul Masurel
7a6e62976b Added stream dictionary code, merge unit test 2017-05-25 18:17:37 +09:00
Paul Masurel
2712930bd6 Added the feature 2017-05-25 18:17:37 +09:00
Paul Masurel
cb05f8c098 Prevent execution of the code in the macro doc 2017-05-22 10:55:45 +09:00
Paul Masurel
c0c9d04ca9 Added extra doc 2017-05-22 10:55:45 +09:00
Paul Masurel
7ea5e740e0 Using the $crate thing to make the macro usable in and outside tantivy 2017-05-22 10:55:45 +09:00
Paul Masurel
2afa6c372a issue/168 Make doc! macro usable outside tantivy 2017-05-22 10:55:45 +09:00
Paul Masurel
c7db8866b5 Merge branch 'facets' 2017-05-21 22:57:01 +09:00
Paul Masurel
02d992324a simplified facets. 2017-05-21 22:56:43 +09:00
Paul Masurel
4ab511ffc6 Merging 2017-05-21 22:15:02 +09:00
Paul Masurel
f318172ea4 Merge branch 'issue/162' 2017-05-21 20:04:03 +09:00
Paul Masurel
581449a824 issue/162 Docs and unit tests 2017-05-21 18:58:04 +09:00
Maciej Dziardziel
272589a381 faceting for fast numerical fields 2017-05-21 12:04:29 +03:00
Laurentiu Nicola
73d54c6379 Inline block_len 2017-05-21 10:44:49 +03:00
Paul Masurel
3e4606de5d Simplifying, and reordering the members 2017-05-21 16:31:52 +09:00
Laurentiu Nicola
020779f61b Make things faster 2017-05-20 20:56:37 +03:00
Laurentiu Nicola
835936585f Don't search whole blocks, but only the remaining part 2017-05-20 18:45:41 +03:00
Paul Masurel
bdd05e97d1 Added bench for segment postings 2017-05-20 23:38:53 +09:00
Paul Masurel
2be5f08cd6 issue/162 Added block iteration API 2017-05-20 11:46:40 +09:00
Paul Masurel
3f49d65a87 issue/162 Create block postings 2017-05-20 00:46:23 +09:00
Paul Masurel
f9baf4bcc8 Merge branch 'issue/155'
Conflicts:
	src/indexer/merger.rs
	src/indexer/segment_writer.rs
2017-05-19 20:14:36 +09:00
Paul Masurel
7ee93fbed5 Cleaning 2017-05-19 20:08:04 +09:00
Paul Masurel
57a5547ae8 Comments and cleaning up API 2017-05-19 11:20:27 +09:00
Paul Masurel
c57ab6a335 Renamed fstmap to termdict 2017-05-19 09:26:18 +09:00
Paul Masurel
02bfa9be52 Moving to termdict 2017-05-19 08:43:52 +09:00
Paul Masurel
b3f62b8acc Better API 2017-05-18 23:35:39 +09:00
Paul Masurel
2a08c247af Clippy 2017-05-18 23:20:41 +09:00
Paul Masurel
d2926b6ee0 Format 2017-05-18 23:09:20 +09:00
Paul Masurel
0272167c2e Code cleaning 2017-05-18 23:06:02 +09:00
Laurentiu Nicola
a9cf0bde16 Format code 2017-05-18 22:07:49 +09:00
Laurentiu Nicola
5a457df45d VInt encode values in IntFastFieldWriter
Closes #131
2017-05-18 22:07:49 +09:00
Paul Masurel
ca76fd5ba0 Uncommenting unit test 2017-05-18 20:41:56 +09:00
Paul Masurel
e79a316e41 Issue 155 - Trying to avoid term lookup when merging terms
+ Adds a proper Streamer interface
2017-05-18 20:12:00 +09:00
Paul Masurel
733f54d80e Making clippy happy. 2017-05-17 19:07:39 +09:00
Paul Masurel
7b2b181652 Merge branch 'master' into issue/136
Conflicts:
	src/datastruct/stacker/hashmap.rs
	src/datastruct/stacker/heap.rs
	src/datastruct/stacker/mod.rs
	src/indexer/index_writer.rs
	src/indexer/merger.rs
	src/indexer/segment_updater.rs
	src/indexer/segment_writer.rs
	src/postings/postings_writer.rs
	src/postings/recorder.rs
	src/schema/term.rs
2017-05-17 18:40:09 +09:00
Laurentiu Nicola
b3f39f2343 Remove unneeded suppressions, make clippy lints explicit 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
a13122d392 use explicit drop instead of suppression 2017-05-17 15:50:07 +09:00
Paul Masurel
113917c521 Making clippy happy.
+ Simplifying bitpacking by adding a 7 byte padding.
+ Bugfix in a unit test.
2017-05-17 15:50:07 +09:00
Laurentiu Nicola
1352b95b07 clippy: fix never_loop warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
c0538dbe9a clippy: fix mut_from_ref warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
0d5ea98132 clippy: fix inline_always warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
0404df3fd5 Fix typo in docstring 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
a67caee141 clippy: fix len_zero warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
f5fb29422a clippy: fix while_let_loop warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
4e48bbf0ea clippy: fix needless_lifetimes warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
6fea510869 clippy: fix redundant_closure warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
39958ec476 clippy: fix single_match warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
36f51e289e clippy: fix match_same_arms warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
5c83153035 clippy: fix or_fun_call warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
8e407bb314 clippy: fix needless_borrow warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
103ba6ba35 clippy: fix match_ref_pats warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
3965b26cd2 clippy: fix useless_let_if_seq warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
1cd0b378fb clippy: fix map_clone warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
92f383fa51 clippy: fix let_unit_value warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
6ae34d2a77 clippy: fix toplevel_ref_arg warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
1af1f7e0d1 clippy: fix if_let_redundant_pattern_matching warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
feec2e2620 clippy: fix needless_bool warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
3e2ad7542d clippy: fix needless_return warnings 2017-05-17 15:50:07 +09:00
Laurentiu Nicola
ac02c76b1e clippy: fix doc_markdown warnings 2017-05-17 15:50:07 +09:00
Paul Masurel
e5c7c0b8b9 Update CHANGELOG.md 2017-05-16 21:13:33 +09:00
Laurentiu Nicola
49dbe4722f Add a test for SegmentPostings::skip_len 2017-05-16 21:12:43 +09:00
Laurentiu Nicola
f64ff77424 Use an exponential search 2017-05-16 21:12:43 +09:00
Laurentiu Nicola
2bf93e9e51 Avoid rebuilding simdcomp when running tests 2017-05-16 08:37:43 +09:00
Laurentiu Nicola
3dde748b25 Make rustfmt happy 2017-05-16 00:49:05 +03:00
Laurentiu Nicola
1dabe26395 Add comment about block_len 2017-05-15 21:26:28 +03:00
Laurentiu Nicola
5590537739 Disable early exit 2017-05-15 21:18:06 +03:00
Laurentiu Nicola
ccf0f9cb2f Merge branch 'master' of github.com:tantivy-search/tantivy into issue/130 2017-05-15 18:54:16 +03:00
Laurentiu Nicola
e21913ecdc Use binary search for SegmentPostings::skip_next 2017-05-15 18:33:43 +03:00
Laurentiu Nicola
2cc826adc7 Add a bench for SegmentPostings::SkipNext 2017-05-15 18:33:43 +03:00
Laurentiu Nicola
4d90d8fc1d Move the random sampling helpers to the tests module 2017-05-15 18:33:43 +03:00
Paul Masurel
0606a8ae73 Bugfix in travis yml 2017-05-16 00:22:11 +09:00
Paul Masurel
03564214e7 Added check for rustfmt in travis 2017-05-15 22:46:43 +09:00
Paul Masurel
4c8f9742f8 format 2017-05-15 22:30:18 +09:00
Paul Masurel
a23b7a1815 Test the size of complete 0..128 block 2017-05-15 19:09:52 +09:00
Paul Masurel
6f89a86b14 Added simple search in travis CI 2017-05-15 12:10:23 +09:00
Laurentiu Nicola
b2beac1203 Check the result of wait_merging_threads 2017-05-15 08:00:25 +09:00
Paul Masurel
8cd5a2d81d Fixed logging deleted files twice 2017-05-15 00:25:49 +09:00
Paul Masurel
b26c22ada0 Merge branch 'issue/148' 2017-05-15 00:02:51 +09:00
Laurentiu Nicola
8a35259300 Avoid clone() call 2017-05-14 23:28:17 +09:00
Paul Masurel
db56167a5d Display backtrace 2017-05-14 23:28:17 +09:00
Paul Masurel
ab66ffed4e Closes #147 2017-05-14 23:28:17 +09:00
Laurentiu Nicola
e04f2f0b08 issue/148 Wait for the index writer threads to shut down in simple_search 2017-05-14 16:35:24 +03:00
Paul Masurel
7a5df33c85 issue/148 Wrapping MsQueue to drop all of its content on Drop 2017-05-14 16:25:33 +03:00
Laurentiu Nicola
ee0873dd07 Avoid clone() call 2017-05-13 16:11:58 +03:00
Paul Masurel
695c8828b8 Display backtrace 2017-05-13 18:51:38 +09:00
Paul Masurel
4ff7dc7a4f Closes #147 2017-05-13 18:46:50 +09:00
Paul Masurel
69832bfd03 NOBUG Disabling running examples in CI as it is not working. 2017-05-12 14:35:50 +09:00
Paul Masurel
ecbdd70c37 Removed the clunky linked list logic of the heap. 2017-05-12 14:01:52 +09:00
Paul Masurel
fb1b2be782 issue/136 Fix following CR 2017-05-12 13:51:09 +09:00
Paul Masurel
9cd7458978 NOBUG Hiding methods making it possible to build an incorrect Term. 2017-05-11 21:12:59 +09:00
Paul Masurel
4c4c28e2c4 Fix broken compile 2017-05-11 20:57:32 +09:00
Paul Masurel
9f9e588905 Merge branch 'master' into issue/136
Conflicts:
	src/postings/postings_writer.rs
2017-05-11 20:50:24 +09:00
Paul Masurel
6fd17e0ead Code cleaning 2017-05-11 20:47:30 +09:00
Paul Masurel
65dc5b0d83 Closes #145 2017-05-11 19:48:06 +09:00
Paul Masurel
15d15c01f8 Running examples in CI
Closes #143
2017-05-11 19:43:36 +09:00
Paul Masurel
106832a66a Make Term::with_capacity crate-public 2017-05-11 19:37:15 +09:00
Paul Masurel
477b9136b9 FIXED inconsistent serialization of the Term's field.
Also:

Cleaned up the code to make sure that the logic
is only in one place.
Removed allocate_vec

Closes #141
Closes #139
Closes #142
Closes #138
2017-05-11 19:37:15 +09:00
Paul Masurel
7852d097b8 CHANGELOG 0.3.1 did not include the fix of the Field(u32) 2017-05-11 09:48:37 +09:00
Ashley Mannix
0bd56241bb pretty print meta.json 2017-05-10 20:13:53 +09:00
Paul Masurel
54ab897755 Added comment 2017-05-10 19:30:24 +09:00
Paul Masurel
1369d2d144 Quadratic probing. 2017-05-10 10:38:47 +09:00
Paul Masurel
d3f829dc8a Bugfix 2017-05-10 00:29:37 +09:00
Paul Masurel
e82ccf9627 Merge branch 'master' into issue/indexing-refactoring 2017-05-09 16:43:33 +09:00
Paul Masurel
d3d29f7f54 NOBUG Updated CHANGELOG with the serde change for 0.4.0 2017-05-09 16:42:25 +09:00
Paul Masurel
3566717979 Merge pull request #134 from tantivy-search/chore/serde-rebase
Replace rustc_serialize with serde (updated)
2017-05-09 16:38:42 +09:00
Paul Masurel
90bc3e3773 Added limitation on term dictionary saturation 2017-05-09 14:10:33 +09:00
Paul Masurel
ffb62b6835 working 2017-05-09 10:17:05 +09:00
Ashley Mannix
4f9ce91d6a update underflow test 2017-05-08 14:40:58 +10:00
Laurentiu Nicola
3c3a2fbfe8 Remove old serialization code 2017-05-08 07:36:15 +03:00
Laurentiu Nicola
0508571d1a Use the proper error type on u64 overflow 2017-05-08 07:35:33 +03:00
Laurentiu Nicola
7b733dd34f Fix i64 overflow check and merge NotJSON with NotJSONObject 2017-05-08 07:09:54 +03:00
Ashley Mannix
2c798e3147 Replace rustc_serialize with serde 2017-05-07 20:21:22 +03:00
Paul Masurel
2c13f210bc Bugfix on merging i64 fast fields 2017-05-07 15:57:29 +09:00
Paul Masurel
0dad02791c issues/65 Added comments
Closes #65
Closes #132
2017-05-06 23:09:45 +09:00
Paul Masurel
2947364ae1 issues/65 Phrase queries for untokenized fields are not tokenized. 2017-05-06 22:14:26 +09:00
Paul Masurel
05111599b3 Removed several TODOs 2017-05-05 16:08:09 +08:00
Paul Masurel
83263eabbb issues/65 Updated changelog added some doc. 2017-05-04 17:13:14 +08:00
Paul Masurel
5cb5c9a8f2 issues/65 Added i64 fast fields 2017-05-04 16:46:14 +08:00
Paul Masurel
9ab92b7739 i64 fast field working 2017-05-04 16:46:14 +08:00
Paul Masurel
962bddfbbf Merge with panics. 2017-05-04 16:46:14 +08:00
Paul Masurel
26cfe2909f FastField with different types 2017-05-04 16:46:13 +08:00
Paul Masurel
afdfb1a69b Compiling... fastfield not implemented yet 2017-05-04 16:46:13 +08:00
Paul Masurel
b26ad1d57a Added int options 2017-05-04 16:46:13 +08:00
Paul Masurel
1dbd54edbb Renamed u64options 2017-05-04 16:46:13 +08:00
Paul Masurel
deb04eb090 issue/65 Switching to u64. 2017-05-04 16:46:13 +08:00
Paul Masurel
bed34bf502 Merge branch 'issues/122' 2017-04-23 16:14:40 +08:00
Paul Masurel
95bfb71901 NOBUG Remove 256 num fields limit 2017-04-19 22:37:34 +09:00
Paul Masurel
74e10843a7 issue/120 Disabled SIMD vbyte compression for msvc 2017-04-17 22:36:32 +09:00
Paul Masurel
1b922e6d23 issue 120. Using streamvbyte codec for the vbyte part of the encoding 2017-04-16 18:49:53 +09:00
Paul Masurel
a7c6c31538 Merge commit '9d071c8d4610aa61f4b1f7dd489210415a05cfc0' as 'cpp/streamvbyte' 2017-04-16 15:22:43 +09:00
Paul Masurel
9d071c8d46 Squashed 'cpp/streamvbyte/' content from commit f38aa6b
git-subtree-dir: cpp/streamvbyte
git-subtree-split: f38aa6b6ec4c5cee9d72c94ef305e6a79a108252
2017-04-16 15:22:43 +09:00
Paul Masurel
04074f7bcb Merge pull request #119 from tantivy-search/issue/118
Using u32 for field ids
2017-04-15 13:11:22 +09:00
Paul Masurel
8a28d1643d Using u32 for field ids 2017-04-15 13:04:33 +09:00
148 changed files with 9820 additions and 4597 deletions

5 .gitignore

@@ -5,4 +5,7 @@ target/release
Cargo.lock
benchmark
.DS_Store
cpp/simdcomp/bitpackingbenchmark
cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat

.travis.yml

@@ -28,6 +28,7 @@ script:
travis-cargo test &&
travis-cargo bench &&
travis-cargo doc
- cargo run --example simple_search
after_success:
- bash ./script/build-doc.sh
- travis-cargo doc-upload

CHANGELOG.md

@@ -1,3 +1,36 @@
Tantivy 0.4.0
==========================
- Raised the limit on the number of fields (previously 256 fields) (@fulmicoton)
- Removed u32 fields. They are replaced by u64 and i64 fields (#65) (@fulmicoton)
- Optimized skip in SegmentPostings (#130) (@lnicola)
- Replaced rustc_serialize with serde. Kudos to @KodrAus and @lnicola
- Using error-chain (@KodrAus)
- QueryParser: (@fulmicoton)
  - An explicit error is returned when searching for a term that is not indexed
  - Searching for an int term via the query parser was broken `(age:1)`
  - Searching for a non-indexed field returns an explicit error
  - Phrase queries for non-tokenized fields are not tokenized by the query parser
- Faster/better indexing (@fulmicoton)
  - using murmurhash2
  - faster merging
  - more memory-efficient fast field writer (@lnicola)
  - better handling of collisions
  - lower memory usage
- Added APIs, most notably to iterate over ranges of terms (@fulmicoton)
- Bugfix that was preventing segment files from being unmapped on index drop (@fulmicoton)
- Made the doc! macro public (@fulmicoton)
- Added an alternative implementation of the streaming dictionary (@fulmicoton)
Tantivy 0.3.1
==========================
- Expose a method to trigger garbage collection of files
Tantivy 0.3
==========================
@@ -19,6 +52,7 @@ You should not expect backward compatibility before
tantivy 1.0.
New Features
------------

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.3.1"
version = "0.4.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
build = "build.rs"
license = "MIT"
@@ -20,23 +20,28 @@ regex = "0.2"
fst = "0.1.37"
atomicwrites = "0.1.3"
tempfile = "2.1"
rustc-serialize = "0.3"
log = "0.3.6"
combine = "2.2"
tempdir = "0.3"
bincode = "0.5"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
bincode = "0.8"
libc = {version = "0.2.20", optional=true}
num_cpus = "1.2"
itertools = "0.5.9"
lz4 = "1.20"
bit-set = "0.4.0"
time = "0.1"
uuid = { version = "0.4", features = ["v4", "rustc-serialize"] }
uuid = { version = "0.5", features = ["v4", "serde"] }
chan = "0.1"
version = "2"
crossbeam = "0.2"
futures = "0.1.9"
futures-cpupool = "0.1.2"
futures = "0.1"
futures-cpupool = "0.1"
error-chain = "0.8"
owning_ref = "0.3"
stable_deref_trait = "1.0.0"
[target.'cfg(windows)'.dependencies]
winapi = "0.2"
@@ -58,6 +63,7 @@ debug-assertions = false
[features]
default = ["simdcompression"]
simdcompression = ["libc", "gcc"]
streamdict = []
[badges]

README.md

@@ -19,10 +19,10 @@ It is strongly inspired by Lucene's design.
- Basic query language
- Phrase queries
- Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes 4 minutes on my desktop)
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- mmap based
- optional SIMD integer compression
- u32 fast fields (equivalent of doc values in Lucene)
- u64 and i64 fast fields (equivalent of doc values in Lucene)
- LZ4 compressed document store
- Cheesy logo with a horse

appveyor.yml

@@ -21,4 +21,5 @@ install:
build: false
test_script:
- REM SET RUST_LOG=tantivy,test & cargo test --verbose
- REM SET RUST_LOG=tantivy,test & cargo test --verbose
- REM SET RUST_BACKTRACE=1 & cargo run --example simple_search

build.rs

@@ -4,7 +4,8 @@ mod build {
pub fn build() {
let mut config = gcc::Config::new();
config.include("./cpp/simdcomp/include")
config
.include("./cpp/simdcomp/include")
.file("cpp/simdcomp/src/avxbitpacking.c")
.file("cpp/simdcomp/src/simdintegratedbitpacking.c")
.file("cpp/simdcomp/src/simdbitpacking.c")
@@ -18,18 +19,26 @@ mod build {
config.opt_level(3);
if cfg!(target_env = "msvc") {
config.define("NDEBUG", None)
config
.define("NDEBUG", None)
.flag("/Gm-")
.flag("/GS-")
.flag("/Gy")
.flag("/Oi")
.flag("/GL");
} else {
config.flag("-msse4.1")
.flag("-march=native");
}
}
if !cfg!(target_env = "msvc") {
config
.include("./cpp/streamvbyte/include")
.file("cpp/streamvbyte/src/streamvbyte.c")
.file("cpp/streamvbyte/src/streamvbytedelta.c")
.flag("-msse4.1")
.flag("-march=native")
.flag("-std=c99");
}
config.compile("libsimdcomp.a");
// Workaround for linking static libraries built with /GL
@@ -37,6 +46,8 @@ mod build {
if !cfg!(debug_assertions) && cfg!(target_env = "msvc") {
println!("cargo:rustc-link-lib=dylib=simdcomp");
}
println!("cargo:rerun-if-changed=cpp");
}
}

32 cpp/streamvbyte/.gitignore

@@ -0,0 +1,32 @@
# Object files
*.o
*.ko
*.obj
*.elf
# Precompiled Headers
*.gch
*.pch
# Libraries
*.lib
*.a
*.la
*.lo
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex
# Debug files
*.dSYM/

cpp/streamvbyte/.travis.yml

@@ -0,0 +1,7 @@
language: c
sudo: false
compiler:
- gcc
- clang
script: make && ./unit

202 cpp/streamvbyte/LICENSE

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

60 cpp/streamvbyte/README.md

@@ -0,0 +1,60 @@
streamvbyte
===========
[![Build Status](https://travis-ci.org/lemire/streamvbyte.png)](https://travis-ci.org/lemire/streamvbyte)
StreamVByte is a new integer compression technique that applies SIMD instructions (vectorization) to
Google's Group Varint approach. The net result is faster than other byte-oriented compression
techniques.
The approach is patent-free, and the code is available under the Apache License.
It includes fast differential coding.
It assumes a recent Intel processor (e.g., Haswell or better).
The code should build using most standard-compliant C99 compilers. The provided makefile
expects a Linux-like system.
Usage:
make
./unit
See example.c for an example.
Short code sample:
```C
// suppose that datain is an array of uint32_t integers
size_t compsize = streamvbyte_encode(datain, N, compressedbuffer); // encoding
// here the result is stored in compressedbuffer using compsize bytes
streamvbyte_decode(compressedbuffer, recovdata, N); // decoding (fast)
```
If the values are sorted, then it might be preferable to use differential coding:
```C
// suppose that datain is an array of uint32_t integers
size_t compsize = streamvbyte_delta_encode(datain, N, compressedbuffer,0); // encoding
// here the result is stored in compressedbuffer using compsize bytes
streamvbyte_delta_decode(compressedbuffer, recovdata, N,0); // decoding (fast)
```
You have to know how many integers were coded when you decompress. You can store this
information along with the compressed stream.
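For example, here is a minimal sketch of one way to do that (the 4-byte count header is an illustrative convention used here, not something the library defines):
```C
#include <string.h>
#include "streamvbyte.h"

// Prepend the element count so the decoder knows how many integers to expect.
size_t encode_with_count(const uint32_t *in, uint32_t count, uint8_t *out) {
    memcpy(out, &count, sizeof(count)); // hypothetical 4-byte header
    return sizeof(count) + streamvbyte_encode(in, count, out + sizeof(count));
}

// Returns the total number of bytes consumed from `in`.
size_t decode_with_count(const uint8_t *in, uint32_t *out) {
    uint32_t count;
    memcpy(&count, in, sizeof(count));
    return sizeof(count) + streamvbyte_decode(in + sizeof(count), out, count);
}
```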
See also
--------
* SIMDCompressionAndIntersection: A C++ library to compress and intersect sorted lists of integers using SIMD instructions https://github.com/lemire/SIMDCompressionAndIntersect
* The FastPFOR C++ library : Fast integer compression https://github.com/lemire/FastPFor
* High-performance dictionary coding https://github.com/lemire/dictionary
* LittleIntPacker: C library to pack and unpack short arrays of integers as fast as possible https://github.com/lemire/LittleIntPacker
* The SIMDComp library: A simple C library for compressing lists of integers using binary packing https://github.com/lemire/simdcomp
* MaskedVByte: Fast decoder for VByte-compressed integers https://github.com/lemire/MaskedVByte
* CSharpFastPFOR: A C# integer compression library https://github.com/Genbox/CSharpFastPFOR
* JavaFastPFOR: A java integer compression library https://github.com/lemire/JavaFastPFOR
* Encoding: Integer Compression Libraries for Go https://github.com/zhenjl/encoding
* FrameOfReference is a C++ library dedicated to frame-of-reference (FOR) compression: https://github.com/lemire/FrameOfReference
* libvbyte: A fast implementation for varbyte 32bit/64bit integer compression https://github.com/cruppstahl/libvbyte
* TurboPFor is a C library that offers lots of interesting optimizations. Well worth checking! (GPL license) https://github.com/powturbo/TurboPFor
* Oroch is a C++ library that offers a usable API (MIT license) https://github.com/ademakov/Oroch

24 cpp/streamvbyte/example.c

@@ -0,0 +1,24 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "streamvbyte.h"
int main() {
int N = 5000;
uint32_t * datain = malloc(N * sizeof(uint32_t));
uint8_t * compressedbuffer = malloc(N * sizeof(uint32_t));
uint32_t * recovdata = malloc(N * sizeof(uint32_t));
for (int k = 0; k < N; ++k)
datain[k] = 120;
size_t compsize = streamvbyte_encode(datain, N, compressedbuffer); // encoding
// here the result is stored in compressedbuffer using compsize bytes
size_t compsize2 = streamvbyte_decode(compressedbuffer, recovdata,
N); // decoding (fast)
assert(compsize == compsize2);
free(datain);
free(compressedbuffer);
free(recovdata);
printf("Compressed %d integers down to %d bytes.\n",N,(int) compsize);
return 0;
}

cpp/streamvbyte/include/streamvbyte.h

@@ -0,0 +1,19 @@
#ifndef VARINTDECODE_H_
#define VARINTDECODE_H_
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <stdint.h>// please use a C99-compatible compiler
#include <stddef.h>
// Encode an array of a given length read from in to out in varint format.
// Returns the number of bytes written.
size_t streamvbyte_encode(const uint32_t *in, uint32_t length, uint8_t *out);
// Read "length" 32-bit integers in varint format from in, storing the result in out.
// Returns the number of bytes read.
size_t streamvbyte_decode(const uint8_t* in, uint32_t* out, uint32_t length);
#endif /* VARINTDECODE_H_ */

cpp/streamvbyte/include/streamvbytedelta.h

@@ -0,0 +1,24 @@
/*
* streamvbytedelta.h
*
* Created on: Apr 14, 2016
* Author: lemire
*/
#ifndef INCLUDE_STREAMVBYTEDELTA_H_
#define INCLUDE_STREAMVBYTEDELTA_H_
// Encode an array of a given length read from in to out in StreamVByte format.
// Returns the number of bytes written.
// this version uses differential coding (coding differences between values) starting at prev (you can often set prev to zero)
size_t streamvbyte_delta_encode(const uint32_t *in, uint32_t length, uint8_t *out, uint32_t prev);
// Read "length" 32-bit integers in StreamVByte format from in, storing the result in out.
// Returns the number of bytes read.
// this version uses differential coding (coding differences between values) starting at prev (you can often set prev to zero)
size_t streamvbyte_delta_decode(const uint8_t* in, uint32_t* out, uint32_t length, uint32_t prev);
#endif /* INCLUDE_STREAMVBYTEDELTA_H_ */
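A short usage sketch for the delta variant declared above (not part of the library; it assumes a sorted input, prev = 0, and a worst-case output buffer of 4 data bytes per value plus one key byte per 4 values):
```C
#include <assert.h>
#include <stdlib.h>
#include "streamvbytedelta.h"

int main() {
    uint32_t N = 1000;
    uint32_t *sorted = malloc(N * sizeof(uint32_t));
    uint8_t *buf = malloc(N * sizeof(uint32_t) + (N + 3) / 4); // worst-case size
    uint32_t *back = malloc(N * sizeof(uint32_t));
    for (uint32_t k = 0; k < N; ++k)
        sorted[k] = 3 * k; // increasing values keep the deltas small
    size_t written = streamvbyte_delta_encode(sorted, N, buf, 0); // prev = 0
    size_t read = streamvbyte_delta_decode(buf, back, N, 0);      // prev = 0
    assert(written == read);
    free(sorted); free(buf); free(back);
    return 0;
}
```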

58 cpp/streamvbyte/makefile

@@ -0,0 +1,58 @@
# minimalist makefile
.SUFFIXES:
#
.SUFFIXES: .cpp .o .c .h
CFLAGS = -fPIC -march=native -std=c99 -O3 -Wall -Wextra -pedantic -Wshadow
LDFLAGS = -shared
LIBNAME=libstreamvbyte.so.0.0.1
all: unit $(LIBNAME)
test:
./unit
install: $(OBJECTS)
cp $(LIBNAME) /usr/local/lib
ln -s /usr/local/lib/$(LIBNAME) /usr/local/lib/libstreamvbyte.so
ldconfig
cp $(HEADERS) /usr/local/include
HEADERS=./include/streamvbyte.h ./include/streamvbytedelta.h
uninstall:
for h in $(HEADERS) ; do rm /usr/local/$$h; done
rm /usr/local/lib/$(LIBNAME)
rm /usr/local/lib/libstreamvbyte.so
ldconfig
OBJECTS= streamvbyte.o streamvbytedelta.o
streamvbytedelta.o: ./src/streamvbytedelta.c $(HEADERS)
$(CC) $(CFLAGS) -c ./src/streamvbytedelta.c -Iinclude
streamvbyte.o: ./src/streamvbyte.c $(HEADERS)
$(CC) $(CFLAGS) -c ./src/streamvbyte.c -Iinclude
$(LIBNAME): $(OBJECTS)
$(CC) $(CFLAGS) -o $(LIBNAME) $(OBJECTS) $(LDFLAGS)
example: ./example.c $(HEADERS) $(OBJECTS)
$(CC) $(CFLAGS) -o example ./example.c -Iinclude $(OBJECTS)
unit: ./tests/unit.c $(HEADERS) $(OBJECTS)
$(CC) $(CFLAGS) -o unit ./tests/unit.c -Iinclude $(OBJECTS)
dynunit: ./tests/unit.c $(HEADERS) $(LIBNAME)
$(CC) $(CFLAGS) -o dynunit ./tests/unit.c -Iinclude -lstreamvbyte
clean:
rm -f unit *.o $(LIBNAME) example

cpp/streamvbyte/src/streamvbyte.c

@@ -0,0 +1,495 @@
#include "streamvbyte.h"
#if defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif
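// Each key byte below packs four 2-bit length codes (0 => 1 byte, ..., 3 => 4
// bytes), one per encoded value. lengthTable[key] gives the total number of
// data bytes consumed by the four values described by that key (4 to 16).
// shuffleTable[key] is the 16-byte pshufb mask that expands those packed bytes
// into four 32-bit lanes; -1 (0xFF) entries have their high bit set, so
// _mm_shuffle_epi8 zeroes them. Trailing comments such as "// 4321" list the
// per-value byte counts.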
static uint8_t lengthTable[256] = { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9,
10, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7, 8,
9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10,
11, 12, 10, 11, 12, 13, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
11, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10,
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11,
12, 10, 11, 12, 13, 11, 12, 13, 14, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12,
13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10,
11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 7, 8, 9, 10, 8,
9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11, 12,
10, 11, 12, 13, 11, 12, 13, 14, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12,
13, 14, 12, 13, 14, 15, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
13, 14, 15, 16 };
static uint8_t shuffleTable[256][16] = { { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1,
-1, -1, 3, -1, -1, -1 }, // 1111
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 2111
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 3111
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 4111
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 1211
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 2211
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 3211
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 4211
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 1311
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 2311
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 3311
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, -1, -1, -1 }, // 4311
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, -1, -1, -1 }, // 1411
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, -1, -1, -1 }, // 2411
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, -1, -1, -1 }, // 3411
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, -1, -1, -1 }, // 4411
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1 }, // 1121
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 2121
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 3121
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 4121
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 1221
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 2221
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 3221
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 4221
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 1321
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 2321
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 3321
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, -1, -1, -1 }, // 4321
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, -1, -1, -1 }, // 1421
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, -1, -1, -1 }, // 2421
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, -1, -1, -1 }, // 3421
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, -1, -1, -1 }, // 4421
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1 }, // 1131
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 2131
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 3131
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 4131
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 1231
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 2231
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 3231
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 4231
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 1331
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 2331
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 3331
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, -1, -1, -1 }, // 4331
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, -1, -1 }, // 1431
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, -1, -1, -1 }, // 2431
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, -1, -1, -1 }, // 3431
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, -1, -1, -1 }, // 4431
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1 }, // 1141
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 2141
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 3141
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 4141
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 1241
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 2241
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 3241
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 4241
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 1341
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 2341
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 3341
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, -1, -1, -1 }, // 4341
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1 }, // 1441
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, -1 }, // 2441
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1 }, // 3441
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1 }, // 4441
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1 }, // 1112
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 2112
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 3112
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 4112
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 1212
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 2212
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 3212
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 4212
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 1312
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 2312
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 3312
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, -1, -1 }, // 4312
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, -1, -1 }, // 1412
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, -1, -1 }, // 2412
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, -1, -1 }, // 3412
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, -1, -1 }, // 4412
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1 }, // 1122
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 2122
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 3122
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 4122
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 1222
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 2222
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 3222
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 4222
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 1322
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 2322
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 3322
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, -1, -1 }, // 4322
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, -1, -1 }, // 1422
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, -1, -1 }, // 2422
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, -1, -1 }, // 3422
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, -1, -1 }, // 4422
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1 }, // 1132
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 2132
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 3132
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 4132
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 1232
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 2232
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 3232
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 4232
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 1332
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 2332
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 3332
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, -1, -1 }, // 4332
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, -1, -1 }, // 1432
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, -1, -1 }, // 2432
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, -1, -1 }, // 3432
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, -1, -1 }, // 4432
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1 }, // 1142
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 2142
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 3142
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 4142
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 1242
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 2242
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 3242
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 4242
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 1342
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 2342
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 3342
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, -1, -1 }, // 4342
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1 }, // 1442
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1 }, // 2442
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1 }, // 3442
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1, -1 }, // 4442
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1 }, // 1113
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 2113
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 3113
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 4113
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 1213
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 2213
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 3213
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 4213
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 1313
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 2313
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 3313
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, -1 }, // 4313
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, -1 }, // 1413
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, -1 }, // 2413
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, -1 }, // 3413
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, -1 }, // 4413
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1 }, // 1123
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 2123
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 3123
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 4123
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 1223
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 2223
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 3223
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 4223
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 1323
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 2323
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 3323
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, -1 }, // 4323
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, -1 }, // 1423
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, -1 }, // 2423
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, -1 }, // 3423
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, -1 }, // 4423
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1 }, // 1133
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 2133
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 3133
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 4133
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 1233
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 2233
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 3233
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 4233
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 1333
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 2333
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 3333
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, -1 }, // 4333
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, -1 }, // 1433
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, -1 }, // 2433
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, -1 }, // 3433
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, -1 }, // 4433
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1 }, // 1143
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 2143
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 3143
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 4143
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 1243
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 2243
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 3243
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 4243
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 1343
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 2343
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 3343
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, -1 }, // 4343
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1 }, // 1443
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1 }, // 2443
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 }, // 3443
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1 }, // 4443
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6 }, // 1114
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 2114
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 3114
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 4114
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 1214
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 2214
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 3214
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 4214
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 1314
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 2314
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 3314
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, 11 }, // 4314
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, 9 }, // 1414
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, 10 }, // 2414
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, 11 }, // 3414
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, 12 }, // 4414
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7 }, // 1124
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 2124
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 3124
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 4124
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 1224
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 2224
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 3224
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 4224
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 1324
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 2324
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 3324
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, 12 }, // 4324
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, 10 }, // 1424
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11 }, // 2424
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, 12 }, // 3424
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, 13 }, // 4424
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8 }, // 1134
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 2134
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 3134
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 4134
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 1234
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 2234
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 3234
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 4234
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 1334
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 2334
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 3334
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, 13 }, // 4334
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, 11 }, // 1434
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, 12 }, // 2434
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, 13 }, // 3434
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14 }, // 4434
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9 }, // 1144
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 2144
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 3144
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 4144
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 1244
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 2244
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 3244
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 4244
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 1344
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 2344
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 3344
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, 14 }, // 4344
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }, // 1444
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, // 2444
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }, // 3444
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } // 4444
};
static uint8_t _encode_data(uint32_t val, uint8_t *__restrict__ *dataPtrPtr) {
uint8_t *dataPtr = *dataPtrPtr;
uint8_t code;
if (val < (1 << 8)) { // 1 byte
*dataPtr = (uint8_t)(val);
*dataPtrPtr += 1;
code = 0;
} else if (val < (1 << 16)) { // 2 bytes
*(uint16_t *) dataPtr = (uint16_t)(val);
*dataPtrPtr += 2;
code = 1;
} else if (val < (1 << 24)) { // 3 bytes
*(uint16_t *) dataPtr = (uint16_t)(val);
*(dataPtr + 2) = (uint8_t)(val >> 16);
*dataPtrPtr += 3;
code = 2;
} else { // 4 bytes
*(uint32_t *) dataPtr = val;
*dataPtrPtr += 4;
code = 3;
}
return code;
}
static uint8_t *svb_encode_scalar(const uint32_t *in,
uint8_t *__restrict__ keyPtr, uint8_t *__restrict__ dataPtr,
uint32_t count) {
if (count == 0)
return dataPtr; // exit immediately if no data
uint8_t shift = 0; // cycles 0, 2, 4, 6, 0, 2, 4, 6, ...
uint8_t key = 0;
for (uint32_t c = 0; c < count; c++) {
if (shift == 8) {
shift = 0;
*keyPtr++ = key;
key = 0;
}
uint32_t val = in[c];
uint8_t code = _encode_data(val, &dataPtr);
key |= code << shift;
shift += 2;
}
*keyPtr = key; // write last key (no increment needed)
return dataPtr; // pointer to first unused data byte
}
// Encode an array of a given length, read from in and written to out in streamvbyte format.
// Returns the number of bytes written.
size_t streamvbyte_encode(const uint32_t *in, uint32_t count, uint8_t *out) {
uint8_t *keyPtr = out;
uint32_t keyLen = (count + 3) / 4; // one 2-bit code per value, rounded up to a full byte
uint8_t *dataPtr = keyPtr + keyLen; // variable byte data after all keys
return svb_encode_scalar(in, keyPtr, dataPtr, count) - out;
}
static inline __m128i _decode_avx(uint32_t key,
const uint8_t *__restrict__ *dataPtrPtr) {
uint8_t len = lengthTable[key];
__m128i Data = _mm_loadu_si128((__m128i *) *dataPtrPtr);
__m128i Shuf = *(__m128i *) &shuffleTable[key];
Data = _mm_shuffle_epi8(Data, Shuf);
*dataPtrPtr += len;
return Data;
}
static inline void _write_avx(uint32_t *out, __m128i Vec) {
_mm_storeu_si128((__m128i *) out, Vec);
}
static inline uint32_t _decode_data(const uint8_t **dataPtrPtr, uint8_t code) {
const uint8_t *dataPtr = *dataPtrPtr;
uint32_t val;
if (code == 0) { // 1 byte
val = (uint32_t) * dataPtr;
dataPtr += 1;
} else if (code == 1) { // 2 bytes
val = (uint32_t) * (uint16_t *) dataPtr;
dataPtr += 2;
} else if (code == 2) { // 3 bytes
val = (uint32_t) * (uint16_t *) dataPtr;
val |= *(dataPtr + 2) << 16;
dataPtr += 3;
} else { // code == 3
val = *(uint32_t *) dataPtr; // 4 bytes
dataPtr += 4;
}
*dataPtrPtr = dataPtr;
return val;
}
static const uint8_t *svb_decode_scalar(uint32_t *outPtr, const uint8_t *keyPtr,
const uint8_t *dataPtr, uint32_t count) {
if (count == 0)
return dataPtr; // no reads or writes if no data
uint8_t shift = 0;
uint32_t key = *keyPtr++;
for (uint32_t c = 0; c < count; c++) {
if (shift == 8) {
shift = 0;
key = *keyPtr++;
}
uint32_t val = _decode_data(&dataPtr, (key >> shift) & 0x3);
*outPtr++ = val;
shift += 2;
}
return dataPtr; // pointer to first unused byte after end
}
const uint8_t *svb_decode_avx_simple(uint32_t *out,
const uint8_t *__restrict__ keyPtr, const uint8_t *__restrict__ dataPtr,
uint64_t count) {
uint64_t keybytes = count / 4; // number of complete key bytes
__m128i Data;
if (keybytes >= 8) {
int64_t Offset = -(int64_t) keybytes / 8 + 1;
const uint64_t *keyPtr64 = (const uint64_t *) keyPtr - Offset;
uint64_t nextkeys = keyPtr64[Offset];
for (; Offset != 0; ++Offset) {
uint64_t keys = nextkeys;
nextkeys = keyPtr64[Offset + 1];
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 4, Data);
keys >>= 16;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out + 8, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 12, Data);
keys >>= 16;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out + 16, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 20, Data);
keys >>= 16;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out + 24, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 28, Data);
out += 32;
}
{
uint64_t keys = nextkeys;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 4, Data);
keys >>= 16;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out + 8, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 12, Data);
keys >>= 16;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out + 16, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 20, Data);
keys >>= 16;
Data = _decode_avx((keys & 0xFF), &dataPtr);
_write_avx(out + 24, Data);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
_write_avx(out + 28, Data);
out += 32;
}
}
uint64_t consumedkeys = keybytes - (keybytes & 7);
return svb_decode_scalar(out, keyPtr + consumedkeys, dataPtr, count & 31);
}
// Read count 32-bit integers in streamvbyte format from in, storing the result in out. Returns the number of bytes read.
size_t streamvbyte_decode(const uint8_t* in, uint32_t* out, uint32_t count) {
if (count == 0)
return 0;
const uint8_t *keyPtr = in; // full list of keys is next
uint32_t keyLen = ((count + 3) / 4); // one 2-bit code per value (rounded up)
const uint8_t *dataPtr = keyPtr + keyLen; // data starts at end of keys
return svb_decode_avx_simple(out, keyPtr, dataPtr, count) - in;
}
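Taken together, the encoder and decoder above define the streamvbyte layout: a block of key bytes comes first (one 2-bit code per integer, four codes per key byte, each code being the encoded byte length minus one), followed by the variable-length data bytes. The SIMD decoder exploits this layout: each key byte indexes lengthTable for the total data bytes consumed by its group of four integers, and shuffleTable for a pshufb mask that expands those packed bytes into four 32-bit lanes, the -1 entries zeroing the unused byte positions. Below is a minimal Rust sketch of the same scalar scheme; the helper names are hypothetical and belong to neither tantivy nor streamvbyte.

fn svb_encode(input: &[u32]) -> Vec<u8> {
    let key_len = (input.len() + 3) / 4;
    let mut out = vec![0u8; key_len]; // key bytes first; data is appended after
    let mut data = Vec::new();
    for (i, &val) in input.iter().enumerate() {
        // 2-bit code = encoded byte length - 1
        let code: u8 = if val < 1 << 8 {
            0
        } else if val < 1 << 16 {
            1
        } else if val < 1 << 24 {
            2
        } else {
            3
        };
        data.extend_from_slice(&val.to_le_bytes()[..code as usize + 1]);
        out[i / 4] |= code << ((i % 4) * 2);
    }
    out.extend_from_slice(&data);
    out
}

fn svb_decode(bytes: &[u8], count: usize) -> Vec<u32> {
    let key_len = (count + 3) / 4;
    let (keys, mut data) = bytes.split_at(key_len);
    let mut out = Vec::with_capacity(count);
    for i in 0..count {
        let code = (keys[i / 4] >> ((i % 4) * 2)) & 0b11;
        let len = code as usize + 1;
        let mut buf = [0u8; 4];
        buf[..len].copy_from_slice(&data[..len]);
        out.push(u32::from_le_bytes(buf));
        data = &data[len..];
    }
    out
}

A round trip such as svb_decode(&svb_encode(&[5, 300, 70_000, 1 << 30]), 4) returns the original values and, at 1 + 1 + 2 + 3 + 4 = 11 bytes, illustrates the one-key-byte-per-four-values overhead.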

View File

@@ -0,0 +1,575 @@
#include "streamvbyte.h"
#if defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif
static uint8_t lengthTable[256] = { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9,
10, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7, 8,
9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10,
11, 12, 10, 11, 12, 13, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
11, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10,
8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11,
12, 10, 11, 12, 13, 11, 12, 13, 14, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12,
13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10,
11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 7, 8, 9, 10, 8,
9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11, 12,
10, 11, 12, 13, 11, 12, 13, 14, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12,
13, 14, 12, 13, 14, 15, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
13, 14, 15, 16 };
static uint8_t shuffleTable[256][16] = { { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1,
-1, -1, 3, -1, -1, -1 }, // 1111
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 2111
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 3111
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 4111
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 1211
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 2211
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 3211
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 4211
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 1311
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 2311
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 3311
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, -1, -1, -1 }, // 4311
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, -1, -1, -1 }, // 1411
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, -1, -1, -1 }, // 2411
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, -1, -1, -1 }, // 3411
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, -1, -1, -1 }, // 4411
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1 }, // 1121
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 2121
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 3121
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 4121
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 1221
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 2221
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 3221
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 4221
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 1321
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 2321
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 3321
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, -1, -1, -1 }, // 4321
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, -1, -1, -1 }, // 1421
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, -1, -1, -1 }, // 2421
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, -1, -1, -1 }, // 3421
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, -1, -1, -1 }, // 4421
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1 }, // 1131
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 2131
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 3131
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 4131
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 1231
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 2231
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 3231
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 4231
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 1331
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 2331
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 3331
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, -1, -1, -1 }, // 4331
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, -1, -1 }, // 1431
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, -1, -1, -1 }, // 2431
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, -1, -1, -1 }, // 3431
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, -1, -1, -1 }, // 4431
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1 }, // 1141
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 2141
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 3141
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 4141
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 1241
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 2241
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 3241
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 4241
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 1341
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 2341
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 3341
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, -1, -1, -1 }, // 4341
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1 }, // 1441
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, -1 }, // 2441
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1 }, // 3441
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1 }, // 4441
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1 }, // 1112
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 2112
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 3112
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 4112
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 1212
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 2212
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 3212
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 4212
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 1312
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 2312
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 3312
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, -1, -1 }, // 4312
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, -1, -1 }, // 1412
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, -1, -1 }, // 2412
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, -1, -1 }, // 3412
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, -1, -1 }, // 4412
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1 }, // 1122
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 2122
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 3122
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 4122
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 1222
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 2222
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 3222
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 4222
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 1322
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 2322
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 3322
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, -1, -1 }, // 4322
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, -1, -1 }, // 1422
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, -1, -1 }, // 2422
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, -1, -1 }, // 3422
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, -1, -1 }, // 4422
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1 }, // 1132
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 2132
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 3132
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 4132
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 1232
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 2232
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 3232
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 4232
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 1332
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 2332
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 3332
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, -1, -1 }, // 4332
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, -1, -1 }, // 1432
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, -1, -1 }, // 2432
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, -1, -1 }, // 3432
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, -1, -1 }, // 4432
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1 }, // 1142
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 2142
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 3142
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 4142
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 1242
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 2242
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 3242
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 4242
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 1342
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 2342
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 3342
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, -1, -1 }, // 4342
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1 }, // 1442
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1 }, // 2442
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1 }, // 3442
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1, -1 }, // 4442
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1 }, // 1113
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 2113
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 3113
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 4113
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 1213
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 2213
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 3213
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 4213
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 1313
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 2313
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 3313
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, -1 }, // 4313
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, -1 }, // 1413
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, -1 }, // 2413
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, -1 }, // 3413
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, -1 }, // 4413
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1 }, // 1123
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 2123
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 3123
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 4123
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 1223
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 2223
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 3223
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 4223
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 1323
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 2323
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 3323
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, -1 }, // 4323
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, -1 }, // 1423
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, -1 }, // 2423
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, -1 }, // 3423
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, -1 }, // 4423
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1 }, // 1133
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 2133
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 3133
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 4133
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 1233
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 2233
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 3233
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 4233
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 1333
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 2333
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 3333
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, -1 }, // 4333
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, -1 }, // 1433
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, -1 }, // 2433
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, -1 }, // 3433
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, -1 }, // 4433
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1 }, // 1143
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 2143
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 3143
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 4143
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 1243
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 2243
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 3243
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 4243
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 1343
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 2343
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 3343
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, -1 }, // 4343
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1 }, // 1443
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1 }, // 2443
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 }, // 3443
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1 }, // 4443
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6 }, // 1114
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 2114
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 3114
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 4114
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 1214
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 2214
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 3214
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 4214
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 1314
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 2314
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 3314
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, 11 }, // 4314
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, 9 }, // 1414
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, 10 }, // 2414
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, 11 }, // 3414
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, 12 }, // 4414
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7 }, // 1124
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 2124
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 3124
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 4124
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 1224
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 2224
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 3224
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 4224
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 1324
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 2324
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 3324
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, 12 }, // 4324
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, 10 }, // 1424
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11 }, // 2424
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, 12 }, // 3424
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, 13 }, // 4424
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8 }, // 1134
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 2134
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 3134
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 4134
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 1234
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 2234
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 3234
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 4234
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 1334
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 2334
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 3334
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, 13 }, // 4334
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, 11 }, // 1434
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, 12 }, // 2434
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, 13 }, // 3434
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14 }, // 4434
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9 }, // 1144
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 2144
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 3144
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 4144
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 1244
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 2244
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 3244
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 4244
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 1344
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 2344
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 3344
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, 14 }, // 4344
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }, // 1444
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, // 2444
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }, // 3444
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } // 4444
};
static uint8_t _encode_data(uint32_t val, uint8_t *__restrict__ *dataPtrPtr) {
uint8_t *dataPtr = *dataPtrPtr;
uint8_t code;
if (val < (1 << 8)) { // 1 byte
*dataPtr = (uint8_t)(val);
*dataPtrPtr += 1;
code = 0;
} else if (val < (1 << 16)) { // 2 bytes
*(uint16_t *) dataPtr = (uint16_t)(val);
*dataPtrPtr += 2;
code = 1;
} else if (val < (1 << 24)) { // 3 bytes
*(uint16_t *) dataPtr = (uint16_t)(val);
*(dataPtr + 2) = (uint8_t)(val >> 16);
*dataPtrPtr += 3;
code = 2;
} else { // 4 bytes
*(uint32_t *) dataPtr = val;
*dataPtrPtr += 4;
code = 3;
}
return code;
}
static uint8_t *svb_encode_scalar_d1_init(const uint32_t *in,
uint8_t *__restrict__ keyPtr, uint8_t *__restrict__ dataPtr,
uint32_t count, uint32_t prev) {
if (count == 0)
return dataPtr; // exit immediately if no data
uint8_t shift = 0; // cycles 0, 2, 4, 6, 0, 2, 4, 6, ...
uint8_t key = 0;
for (uint32_t c = 0; c < count; c++) {
if (shift == 8) {
shift = 0;
*keyPtr++ = key;
key = 0;
}
uint32_t val = in[c] - prev;
prev = in[c];
uint8_t code = _encode_data(val, &dataPtr);
key |= code << shift;
shift += 2;
}
*keyPtr = key; // write last key (no increment needed)
return dataPtr; // pointer to first unused data byte
}
size_t streamvbyte_delta_encode(const uint32_t *in, uint32_t count, uint8_t *out,
uint32_t prev) {
uint8_t *keyPtr = out; // keys are written first, at the start of out
uint32_t keyLen = (count + 3) / 4; // one 2-bit code per value, rounded up to a full byte
uint8_t *dataPtr = keyPtr + keyLen; // variable byte data after all keys
return svb_encode_scalar_d1_init(in, keyPtr, dataPtr, count, prev) - out;
}
static inline __m128i _decode_avx(uint32_t key, const uint8_t *__restrict__ *dataPtrPtr) {
uint8_t len = lengthTable[key];
__m128i Data = _mm_loadu_si128((__m128i *) *dataPtrPtr);
__m128i Shuf = *(__m128i *) &shuffleTable[key];
Data = _mm_shuffle_epi8(Data, Shuf);
*dataPtrPtr += len;
return Data;
}
#define BroadcastLastXMM 0xFF // bits 0-7 all set to choose highest element
static inline void _write_avx(uint32_t *out, __m128i Vec) {
_mm_storeu_si128((__m128i *) out, Vec);
}
static __m128i _write_avx_d1(uint32_t *out, __m128i Vec, __m128i Prev) {
__m128i Add = _mm_slli_si128(Vec, 4); // Cycle 1: [- A B C] (already done)
Prev = _mm_shuffle_epi32(Prev, BroadcastLastXMM); // Cycle 2: [P P P P]
Vec = _mm_add_epi32(Vec, Add); // Cycle 2: [A AB BC CD]
Add = _mm_slli_si128(Vec, 8); // Cycle 3: [- - A AB]
Vec = _mm_add_epi32(Vec, Prev); // Cycle 3: [PA PAB PBC PCD]
Vec = _mm_add_epi32(Vec, Add); // Cycle 4: [PA PAB PABC PABCD]
_write_avx(out, Vec);
return Vec;
}
#ifndef _MSC_VER
static __m128i High16To32 = {0xFFFF0B0AFFFF0908, 0xFFFF0F0EFFFF0D0C};
#else
static __m128i High16To32 = {8, 9, -1, -1, 10, 11, -1, -1,
12, 13, -1, -1, 14, 15, -1, -1};
#endif
static inline __m128i _write_16bit_avx_d1(uint32_t *out, __m128i Vec, __m128i Prev) {
// vec == [A B C D E F G H] (16 bit values)
__m128i Add = _mm_slli_si128(Vec, 2); // [- A B C D E F G]
Prev = _mm_shuffle_epi32(Prev, BroadcastLastXMM); // [P P P P] (32-bit)
Vec = _mm_add_epi32(Vec, Add); // [A AB BC CD DE FG GH]
Add = _mm_slli_si128(Vec, 4); // [- - A AB BC CD DE EF]
Vec = _mm_add_epi32(Vec, Add); // [A AB ABC ABCD BCDE CDEF DEFG EFGH]
__m128i V1 = _mm_cvtepu16_epi32(Vec); // [A AB ABC ABCD] (32-bit)
V1 = _mm_add_epi32(V1, Prev); // [PA PAB PABC PABCD] (32-bit)
__m128i V2 =
_mm_shuffle_epi8(Vec, High16To32); // [BCDE CDEF DEFG EFGH] (32-bit)
V2 = _mm_add_epi32(V1, V2); // [PABCDE PABCDEF PABCDEFG PABCDEFGH] (32-bit)
_write_avx(out, V1);
_write_avx(out + 4, V2);
return V2;
}
static inline uint32_t _decode_data(const uint8_t **dataPtrPtr, uint8_t code) {
const uint8_t *dataPtr = *dataPtrPtr;
uint32_t val;
if (code == 0) { // 1 byte
val = (uint32_t) * dataPtr;
dataPtr += 1;
} else if (code == 1) { // 2 bytes
val = (uint32_t) * (uint16_t *) dataPtr;
dataPtr += 2;
} else if (code == 2) { // 3 bytes
val = (uint32_t) * (uint16_t *) dataPtr;
val |= *(dataPtr + 2) << 16;
dataPtr += 3;
} else { // code == 3
val = *(uint32_t *) dataPtr; // 4 bytes
dataPtr += 4;
}
*dataPtrPtr = dataPtr;
return val;
}
const uint8_t *svb_decode_scalar_d1_init(uint32_t *outPtr, const uint8_t *keyPtr,
const uint8_t *dataPtr, uint32_t count,
uint32_t prev) {
if (count == 0)
return dataPtr; // no reads or writes if no data
uint8_t shift = 0;
uint32_t key = *keyPtr++;
for (uint32_t c = 0; c < count; c++) {
if (shift == 8) {
shift = 0;
key = *keyPtr++;
}
uint32_t val = _decode_data(&dataPtr, (key >> shift) & 0x3);
val += prev;
*outPtr++ = val;
prev = val;
shift += 2;
}
return dataPtr; // pointer to first unused byte after end
}
const uint8_t *svb_decode_avx_d1_init(uint32_t *out, const uint8_t *__restrict__ keyPtr,
const uint8_t *__restrict__ dataPtr, uint64_t count, uint32_t prev) {
uint64_t keybytes = count / 4; // number of complete key bytes
if (keybytes >= 8) {
__m128i Prev = _mm_set1_epi32(prev);
__m128i Data;
int64_t Offset = -(int64_t) keybytes / 8 + 1;
const uint64_t *keyPtr64 = (const uint64_t *) keyPtr - Offset;
uint64_t nextkeys = keyPtr64[Offset];
for (; Offset != 0; ++Offset) {
uint64_t keys = nextkeys;
nextkeys = keyPtr64[Offset + 1];
// faster 16-bit delta since we only have 8-bit values
if (!keys) { // 32 1-byte ints in a row
Data = _mm_cvtepu8_epi16(_mm_lddqu_si128((__m128i *) (dataPtr)));
Prev = _write_16bit_avx_d1(out, Data, Prev);
Data = _mm_cvtepu8_epi16(
_mm_lddqu_si128((__m128i *) (dataPtr + 8)));
Prev = _write_16bit_avx_d1(out + 8, Data, Prev);
Data = _mm_cvtepu8_epi16(
_mm_lddqu_si128((__m128i *) (dataPtr + 16)));
Prev = _write_16bit_avx_d1(out + 16, Data, Prev);
Data = _mm_cvtepu8_epi16(
_mm_lddqu_si128((__m128i *) (dataPtr + 24)));
Prev = _write_16bit_avx_d1(out + 24, Data, Prev);
out += 32;
dataPtr += 32;
continue;
}
Data = _decode_avx(keys & 0x00FF, &dataPtr);
Prev = _write_avx_d1(out, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 4, Data, Prev);
keys >>= 16;
Data = _decode_avx((keys & 0x00FF), &dataPtr);
Prev = _write_avx_d1(out + 8, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 12, Data, Prev);
keys >>= 16;
Data = _decode_avx((keys & 0x00FF), &dataPtr);
Prev = _write_avx_d1(out + 16, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 20, Data, Prev);
keys >>= 16;
Data = _decode_avx((keys & 0x00FF), &dataPtr);
Prev = _write_avx_d1(out + 24, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 28, Data, Prev);
out += 32;
}
{
uint64_t keys = nextkeys;
// faster 16-bit delta since we only have 8-bit values
if (!keys) { // 32 1-byte ints in a row
Data = _mm_cvtepu8_epi16(_mm_lddqu_si128((__m128i *) (dataPtr)));
Prev = _write_16bit_avx_d1(out, Data, Prev);
Data = _mm_cvtepu8_epi16(
_mm_lddqu_si128((__m128i *) (dataPtr + 8)));
Prev = _write_16bit_avx_d1(out + 8, Data, Prev);
Data = _mm_cvtepu8_epi16(
_mm_lddqu_si128((__m128i *) (dataPtr + 16)));
Prev = _write_16bit_avx_d1(out + 16, Data, Prev);
Data = _mm_cvtepu8_epi16(
_mm_loadl_epi64((__m128i *) (dataPtr + 24)));
Prev = _write_16bit_avx_d1(out + 24, Data, Prev);
out += 32;
dataPtr += 32;
} else {
Data = _decode_avx(keys & 0x00FF, &dataPtr);
Prev = _write_avx_d1(out, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 4, Data, Prev);
keys >>= 16;
Data = _decode_avx((keys & 0x00FF), &dataPtr);
Prev = _write_avx_d1(out + 8, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 12, Data, Prev);
keys >>= 16;
Data = _decode_avx((keys & 0x00FF), &dataPtr);
Prev = _write_avx_d1(out + 16, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 20, Data, Prev);
keys >>= 16;
Data = _decode_avx((keys & 0x00FF), &dataPtr);
Prev = _write_avx_d1(out + 24, Data, Prev);
Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
Prev = _write_avx_d1(out + 28, Data, Prev);
out += 32;
}
}
prev = out[-1];
}
uint64_t consumedkeys = keybytes - (keybytes & 7);
return svb_decode_scalar_d1_init(out, keyPtr + consumedkeys, dataPtr,
count & 31, prev);
}
size_t streamvbyte_delta_decode(const uint8_t* in, uint32_t* out,
uint32_t count, uint32_t prev) {
uint32_t keyLen = ((count + 3) / 4); // one 2-bit code per value (rounded up)
const uint8_t *keyPtr = in;
const uint8_t *dataPtr = keyPtr + keyLen; // data starts at end of keys
return svb_decode_avx_d1_init(out, keyPtr, dataPtr, count, prev) - in;
}
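The delta variant changes only what flows through the byte codec: svb_encode_scalar_d1_init stores successive differences in[c] - prev, so an ascending sequence such as posting-list doc ids becomes a stream of small gaps that mostly fit in one byte, and decoding is a running prefix sum. The AVX path reconstructs that prefix sum inside the register (the shift-and-add cycles of _write_avx_d1) and takes a fast path whenever a group's 8 key bytes are all zero, meaning 32 consecutive 1-byte deltas. A scalar Rust sketch of just the delta layer, assuming any plain streamvbyte codec underneath; the function names are hypothetical.

fn delta_encode(values: &[u32], mut prev: u32) -> Vec<u32> {
    values
        .iter()
        .map(|&v| {
            let gap = v.wrapping_sub(prev); // gaps, not absolute values, get byte-coded
            prev = v;
            gap
        })
        .collect()
}

fn delta_decode(gaps: &[u32], mut prev: u32) -> Vec<u32> {
    gaps.iter()
        .map(|&g| {
            prev = prev.wrapping_add(g); // running prefix sum restores the originals
            prev
        })
        .collect()
}

For instance, delta_encode(&[3, 7, 8, 20], 0) yields [3, 4, 1, 12]: every gap fits in a single byte even though a long sorted list would eventually need 2 to 4 bytes per absolute value.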

View File

@@ -0,0 +1,73 @@
#include <stdio.h>
#include <stdlib.h>
#include "streamvbyte.h"
#include "streamvbytedelta.h"
int main() {
int N = 4096;
uint32_t * datain = malloc(N * sizeof(uint32_t));
uint8_t * compressedbuffer = malloc(2 * N * sizeof(uint32_t));
uint32_t * recovdata = malloc(N * sizeof(uint32_t));
for (int length = 0; length <= N;) {
printf("length = %d \n", length);
for (uint32_t gap = 1; gap <= 387420489; gap *= 3) {
for (int k = 0; k < length; ++k)
datain[k] = gap;
size_t compsize = streamvbyte_encode(datain, length,
compressedbuffer);
size_t usedbytes = streamvbyte_decode(compressedbuffer, recovdata,
length);
if (compsize != usedbytes) {
printf(
"[streamvbyte_decode] code is buggy gap = %d, size mismatch %d %d \n",
(int) gap, (int) compsize, (int) usedbytes);
return -1;
}
for (int k = 0; k < length; ++k) {
if (recovdata[k] != datain[k]) {
printf("[streamvbyte_decode] code is buggy gap = %d\n",
(int) gap);
return -1;
}
}
}
printf("Delta \n");
for (size_t gap = 1; gap <= 531441; gap *= 3) {
for (int k = 0; k < length; ++k)
datain[k] = gap * k;
size_t compsize = streamvbyte_delta_encode(datain, length,
compressedbuffer, 0);
size_t usedbytes = streamvbyte_delta_decode(compressedbuffer,
recovdata, length, 0);
if (compsize != usedbytes) {
printf(
"[streamvbyte_delta_decode] code is buggy gap = %d, size mismatch %d %d \n",
(int) gap, (int) compsize, (int) usedbytes);
return -1;
}
for (int k = 0; k < length; ++k) {
if (recovdata[k] != datain[k]) {
printf(
"[streamvbyte_delta_decode] code is buggy gap = %d\n",
(int) gap);
return -1;
}
}
}
if (length < 128)
++length;
else {
length *= 2;
}
}
free(datain);
free(compressedbuffer);
free(recovdata);
printf("Code looks good.\n");
return 0;
}

View File

@@ -1,7 +1,9 @@
extern crate rustc_serialize;
extern crate tantivy;
extern crate tempdir;
#[macro_use]
extern crate serde_json;
use std::path::Path;
use tempdir::TempDir;
use tantivy::Index;
@@ -64,8 +66,7 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
//
// This will actually just save a meta.json
// with our schema in the directory.
let index = try!(Index::create(index_path, schema.clone()));
let index = Index::create(index_path, schema.clone())?;
// To insert document we need an index writer.
@@ -75,7 +76,7 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
//
// Here we use a buffer of 50MB per thread. Using a bigger
// heap for the indexer can increase its throughput.
let mut index_writer = try!(index.writer(50_000_000));
let mut index_writer = index.writer(50_000_000)?;
// Let's index our documents!
// We first need a handle on the title and the body field.
@@ -99,23 +100,37 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
// ### Create a document directly from json.
//
// Alternatively, we can use our schema to parse
// a document object directly from json.
let mice_and_men_doc = try!(schema.parse_document(r#"{
// Alternatively, we can use our schema to parse a
// document object directly from json.
// `parse_document` expects a JSON string; we build it with the `json!`
// macro from `serde_json` for the convenience of multi-line literals.
let json = json!({
"title": "Of Mice and Men",
"body": "few miles south of Soledad, the Salinas River drops in close to the hillside bank and runs deep and green. The water is warm too, for it has slipped twinkling over the yellow sands in the sunlight before reaching the narrow pool. On one side of the river the golden foothill slopes curve up to the strong and rocky Gabilan Mountains, but on the valley side the water is lined with trees—willows fresh and green with every spring, carrying in their lower leaf junctures the debris of the winters flooding; and sycamores with mottled, white,recumbent limbs and branches that arch over the pool"
}"#));
"body": "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
});
let mice_and_men_doc = schema.parse_document(&json.to_string())?;
index_writer.add_document(mice_and_men_doc);
// Multi-valued fields are allowed; they are
// expressed in JSON by an array.
// The following document has two titles.
let frankenstein_doc = try!(schema.parse_document(r#"{
"title": ["Frankenstein", "The Modern Promotheus"],
"body": "You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence in the success of my undertaking."
}"#));
let json = json!({
"title": ["Frankenstein", "The Modern Prometheus"],
"body": "You will rejoice to hear that no disaster has accompanied the commencement of an \
enterprise which you have regarded with such evil forebodings. I arrived here \
yesterday, and my first task is to assure my dear sister of my welfare and \
increasing confidence in the success of my undertaking."
});
let frankenstein_doc = schema.parse_document(&json.to_string())?;
index_writer.add_document(frankenstein_doc);
// This is an example, so we will only index 3 documents
@@ -136,7 +151,7 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
// the existence of new documents.
//
// This call is blocking.
try!(index_writer.commit());
index_writer.commit()?;
// If `.commit()` returns correctly, then all of the
// documents that have been added are guaranteed to be
@@ -152,7 +167,7 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
// Let's search our index. Start by reloading
// searchers in the index. This should be done
// after every commit().
try!(index.load_searchers());
index.load_searchers()?;
// Afterwards create one (or more) searchers.
//
@@ -169,7 +184,7 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
// QueryParser may fail if the query is not in the right
// format. For user facing applications, this can be a problem.
// A ticket has been opened regarding this problem.
let query = try!(query_parser.parse_query("sea whale"));
let query = query_parser.parse_query("sea whale")?;
// A query defines a set of documents, as
@@ -187,7 +202,7 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
let mut top_collector = TopCollector::with_limit(10);
// We can now perform our query.
try!(searcher.search(&*query, &mut top_collector));
searcher.search(&*query, &mut top_collector)?;
// Our top collector now contains the 10
// most relevant doc ids...
@@ -201,9 +216,15 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
// a title.
for doc_address in doc_addresses {
let retrieved_doc = try!(searcher.doc(&doc_address));
let retrieved_doc = searcher.doc(&doc_address)?;
println!("{}", schema.to_json(&retrieved_doc));
}
// Wait for indexing and merging threads to shut down.
// Usually this isn't needed, but in `main` we try to
// delete the temporary directory and that fails on
// Windows if the files are still open.
index_writer.wait_merging_threads()?;
Ok(())
}

View File

@@ -18,11 +18,10 @@ pub trait StreamingIterator<'a, T> {
impl<'a, 'b> TokenIter<'b> {
fn consume_token(&'a mut self) -> Option<&'a str> {
for c in &mut self.chars {
if c.is_alphanumeric() {
append_char_lowercase(c, &mut self.term_buffer);
}
else {
} else {
break;
}
}
@@ -32,9 +31,8 @@ impl<'a, 'b> TokenIter<'b> {
impl<'a, 'b> StreamingIterator<'a, &'a str> for TokenIter<'b> {
#[inline]
fn next(&'a mut self,) -> Option<&'a str> {
fn next(&'a mut self) -> Option<&'a str> {
self.term_buffer.clear();
// skipping non-letter characters.
loop {
@@ -45,24 +43,24 @@ impl<'a, 'b> StreamingIterator<'a, &'a str> for TokenIter<'b> {
return self.consume_token();
}
}
None => { return None; }
None => {
return None;
}
}
}
}
}
pub struct SimpleTokenizer;
impl SimpleTokenizer {
pub fn tokenize<'a>(&self, text: &'a str) -> TokenIter<'a> {
TokenIter {
term_buffer: String::new(),
chars: text.chars(),
}
}
}
}
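The StreamingIterator indirection above exists so that next() can hand out a &str borrowed from the tokenizer's own term_buffer, which is cleared and refilled for each token; tokenization therefore allocates no per-token String. A hedged usage sketch against the API as it appears in this diff (the import paths are assumptions):

let tokenizer = SimpleTokenizer;
let mut tokens = tokenizer.tokenize("Salinas River, 1937");
while let Some(token) = tokens.next() {
    // lowercased alphanumeric runs: "salinas", "river", "1937"
    println!("{}", token);
}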

View File

@@ -7,8 +7,8 @@ use Score;
/// Collector that does nothing.
/// This is used in the chain Collector and will hopefully
/// be optimized away by the compiler.
pub struct DoNothingCollector;
impl Collector for DoNothingCollector {
#[inline]
@@ -24,10 +24,10 @@ impl Collector for DoNothingCollector {
/// are known at compile time.
pub struct ChainedCollector<Left: Collector, Right: Collector> {
left: Left,
right: Right
right: Right,
}
impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
/// Adds a collector
pub fn push<C: Collector>(self, new_collector: &mut C) -> ChainedCollector<Self, &mut C> {
ChainedCollector {
@@ -38,7 +38,10 @@ impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
}
impl<Left: Collector, Right: Collector> Collector for ChainedCollector<Left, Right> {
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> Result<()> {
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()> {
try!(self.left.set_segment(segment_local_id, segment));
try!(self.right.set_segment(segment_local_id, segment));
Ok(())
@@ -70,9 +73,7 @@ mod tests {
let mut top_collector = TopCollector::with_limit(2);
let mut count_collector = CountCollector::default();
{
let mut collectors = chain()
.push(&mut top_collector)
.push(&mut count_collector);
let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
collectors.collect(1, 0.2);
collectors.collect(2, 0.1);
collectors.collect(3, 0.5);
@@ -80,4 +81,4 @@ mod tests {
assert_eq!(count_collector.count(), 3);
assert!(top_collector.at_capacity());
}
}
}
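chain() builds the collector chain in the type system: every push wraps the existing chain in ChainedCollector<Self, &mut C>, so collect() is statically dispatched, and the DoNothingCollector seed is what the doc comment above hopes the compiler optimizes away. A usage sketch mirroring the test; the seed type is an assumption based on that comment:

let mut top_collector = TopCollector::with_limit(10);
let mut count_collector = CountCollector::default();
// Each push nests one more level, roughly:
// ChainedCollector<ChainedCollector<DoNothingCollector, &mut TopCollector>, &mut CountCollector>
let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
collectors.collect(1, 0.5); // forwarded to both collectors, with no dynamic dispatch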

View File

@@ -6,7 +6,7 @@ use SegmentReader;
use SegmentLocalId;
/// `CountCollector` collector only counts how many
/// documents match the query.
pub struct CountCollector {
count: usize,
}
@@ -14,20 +14,18 @@ pub struct CountCollector {
impl CountCollector {
/// Returns the count of documents that were
/// collected.
pub fn count(&self,) -> usize {
pub fn count(&self) -> usize {
self.count
}
}
impl Default for CountCollector {
fn default() -> CountCollector {
CountCollector {count: 0,
}
CountCollector { count: 0 }
}
}
impl Collector for CountCollector {
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
Ok(())
}
@@ -47,11 +45,11 @@ mod tests {
#[bench]
fn build_collector(b: &mut Bencher) {
b.iter(|| {
let mut count_collector = CountCollector::default();
for doc in 0..1_000_000 {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
}
}

View File

@@ -0,0 +1,117 @@
use std::cmp::Eq;
use std::collections::HashMap;
use std::hash::Hash;
use collector::Collector;
use fastfield::FastFieldReader;
use schema::Field;
use DocId;
use Result;
use Score;
use SegmentReader;
use SegmentLocalId;
/// Facet collector for i64/u64 fast field
pub struct FacetCollector<T>
where T: FastFieldReader,
T::ValueType: Eq + Hash
{
counters: HashMap<T::ValueType, u64>,
field: Field,
ff_reader: Option<T>,
}
impl<T> FacetCollector<T>
where T: FastFieldReader,
T::ValueType: Eq + Hash
{
/// Creates a new facet collector for aggregating a given field.
pub fn new(field: Field) -> FacetCollector<T> {
FacetCollector {
counters: HashMap::new(),
field: field,
ff_reader: None,
}
}
}
impl<T> Collector for FacetCollector<T>
where T: FastFieldReader,
T::ValueType: Eq + Hash
{
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _: Score) {
let val = self.ff_reader
.as_ref()
.expect("collect() was called before set_segment. This should never happen.")
.get(doc);
*(self.counters.entry(val).or_insert(0)) += 1;
}
}
#[cfg(test)]
mod tests {
use collector::{chain, FacetCollector};
use query::QueryParser;
use fastfield::{I64FastFieldReader, U64FastFieldReader};
use schema::{self, FAST, STRING};
use Index;
#[test]
// create 10 documents; fill the u64 fast field with i % 2 and the
// i64 fast field with i % 3, then make sure the facet counters are filled correctly
fn test_facet_collector_results() {
let mut schema_builder = schema::SchemaBuilder::new();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64,
text_field => "text"
));
}
}
assert_eq!(index_writer.commit().unwrap(), 10u64);
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let mut ffvf_i64: FacetCollector<I64FastFieldReader> = FacetCollector::new(num_field_i64);
let mut ffvf_u64: FacetCollector<U64FastFieldReader> = FacetCollector::new(num_field_u64);
{
// perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64);
let query_parser = QueryParser::new(schema, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap();
}
assert_eq!(ffvf_u64.counters[&0], 5);
assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3);
}
}

View File

@@ -13,14 +13,17 @@ pub use self::multi_collector::MultiCollector;
mod top_collector;
pub use self::top_collector::TopCollector;
mod facet_collector;
pub use self::facet_collector::FacetCollector;
mod chained_collector;
pub use self::chained_collector::chain;
/// Collectors are in charge of collecting and retaining relevant
/// information from the document found and scored by the query.
///
/// For instance,
///
/// - keeping track of the top 10 best documents
/// - computing a breakdown over a fast field
@@ -29,7 +32,7 @@ pub use self::chained_collector::chain;
/// Queries are in charge of pushing the `DocSet` to the collector.
///
/// As they work on multiple segments, they first inform
/// the collector of a change in a segment and then
/// call the `collect` method to push the document to the collector.
///
/// Temporally, our collector will receive calls
@@ -46,16 +49,22 @@ pub use self::chained_collector::chain;
///
/// Segments are not guaranteed to be visited in any specific order.
pub trait Collector {
/// `set_segment` is called before beginning to enumerate
/// on this segment.
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> Result<()>;
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()>;
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score);
}
impl<'a, C: Collector> Collector for &'a mut C {
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> Result<()> {
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()> {
(*self).set_segment(segment_local_id, segment)
}
/// The query pushes the scored document to the collector via this method.
@@ -74,9 +83,10 @@ pub mod tests {
use Score;
use core::SegmentReader;
use SegmentLocalId;
use fastfield::U32FastFieldReader;
use fastfield::U64FastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
/// Stores all of the doc ids.
/// This collector is only used for tests.
/// It is unusable in practice, as it does not store
@@ -89,7 +99,7 @@ pub mod tests {
impl TestCollector {
/// Returns the exhaustive list of documents.
pub fn docs(self,) -> Vec<DocId> {
pub fn docs(self) -> Vec<DocId> {
self.docs
}
}
@@ -105,7 +115,6 @@ pub mod tests {
}
impl Collector for TestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.offset += self.segment_max_doc;
self.segment_max_doc = reader.max_doc();
@@ -116,18 +125,18 @@ pub mod tests {
self.docs.push(doc + self.offset);
}
}
/// Collects in order all of the fast fields for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct FastFieldTestCollector {
vals: Vec<u32>,
vals: Vec<u64>,
field: Field,
ff_reader: Option<U32FastFieldReader>,
ff_reader: Option<U64FastFieldReader>,
}
impl FastFieldTestCollector {
@@ -139,14 +148,14 @@ pub mod tests {
}
}
pub fn vals(self,) -> Vec<u32> {
pub fn vals(self) -> Vec<u64> {
self.vals
}
}
impl Collector for FastFieldTestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = reader.get_fast_field_reader(self.field);
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
Ok(())
}
@@ -160,12 +169,12 @@ pub mod tests {
#[bench]
fn build_collector(b: &mut Bencher) {
b.iter(|| {
let mut count_collector = CountCollector::default();
let docs: Vec<u32> = (0..1_000_000).collect();
for doc in docs {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
}
}
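Implementing the trait is deliberately small: per the call sequence documented above, a collector receives one set_segment call per segment, then collect for every matching document, with doc ids local to that segment. A minimal custom collector sketched against the trait exactly as shown in this diff:

struct DocCounter {
    count: usize,
}

impl Collector for DocCounter {
    fn set_segment(&mut self,
                   _segment_local_id: SegmentLocalId,
                   _segment: &SegmentReader)
                   -> Result<()> {
        Ok(()) // nothing to prepare per segment
    }

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.count += 1; // one call per matching document
    }
}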

View File

@@ -7,7 +7,7 @@ use SegmentLocalId;
/// `MultiCollector` makes it possible to collect on more than one collector.
/// It should only be used for use cases where the Collector type is unknown
/// at compile time.
/// If the type of the collectors is known, you should prefer to use `ChainedCollector`.
pub struct MultiCollector<'a> {
@@ -17,15 +17,16 @@ pub struct MultiCollector<'a> {
impl<'a> MultiCollector<'a> {
/// Constructor
pub fn from(collectors: Vec<&'a mut Collector>) -> MultiCollector {
MultiCollector {
collectors: collectors,
}
MultiCollector { collectors: collectors }
}
}
impl<'a> Collector for MultiCollector<'a> {
fn set_segment(&mut self, segment_local_id: SegmentLocalId, segment: &SegmentReader) -> Result<()> {
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()> {
for collector in &mut self.collectors {
try!(collector.set_segment(segment_local_id, segment));
}
@@ -52,7 +53,8 @@ mod tests {
let mut top_collector = TopCollector::with_limit(2);
let mut count_collector = CountCollector::default();
{
let mut collectors = MultiCollector::from(vec!(&mut top_collector, &mut count_collector));
let mut collectors = MultiCollector::from(vec![&mut top_collector,
&mut count_collector]);
collectors.collect(1, 0.2);
collectors.collect(2, 0.1);
collectors.collect(3, 0.5);

View File

@@ -12,8 +12,7 @@ use Score;
#[derive(Clone, Copy)]
struct GlobalScoredDoc {
score: Score,
doc_address: DocAddress
doc_address: DocAddress,
}
impl PartialOrd for GlobalScoredDoc {
@@ -25,10 +24,10 @@ impl PartialOrd for GlobalScoredDoc {
impl Ord for GlobalScoredDoc {
#[inline]
fn cmp(&self, other: &GlobalScoredDoc) -> Ordering {
other.score.partial_cmp(&self.score)
.unwrap_or(
other.doc_address.cmp(&self.doc_address)
)
other
.score
.partial_cmp(&self.score)
.unwrap_or_else(|| other.doc_address.cmp(&self.doc_address))
}
}
@@ -53,7 +52,6 @@ pub struct TopCollector {
}
impl TopCollector {
/// Creates a top collector, with a number of documents equal to "limit".
///
/// # Panics
@@ -68,9 +66,9 @@ impl TopCollector {
segment_id: 0,
}
}
/// Returns K best documents sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn docs(&self) -> Vec<DocAddress> {
@@ -81,30 +79,27 @@ impl TopCollector {
}
/// Returns K best ScoredDocument sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn score_docs(&self) -> Vec<(Score, DocAddress)> {
let mut scored_docs: Vec<GlobalScoredDoc> = self.heap
.iter()
.cloned()
.collect();
let mut scored_docs: Vec<GlobalScoredDoc> = self.heap.iter().cloned().collect();
scored_docs.sort();
scored_docs.into_iter()
.map(|GlobalScoredDoc {score, doc_address}| (score, doc_address))
scored_docs
.into_iter()
.map(|GlobalScoredDoc { score, doc_address }| (score, doc_address))
.collect()
}
/// Return true iff at least K documents have gone through
/// the collector.
#[inline]
pub fn at_capacity(&self, ) -> bool {
pub fn at_capacity(&self) -> bool {
self.heap.len() >= self.limit
}
}
impl Collector for TopCollector {
fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> Result<()> {
self.segment_id = segment_id;
Ok(())
@@ -113,17 +108,21 @@ impl Collector for TopCollector {
fn collect(&mut self, doc: DocId, score: Score) {
if self.at_capacity() {
// It's ok to unwrap as long as a limit of 0 is forbidden.
let limit_doc: GlobalScoredDoc = *self.heap.peek().expect("Top collector with size 0 is forbidden");
let limit_doc: GlobalScoredDoc =
*self.heap
.peek()
.expect("Top collector with size 0 is forbidden");
if limit_doc.score < score {
let mut mut_head = self.heap.peek_mut().expect("Top collector with size 0 is forbidden");
let mut mut_head = self.heap
.peek_mut()
.expect("Top collector with size 0 is forbidden");
mut_head.score = score;
mut_head.doc_address = DocAddress(self.segment_id, doc);
}
}
else {
} else {
let wrapped_doc = GlobalScoredDoc {
score: score,
doc_address: DocAddress(self.segment_id, doc)
doc_address: DocAddress(self.segment_id, doc),
};
self.heap.push(wrapped_doc);
}
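Worth noting: the reversed comparison in `Ord` above turns the standard library's max-oriented `BinaryHeap` into a min-heap on score, so `peek()` always exposes the weakest of the current top-K candidates. A small sketch of the resulting behaviour, assuming the collector above:

    let mut collector = TopCollector::with_limit(2);
    collector.collect(1, 0.8);
    collector.collect(2, 0.2);
    collector.collect(3, 0.5); // evicts doc 2, the current weakest (0.2)
    let docs: Vec<(Score, DocId)> = collector
        .score_docs()
        .into_iter()
        .map(|(score, doc_address)| (score, doc_address.doc()))
        .collect();
    assert_eq!(docs, vec![(0.8, 1), (0.5, 3)]);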
@@ -147,13 +146,12 @@ mod tests {
top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3);
assert!(!top_collector.at_capacity());
let score_docs: Vec<(Score, DocId)> = top_collector.score_docs()
let score_docs: Vec<(Score, DocId)> = top_collector
.score_docs()
.into_iter()
.map(|(score, doc_address)| (score, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec!(
(0.8, 1), (0.3, 5), (0.2, 3),
));
assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]);
}
#[test]
@@ -171,9 +169,7 @@ mod tests {
.into_iter()
.map(|(score, doc_address)| (score, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec!(
(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)
));
assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]);
}
{
let docs: Vec<DocId> = top_collector
@@ -181,7 +177,7 @@ mod tests {
.into_iter()
.map(|doc_address| doc_address.doc())
.collect();
assert_eq!(docs, vec!(7, 1, 5, 3));
assert_eq!(docs, vec![7, 1, 5, 3]);
}

View File

@@ -2,109 +2,153 @@ use std::io::Write;
use std::io;
use common::serialize::BinarySerializable;
use std::mem;
use std::ops::Deref;
pub fn compute_num_bits(amplitude: u32) -> u8 {
(32u32 - amplitude.leading_zeros()) as u8
/// Computes the number of bits that will be used for bitpacking.
///
/// In general, the target is the minimum number of bits
/// required to express the amplitude given as argument.
///
/// e.g., if the amplitude is 10, we can store all ints on just 4 bits.
///
/// The logic is slightly more convoluted here because, for optimization
/// reasons, we want to ensure that a value spans over at most 8
/// aligned bytes.
///
/// Spanning over 9 bytes is possible, for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and end at byte 15;
/// hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64
/// when the result is greater than `64 - 8 = 56` bits.
///
/// Note that this only affects rare use cases spanning over
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(amplitude: u64) -> u8 {
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
if amplitude <= 64 - 8 { amplitude } else { 64 }
}
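To make the 56-bit cutoff concrete, here is a minimal sketch (using only the `compute_num_bits` above) of the values returned around the threshold:

    // 10 = 0b1010 fits on 4 bits.
    assert_eq!(compute_num_bits(10), 4u8);
    // 2^56 - 1 needs exactly 56 bits, which is still allowed.
    assert_eq!(compute_num_bits((1u64 << 56) - 1), 56u8);
    // 2^56 would need 57 bits; the function rounds up to 64 so that
    // no bitpacked value straddles more than 8 aligned bytes.
    assert_eq!(compute_num_bits(1u64 << 56), 64u8);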
pub struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
num_bits: usize,
written_size: usize,
}
impl BitPacker {
pub fn new(num_bits: usize) -> BitPacker {
BitPacker {
mini_buffer: 0u64,
mini_buffer_written: 0,
num_bits: num_bits,
written_size: 0,
}
}
pub fn write<TWrite: Write>(&mut self, val: u32, output: &mut TWrite) -> io::Result<()> {
pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
let val_u64 = val as u64;
if self.mini_buffer_written + self.num_bits > 64 {
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer.serialize(output)?;
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
}
else {
} else {
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += self.num_bits;
if self.mini_buffer_written == 64 {
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer.serialize(output)?;
self.mini_buffer_written = 0;
self.mini_buffer = 0u64;
}
}
}
Ok(())
}
fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()>{
fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
if self.mini_buffer_written > 0 {
let num_bytes = (self.mini_buffer_written + 7) / 8;
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
output.write_all(&arr[..num_bytes])?;
self.written_size += num_bytes;
self.mini_buffer_written = 0;
}
Ok(())
}
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<usize> {
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
self.flush(output)?;
Ok(self.written_size)
// Padding the output with 7 bytes to simplify reads.
output.write_all(&[0u8; 7])?;
Ok(())
}
}
pub struct BitUnpacker {
pub struct BitUnpacker<Data>
where Data: Deref<Target = [u8]>
{
num_bits: usize,
mask: u32,
data_ptr: *const u8,
data_len: usize,
mask: u64,
data: Data,
}
impl BitUnpacker {
pub fn new(data: &[u8], num_bits: usize) -> BitUnpacker {
impl<Data> BitUnpacker<Data>
where Data: Deref<Target = [u8]>
{
pub fn new(data: Data, num_bits: usize) -> BitUnpacker<Data> {
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: num_bits,
mask: (1u32 << num_bits) - 1u32,
data_ptr: data.as_ptr(),
data_len: data.len()
mask: mask,
data: data,
}
}
pub fn get(&self, idx: usize) -> u32 {
pub fn get(&self, idx: usize) -> u64 {
if self.num_bits == 0 {
return 0;
}
let addr = (idx * self.num_bits) / 8;
let bit_shift = idx * self.num_bits - addr * 8;
let val_unshifted_unmasked: u64;
if addr + 8 <= self.data_len {
val_unshifted_unmasked = unsafe { * (self.data_ptr.offset(addr as isize) as *const u64) };
}
else {
let mut arr = [0u8; 8];
if addr < self.data_len {
for i in 0..self.data_len - addr {
arr[i] = unsafe { *self.data_ptr.offset( (addr + i) as isize) };
}
}
val_unshifted_unmasked = unsafe { mem::transmute::<[u8; 8], u64>(arr) };
}
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u32;
(val_shifted & self.mask)
let data: &[u8] = &*self.data;
let num_bits = self.num_bits;
let mask = self.mask;
let addr_in_bits = idx * num_bits;
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
debug_assert!(addr + 8 <= data.len(),
"The fast field field should have been padded with 7 bytes.");
let val_unshifted_unmasked: u64 = unsafe { *(data[addr..].as_ptr() as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
(val_shifted & mask)
}
pub fn get_range(&self, start: u32, output: &mut [u64]) {
if self.num_bits == 0 {
for val in output.iter_mut() {
*val = 0;
}
} else {
let data: &[u8] = &*self.data;
let num_bits = self.num_bits;
let mask = self.mask;
let mut addr_in_bits = (start as usize) * num_bits;
for output_val in output.iter_mut() {
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
let val_unshifted_unmasked: u64 = unsafe { *(data[addr..].as_ptr() as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
*output_val = val_shifted & mask;
addr_in_bits += num_bits;
}
}
}
}
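A round-trip sketch, assuming the `BitPacker` and `BitUnpacker` above: pack a few values on 5 bits each, close the packer so the 7 padding bytes are appended, then read the values back by index.

    let mut buffer: Vec<u8> = Vec::new();
    let mut packer = BitPacker::new(5);
    for &val in &[3u64, 17u64, 9u64] {
        packer.write(val, &mut buffer).unwrap();
    }
    packer.close(&mut buffer).unwrap(); // flush + 7 bytes of padding
    let unpacker = BitUnpacker::new(buffer, 5);
    assert_eq!(unpacker.get(0), 3u64);
    assert_eq!(unpacker.get(1), 17u64);
    assert_eq!(unpacker.get(2), 9u64);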
@@ -113,7 +157,7 @@ impl BitUnpacker {
#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker, compute_num_bits};
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
@@ -123,32 +167,32 @@ mod test {
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
fn test_bitpacker_util(len: usize, num_bits: usize) {
fn create_fastfield_bitpacker(len: usize, num_bits: usize) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new(num_bits);
let max_val: u32 = (1 << num_bits) - 1;
let vals: Vec<u32> = (0u32..len as u32).map(|i| {
if max_val == 0 {
0
}
else {
i % max_val
}
}).collect();
let max_val: u64 = (1 << num_bits) - 1;
let vals: Vec<u64> = (0u64..len as u64)
.map(|i| if max_val == 0 { 0 } else { i % max_val })
.collect();
for &val in &vals {
bitpacker.write(val, &mut data).unwrap();
}
let num_bytes = bitpacker.close(&mut data).unwrap();
assert_eq!(num_bytes, (num_bits * len + 7) / 8);
assert_eq!(data.len(), num_bytes);
let bitunpacker = BitUnpacker::new(&data, num_bits);
bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), (num_bits * len + 7) / 8 + 7);
let bitunpacker = BitUnpacker::new(data, num_bits);
(bitunpacker, vals)
}
fn test_bitpacker_util(len: usize, num_bits: usize) {
let (bitunpacker, vals) = create_fastfield_bitpacker(len, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i), *val);
}
}
#[test]
fn test_bitpacker() {
test_bitpacker_util(10, 3);
@@ -157,4 +201,17 @@ mod test {
test_bitpacker_util(6, 14);
test_bitpacker_util(1000, 14);
}
#[test]
fn test_bitpacker_range() {
let (bitunpacker, vals) = create_fastfield_bitpacker(100_000, 12);
let buffer_len = 100;
let mut buffer = vec![0u64; buffer_len];
for start in vec![0, 10, 20, 100, 1_000] {
bitunpacker.get_range(start as u32, &mut buffer[..]);
for i in 0..buffer_len {
assert_eq!(buffer[i], vals[start + i]);
}
}
}
}

View File

@@ -0,0 +1,58 @@
use std::io::Write;
use std::io;
pub struct CountingWriter<W: Write> {
underlying: W,
written_bytes: usize,
}
impl<W: Write> CountingWriter<W> {
pub fn wrap(underlying: W) -> CountingWriter<W> {
CountingWriter {
underlying: underlying,
written_bytes: 0,
}
}
pub fn written_bytes(&self) -> usize {
self.written_bytes
}
pub fn finish(mut self) -> io::Result<(W, usize)> {
self.flush()?;
Ok((self.underlying, self.written_bytes))
}
}
impl<W: Write> Write for CountingWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let written_size = self.underlying.write(buf)?;
self.written_bytes += written_size;
Ok(written_size)
}
fn flush(&mut self) -> io::Result<()> {
self.underlying.flush()
}
}
#[cfg(test)]
mod test {
use super::CountingWriter;
use std::io::Write;
#[test]
fn test_counting_writer() {
let buffer: Vec<u8> = vec![];
let mut counting_writer = CountingWriter::wrap(buffer);
let bytes = (0u8..10u8).collect::<Vec<u8>>();
counting_writer.write_all(&bytes).unwrap();
let (w, len): (Vec<u8>, usize) = counting_writer.finish().unwrap();
assert_eq!(len, 10);
assert_eq!(w.len(), 10);
}
}

View File

@@ -1,6 +1,7 @@
mod serialize;
mod timer;
mod vint;
mod counting_writer;
pub mod bitpacker;
pub use self::serialize::BinarySerializable;
@@ -8,6 +9,7 @@ pub use self::timer::Timing;
pub use self::timer::TimerTree;
pub use self::timer::OpenTimer;
pub use self::vint::VInt;
pub use self::counting_writer::CountingWriter;
use std::io;
@@ -20,23 +22,67 @@ pub fn make_io_err(msg: String) -> io::Error {
/// Has length trait
pub trait HasLen {
/// Return length
fn len(&self,) -> usize;
fn len(&self) -> usize;
/// Returns true iff empty.
fn is_empty(&self,) -> bool {
fn is_empty(&self) -> bool {
self.len() == 0
}
}
const HIGHEST_BIT: u64 = 1 << 63;
/// Creates an uninitialized Vec of a given usize
///
/// `allocate_vec` does an unsafe call to `set_len`
/// as other solutions are extremely slow in debug mode.
pub fn allocate_vec<T>(capacity: usize) -> Vec<T> {
let mut v = Vec::with_capacity(capacity);
unsafe {
v.set_len(capacity);
}
v
}
/// Maps a `i64` to `u64`
///
/// For simplicity, tantivy internally handles `i64` as `u64`.
/// The mapping is defined by this function.
///
/// Maps `i64` to `u64` so that
/// `-2^63 .. 2^63-1` is mapped
/// to
/// `0 .. 2^64-1`
/// in that order.
///
/// This is more suited than simply casting (`val as u64`)
/// because of bitpacking.
///
/// Imagine a list of `i64` ranging from -10 to 10.
/// When casting negative values, the negative values are projected
/// to values over 2^63, and all values end up requiring 64 bits.
///
/// # See also
/// The [reverse mapping is `u64_to_i64`](./fn.u64_to_i64.html).
#[inline(always)]
pub fn i64_to_u64(val: i64) -> u64 {
(val as u64) ^ HIGHEST_BIT
}
/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
#[inline(always)]
pub fn u64_to_i64(val: u64) -> i64 {
(val ^ HIGHEST_BIT) as i64
}
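A quick illustration of why the shifted mapping keeps amplitudes small for bitpacking (a sketch, assuming the two functions above):

    // -10 and 10 map to neighboring values around 2^63,
    // so the amplitude of the mapped range is only 20.
    let lo = i64_to_u64(-10);
    let hi = i64_to_u64(10);
    assert_eq!(hi - lo, 20u64);
    // A plain cast would have projected -10 near u64::max_value(),
    // forcing 64 bits per value.
    assert_eq!(-10i64 as u64, u64::max_value() - 9);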
#[cfg(test)]
mod test {
use super::{i64_to_u64, u64_to_i64};
fn test_i64_converter_helper(val: i64) {
assert_eq!(u64_to_i64(i64_to_u64(val)), val);
}
#[test]
fn test_i64_converter() {
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
test_i64_converter_helper(0i64);
test_i64_converter_helper(i64::min_value());
test_i64_converter_helper(i64::max_value());
for i in -1000i64..1000i64 {
test_i64_converter_helper(i);
}
}
}

View File

@@ -6,33 +6,35 @@ use std::io::Read;
use std::io;
use common::VInt;
pub trait BinarySerializable : fmt::Debug + Sized {
fn serialize(&self, writer: &mut Write) -> io::Result<usize>;
fn deserialize(reader: &mut Read) -> io::Result<Self>;
pub trait BinarySerializable: fmt::Debug + Sized {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
}
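For a composite type, the new generic form composes naturally. A sketch with a hypothetical `Point` type (not part of the crate), assuming the trait above and the `u32` impl below:

    #[derive(Debug)]
    struct Point {
        x: u32,
        y: u32,
    }

    impl BinarySerializable for Point {
        fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
            self.x.serialize(writer)?;
            self.y.serialize(writer)
        }
        fn deserialize<R: Read>(reader: &mut R) -> io::Result<Point> {
            let x = u32::deserialize(reader)?;
            let y = u32::deserialize(reader)?;
            Ok(Point { x: x, y: y })
        }
    }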
impl BinarySerializable for () {
fn serialize(&self, _: &mut Write) -> io::Result<usize> {
Ok(0)
fn serialize<W: Write>(&self, _: &mut W) -> io::Result<()> {
Ok(())
}
fn deserialize(_: &mut Read) -> io::Result<Self> {
fn deserialize<R: Read>(_: &mut R) -> io::Result<Self> {
Ok(())
}
}
impl<T: BinarySerializable> BinarySerializable for Vec<T> {
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
let mut total_size = try!(VInt(self.len() as u64).serialize(writer));
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
VInt(self.len() as u64).serialize(writer)?;
for it in self {
total_size += try!(it.serialize(writer));
it.serialize(writer)?;
}
Ok(total_size)
Ok(())
}
fn deserialize(reader: &mut Read) -> io::Result<Vec<T>> {
let num_items = try!(VInt::deserialize(reader)).val();
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Vec<T>> {
let num_items = VInt::deserialize(reader)?.val();
let mut items: Vec<T> = Vec::with_capacity(num_items as usize);
for _ in 0..num_items {
let item = try!(T::deserialize(reader));
let item = T::deserialize(reader)?;
items.push(item);
}
Ok(items)
@@ -41,60 +43,67 @@ impl<T: BinarySerializable> BinarySerializable for Vec<T> {
impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for (Left, Right) {
fn serialize(&self, write: &mut Write) -> io::Result<usize> {
Ok(try!(self.0.serialize(write)) + try!(self.1.serialize(write)))
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.0.serialize(write)?;
self.1.serialize(write)
}
fn deserialize(reader: &mut Read) -> io::Result<Self> {
Ok( (try!(Left::deserialize(reader)), try!(Right::deserialize(reader))) )
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
Ok((Left::deserialize(reader)?, Right::deserialize(reader)?))
}
}
impl BinarySerializable for u32 {
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u32::<Endianness>(*self)
.map(|_| 4)
}
fn deserialize(reader: &mut Read) -> io::Result<u32> {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u32> {
reader.read_u32::<Endianness>()
}
}
impl BinarySerializable for u64 {
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u64::<Endianness>(*self)
.map(|_| 8)
}
fn deserialize(reader: &mut Read) -> io::Result<u64> {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_u64::<Endianness>()
}
}
impl BinarySerializable for i64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_i64::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_i64::<Endianness>()
}
}
impl BinarySerializable for u8 {
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
try!(writer.write_u8(*self));
Ok(1)
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u8(*self)
}
fn deserialize(reader: &mut Read) -> io::Result<u8> {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u8> {
reader.read_u8()
}
}
impl BinarySerializable for String {
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let data: &[u8] = self.as_bytes();
let mut size = try!(VInt(data.len() as u64).serialize(writer));
size += data.len();
try!(writer.write_all(data));
Ok(size)
VInt(data.len() as u64).serialize(writer)?;
writer.write_all(data)
}
fn deserialize(reader: &mut Read) -> io::Result<String> {
let string_length = try!(VInt::deserialize(reader)).val() as usize;
fn deserialize<R: Read>(reader: &mut R) -> io::Result<String> {
let string_length = VInt::deserialize(reader)?.val() as usize;
let mut result = String::with_capacity(string_length);
try!(reader.take(string_length as u64).read_to_string(&mut result));
reader
.take(string_length as u64)
.read_to_string(&mut result)?;
Ok(result)
}
}
@@ -108,12 +117,10 @@ mod test {
fn serialize_test<T: BinarySerializable + Eq>(v: T, num_bytes: usize) {
let mut buffer: Vec<u8> = Vec::new();
if num_bytes != 0 {
assert_eq!(v.serialize(&mut buffer).unwrap(), num_bytes);
v.serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), num_bytes);
}
else {
} else {
v.serialize(&mut buffer).unwrap();
}
let mut cursor = &buffer[..];
@@ -137,15 +144,15 @@ mod test {
#[test]
fn test_serialize_string() {
serialize_test(String::from(""), 1);
serialize_test(String::from("ぽよぽよ"), 1 + 3*4);
serialize_test(String::from("富士さん見える。"), 1 + 3*8);
serialize_test(String::from("ぽよぽよ"), 1 + 3 * 4);
serialize_test(String::from("富士さん見える。"), 1 + 3 * 8);
}
#[test]
fn test_serialize_vec() {
let v: Vec<u8> = Vec::new();
serialize_test(v, 1);
serialize_test(vec!(1u32, 3u32), 1 + 4*2);
serialize_test(vec![1u32, 3u32], 1 + 4 * 2);
}
#[test]

View File

@@ -10,7 +10,7 @@ pub struct OpenTimer<'a> {
impl<'a> OpenTimer<'a> {
/// Starts timing a new named subtask
///
/// The timer is stopped automatically
/// when the `OpenTimer` is dropped.
pub fn open(&mut self, name: &'static str) -> OpenTimer {
OpenTimer {
@@ -23,17 +23,22 @@ impl<'a> OpenTimer<'a> {
}
impl<'a> Drop for OpenTimer<'a> {
fn drop(&mut self,) {
self.timer_tree.timings.push(Timing {
name: self.name,
duration: self.start.to(PreciseTime::now()).num_microseconds().unwrap(),
depth: self.depth,
});
fn drop(&mut self) {
self.timer_tree
.timings
.push(Timing {
name: self.name,
duration: self.start
.to(PreciseTime::now())
.num_microseconds()
.unwrap(),
depth: self.depth,
});
}
}
/// Timing recording
#[derive(Debug, RustcEncodable)]
#[derive(Debug, Serialize)]
pub struct Timing {
name: &'static str,
duration: i64,
@@ -41,18 +46,17 @@ pub struct Timing {
}
/// Timer tree
#[derive(Debug, RustcEncodable)]
#[derive(Debug, Serialize)]
pub struct TimerTree {
timings: Vec<Timing>,
}
impl TimerTree {
/// Returns the total time elapsed in microseconds
pub fn total_time(&self,) -> i64 {
/// Returns the total time elapsed in microseconds
pub fn total_time(&self) -> i64 {
self.timings.last().unwrap().duration
}
/// Open a new named subtask
pub fn open(&mut self, name: &'static str) -> OpenTimer {
OpenTimer {
@@ -66,9 +70,7 @@ impl TimerTree {
impl Default for TimerTree {
fn default() -> TimerTree {
TimerTree {
timings: Vec::new(),
}
TimerTree { timings: Vec::new() }
}
}
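A usage sketch, assuming the `TimerTree` and `OpenTimer` above (field layout partly elided by the diff): nested subtasks record their duration when dropped, and `total_time` reads the outermost timing.

    let mut timer_tree = TimerTree::default();
    {
        let mut search_timer = timer_tree.open("search");
        {
            let _collect_timer = search_timer.open("collect");
            // ... timed work ...
        } // the "collect" timing is pushed here
    } // the "search" timing is pushed here and becomes the last entry
    println!("total: {} microseconds", timer_tree.total_time());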

View File

@@ -5,39 +5,36 @@ use std::io::Read;
/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
impl VInt {
pub fn val(&self,) -> u64 {
pub fn val(&self) -> u64 {
self.0
}
}
impl BinarySerializable for VInt {
fn serialize(&self, writer: &mut Write) -> io::Result<usize> {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let mut remaining = self.0;
let mut written: usize = 0;
let mut buffer = [0u8; 10];
let mut i = 0;
loop {
let next_byte: u8 = (remaining % 128u64) as u8;
remaining /= 128u64;
if remaining == 0u64 {
buffer[written] = next_byte | 128u8;
written += 1;
break;
}
else {
buffer[written] = next_byte;
written += 1;
buffer[i] = next_byte | 128u8;
return writer.write_all(&buffer[0..i + 1]);
} else {
buffer[i] = next_byte;
}
i += 1;
}
try!(writer.write_all(&buffer[0..written]));
Ok(written)
}
fn deserialize(reader: &mut Read) -> io::Result<Self> {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let mut bytes = reader.bytes();
let mut result = 0u64;
let mut shift = 0u64;
@@ -50,12 +47,9 @@ impl BinarySerializable for VInt {
}
shift += 7;
}
_ => {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Reach end of buffer"))
}
_ => return Err(io::Error::new(io::ErrorKind::InvalidData, "Reach end of buffer")),
}
}
Ok(VInt(result))
}
}
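Note that this encoding sets the high bit on the last byte of each value, the reverse of the usual LEB128 stop-bit convention. A byte-level sketch, assuming the `VInt` above:

    let mut buf: Vec<u8> = Vec::new();
    VInt(300).serialize(&mut buf).unwrap();
    // 300 = 0b1_0010_1100: the low 7 bits (44) come first,
    // then the remaining bits (2) with the stop bit set.
    assert_eq!(buf, vec![44u8, 2u8 | 128u8]);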

View File

@@ -8,38 +8,40 @@ pub struct CompositeEncoder {
}
impl CompositeEncoder {
pub fn new() -> CompositeEncoder {
CompositeEncoder {
block_encoder: BlockEncoder::new(),
output: Vec::with_capacity(500_000),
}
}
pub fn compress_sorted(&mut self, vals: &[u32]) -> &[u8] {
self.output.clear();
let num_blocks = vals.len() / NUM_DOCS_PER_BLOCK;
let mut offset = 0u32;
for i in 0..num_blocks {
let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK .. (i + 1) * NUM_DOCS_PER_BLOCK];
let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK..(i + 1) * NUM_DOCS_PER_BLOCK];
let block_compressed = self.block_encoder.compress_block_sorted(vals_slice, offset);
offset = vals_slice[NUM_DOCS_PER_BLOCK - 1];
self.output.extend_from_slice(block_compressed);
}
let vint_compressed = self.block_encoder.compress_vint_sorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..], offset);
let vint_compressed =
self.block_encoder
.compress_vint_sorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..], offset);
self.output.extend_from_slice(vint_compressed);
&self.output
}
pub fn compress_unsorted(&mut self, vals: &[u32]) -> &[u8] {
self.output.clear();
let num_blocks = vals.len() / NUM_DOCS_PER_BLOCK;
for i in 0..num_blocks {
let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK .. (i + 1) * NUM_DOCS_PER_BLOCK];
let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK..(i + 1) * NUM_DOCS_PER_BLOCK];
let block_compressed = self.block_encoder.compress_block_unsorted(vals_slice);
self.output.extend_from_slice(block_compressed);
}
let vint_compressed = self.block_encoder.compress_vint_unsorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..]);
let vint_compressed = self.block_encoder
.compress_vint_unsorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..]);
self.output.extend_from_slice(vint_compressed);
&self.output
}
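A round-trip sketch (assuming the encoder above and the `CompositeDecoder` below): 300 sorted values split into two full 128-value blocks, with the remaining 44 values vint-encoded at the tail.

    let vals: Vec<u32> = (0u32..300u32).map(|i| i * 3).collect();
    let mut encoder = CompositeEncoder::new();
    let compressed: Vec<u8> = encoder.compress_sorted(&vals).to_owned();
    let mut decoder = CompositeDecoder::new();
    let decoded = decoder.uncompress_sorted(&compressed, vals.len());
    assert_eq!(decoded, &vals[..]);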
@@ -57,10 +59,13 @@ impl CompositeDecoder {
CompositeDecoder {
block_decoder: BlockDecoder::new(),
vals: Vec::with_capacity(500_000),
}
}
pub fn uncompress_sorted(&mut self, mut compressed_data: &[u8], uncompressed_len: usize) -> &[u32] {
pub fn uncompress_sorted(&mut self,
mut compressed_data: &[u8],
uncompressed_len: usize)
-> &[u32] {
if uncompressed_len > self.vals.capacity() {
let extra_capacity = uncompressed_len - self.vals.capacity();
self.vals.reserve(extra_capacity);
@@ -69,24 +74,37 @@ impl CompositeDecoder {
self.vals.clear();
let num_blocks = uncompressed_len / NUM_DOCS_PER_BLOCK;
for _ in 0..num_blocks {
compressed_data = self.block_decoder.uncompress_block_sorted(compressed_data, offset);
compressed_data = self.block_decoder
.uncompress_block_sorted(compressed_data, offset);
offset = self.block_decoder.output(NUM_DOCS_PER_BLOCK - 1);
self.vals.extend_from_slice(self.block_decoder.output_array());
self.vals
.extend_from_slice(self.block_decoder.output_array());
}
self.block_decoder.uncompress_vint_sorted(compressed_data, offset, uncompressed_len % NUM_DOCS_PER_BLOCK);
self.vals.extend_from_slice(self.block_decoder.output_array());
self.block_decoder
.uncompress_vint_sorted(compressed_data,
offset,
uncompressed_len % NUM_DOCS_PER_BLOCK);
self.vals
.extend_from_slice(self.block_decoder.output_array());
&self.vals
}
pub fn uncompress_unsorted(&mut self, mut compressed_data: &[u8], uncompressed_len: usize) -> &[u32] {
pub fn uncompress_unsorted(&mut self,
mut compressed_data: &[u8],
uncompressed_len: usize)
-> &[u32] {
self.vals.clear();
let num_blocks = uncompressed_len / NUM_DOCS_PER_BLOCK;
for _ in 0..num_blocks {
compressed_data = self.block_decoder.uncompress_block_unsorted(compressed_data);
self.vals.extend_from_slice(self.block_decoder.output_array());
compressed_data = self.block_decoder
.uncompress_block_unsorted(compressed_data);
self.vals
.extend_from_slice(self.block_decoder.output_array());
}
self.block_decoder.uncompress_vint_unsorted(compressed_data, uncompressed_len % NUM_DOCS_PER_BLOCK);
self.vals.extend_from_slice(self.block_decoder.output_array());
self.block_decoder
.uncompress_vint_unsorted(compressed_data, uncompressed_len % NUM_DOCS_PER_BLOCK);
self.vals
.extend_from_slice(self.block_decoder.output_array());
&self.vals
}
}
@@ -100,60 +118,53 @@ impl Into<Vec<u32>> for CompositeDecoder {
#[cfg(test)]
pub mod tests {
use test::Bencher;
use super::*;
use compression::tests::generate_array;
use tests;
#[test]
fn test_composite_unsorted() {
let data = generate_array(10_000, 0.1);
let data = tests::generate_array(10_000, 0.1);
let mut encoder = CompositeEncoder::new();
let compressed = encoder.compress_unsorted(&data);
assert_eq!(compressed.len(), 19_790);
assert!(compressed.len() <= 19_794);
let mut decoder = CompositeDecoder::new();
let result = decoder.uncompress_unsorted(&compressed, data.len());
for i in 0..data.len() {
assert_eq!(data[i], result[i]);
}
}
#[test]
fn test_composite_sorted() {
let data = generate_array(10_000, 0.1);
let data = tests::generate_array(10_000, 0.1);
let mut encoder = CompositeEncoder::new();
let compressed = encoder.compress_sorted(&data);
assert_eq!(compressed.len(), 7_822);
assert!(compressed.len() <= 7_826);
let mut decoder = CompositeDecoder::new();
let result = decoder.uncompress_sorted(&compressed, data.len());
for i in 0..data.len() {
assert_eq!(data[i], result[i]);
}
}
const BENCH_NUM_INTS: usize = 99_968;
#[bench]
fn bench_compress(b: &mut Bencher) {
let mut encoder = CompositeEncoder::new();
let data = generate_array(BENCH_NUM_INTS, 0.1);
b.iter(|| {
encoder.compress_sorted(&data);
});
let data = tests::generate_array(BENCH_NUM_INTS, 0.1);
b.iter(|| { encoder.compress_sorted(&data); });
}
#[bench]
fn bench_uncompress(b: &mut Bencher) {
let mut encoder = CompositeEncoder::new();
let data = generate_array(BENCH_NUM_INTS, 0.1);
let data = tests::generate_array(BENCH_NUM_INTS, 0.1);
let compressed = encoder.compress_sorted(&data);
let mut decoder = CompositeDecoder::new();
b.iter(|| {
decoder.uncompress_sorted(compressed, BENCH_NUM_INTS);
});
b.iter(|| { decoder.uncompress_sorted(compressed, BENCH_NUM_INTS); });
}
}

View File

@@ -4,16 +4,32 @@
mod composite;
pub use self::composite::{CompositeEncoder, CompositeDecoder};
#[cfg(feature="simdcompression")]
mod compression_simd;
#[cfg(feature="simdcompression")]
pub use self::compression_simd::{BlockEncoder, BlockDecoder};
#[cfg(not(feature="simdcompression"))]
mod compression_nosimd;
#[cfg(not(feature="simdcompression"))]
pub use self::compression_nosimd::{BlockEncoder, BlockDecoder};
mod pack {
mod compression_pack_nosimd;
pub use self::compression_pack_nosimd::*;
}
#[cfg(feature="simdcompression")]
mod pack {
mod compression_pack_simd;
pub use self::compression_pack_simd::*;
}
pub use self::pack::{BlockEncoder, BlockDecoder};
#[cfg( any(not(feature="simdcompression"), target_env="msvc") )]
mod vint {
mod compression_vint_nosimd;
pub use self::compression_vint_nosimd::*;
}
#[cfg( all(feature="simdcompression", not(target_env="msvc")) )]
mod vint {
mod compression_vint_simd;
pub use self::compression_vint_simd::*;
}
pub trait VIntEncoder {
@@ -22,142 +38,64 @@ pub trait VIntEncoder {
}
pub trait VIntDecoder {
fn uncompress_vint_sorted<'a>(&mut self, compressed_data: &'a [u8], offset: u32, num_els: usize) -> &'a [u8];
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> &'a [u8];
fn uncompress_vint_sorted<'a>(&mut self,
compressed_data: &'a [u8],
offset: u32,
num_els: usize)
-> &'a [u8];
fn uncompress_vint_unsorted<'a>(&mut self,
compressed_data: &'a [u8],
num_els: usize)
-> &'a [u8];
}
impl VIntEncoder for BlockEncoder{
fn compress_vint_sorted(&mut self, input: &[u32], mut offset: u32) -> &[u8] {
let mut byte_written = 0;
for &v in input {
let mut to_encode: u32 = v - offset;
offset = v;
loop {
let next_byte: u8 = (to_encode % 128u32) as u8;
to_encode /= 128u32;
if to_encode == 0u32 {
self.output[byte_written] = next_byte | 128u8;
byte_written += 1;
break;
}
else {
self.output[byte_written] = next_byte;
byte_written += 1;
}
}
}
&self.output[..byte_written]
impl VIntEncoder for BlockEncoder {
fn compress_vint_sorted(&mut self, input: &[u32], offset: u32) -> &[u8] {
vint::compress_sorted(input, &mut self.output, offset)
}
fn compress_vint_unsorted(&mut self, input: &[u32]) -> &[u8] {
let mut byte_written = 0;
for &v in input {
let mut to_encode: u32 = v;
loop {
let next_byte: u8 = (to_encode % 128u32) as u8;
to_encode /= 128u32;
if to_encode == 0u32 {
self.output[byte_written] = next_byte | 128u8;
byte_written += 1;
break;
}
else {
self.output[byte_written] = next_byte;
byte_written += 1;
}
}
}
&self.output[..byte_written]
vint::compress_unsorted(input, &mut self.output)
}
}
impl VIntDecoder for BlockDecoder {
fn uncompress_vint_sorted<'a>(
&mut self,
compressed_data: &'a [u8],
offset: u32,
num_els: usize) -> &'a [u8] {
let mut read_byte = 0;
let mut result = offset;
for i in 0..num_els {
let mut shift = 0u32;
loop {
let cur_byte = compressed_data[read_byte];
read_byte += 1;
result += ((cur_byte % 128u8) as u32) << shift;
if cur_byte & 128u8 != 0u8 {
break;
}
shift += 7;
}
self.output[i] = result;
}
fn uncompress_vint_sorted<'a>(&mut self,
compressed_data: &'a [u8],
offset: u32,
num_els: usize)
-> &'a [u8] {
self.output_len = num_els;
&compressed_data[read_byte..]
vint::uncompress_sorted(compressed_data, &mut self.output[..num_els], offset)
}
fn uncompress_vint_unsorted<'a>(
&mut self,
compressed_data: &'a [u8],
num_els: usize) -> &'a [u8] {
let mut read_byte = 0;
for i in 0..num_els {
let mut result = 0u32;
let mut shift = 0u32;
loop {
let cur_byte = compressed_data[read_byte];
read_byte += 1;
result += ((cur_byte % 128u8) as u32) << shift;
if cur_byte & 128u8 != 0u8 {
break;
}
shift += 7;
}
self.output[i] = result;
}
fn uncompress_vint_unsorted<'a>(&mut self,
compressed_data: &'a [u8],
num_els: usize)
-> &'a [u8] {
self.output_len = num_els;
&compressed_data[read_byte..]
vint::uncompress_unsorted(compressed_data, &mut self.output[..num_els])
}
}
pub const NUM_DOCS_PER_BLOCK: usize = 128; //< should be a power of 2 to let the compiler optimize.
#[cfg(test)]
pub mod tests {
use rand::Rng;
use rand::SeedableRng;
use rand::XorShiftRng;
use super::*;
use tests;
use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f32, seed_val: u32) -> Vec<u32> {
let seed: &[u32; 4] = &[1, 2, 3, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0..u32::max_value())
.filter(|_| rng.next_f32() < ratio)
.take(n)
.collect()
}
pub fn generate_array(n: usize, ratio: f32) -> Vec<u32> {
generate_array_with_seed(n, ratio, 4)
}
#[test]
fn test_encode_sorted_block() {
let vals: Vec<u32> = (0u32..128u32).map(|i| i*7).collect();
let vals: Vec<u32> = (0u32..128u32).map(|i| i * 7).collect();
let mut encoder = BlockEncoder::new();
let compressed_data = encoder.compress_block_sorted(&vals, 0);
let mut decoder = BlockDecoder::new();
{
let remaining_data = decoder.uncompress_block_sorted(compressed_data, 0);
assert_eq!(remaining_data.len(), 0);
}
for i in 0..128 {
@@ -167,31 +105,31 @@ pub mod tests {
#[test]
fn test_encode_sorted_block_with_offset() {
let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i*7).collect();
let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i * 7).collect();
let mut encoder = BlockEncoder::new();
let compressed_data = encoder.compress_block_sorted(&vals, 10);
let mut decoder = BlockDecoder::new();
{
let remaining_data = decoder.uncompress_block_sorted(compressed_data, 10);
assert_eq!(remaining_data.len(), 0);
}
for i in 0..128 {
assert_eq!(vals[i], decoder.output(i));
}
}
#[test]
fn test_encode_sorted_block_with_junk() {
let mut compressed: Vec<u8> = Vec::new();
let n = 128;
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32)*7u32).collect();
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32).collect();
let mut encoder = BlockEncoder::new();
let compressed_data = encoder.compress_block_sorted(&vals, 10);
compressed.extend_from_slice(compressed_data);
compressed.push(173u8);
let mut decoder = BlockDecoder::new();
{
let remaining_data = decoder.uncompress_block_sorted(&compressed, 10);
assert_eq!(remaining_data.len(), 1);
assert_eq!(remaining_data[0], 173u8);
}
@@ -204,14 +142,14 @@ pub mod tests {
fn test_encode_unsorted_block_with_junk() {
let mut compressed: Vec<u8> = Vec::new();
let n = 128;
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32)*7u32 % 12).collect();
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32 % 12).collect();
let mut encoder = BlockEncoder::new();
let compressed_data = encoder.compress_block_unsorted(&vals);
compressed.extend_from_slice(compressed_data);
compressed.push(173u8);
let mut decoder = BlockDecoder::new();
{
let remaining_data = decoder.uncompress_block_unsorted(&compressed);
assert_eq!(remaining_data.len(), 1);
assert_eq!(remaining_data[0], 173u8);
}
@@ -219,57 +157,60 @@ pub mod tests {
assert_eq!(vals[i], decoder.output(i));
}
}
#[test]
fn test_encode_vint() {
{
let expected_length = 123;
let expected_length = 154;
let mut encoder = BlockEncoder::new();
let input: Vec<u32> = (0u32..123u32)
.map(|i| 4 + i * 7 / 2)
.into_iter()
.collect();
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
for offset in &[0u32, 1u32, 2u32] {
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
assert_eq!(encoded_data.len(), expected_length);
assert!(encoded_data.len() <= expected_length);
let mut decoder = BlockDecoder::new();
let remaining_data = decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
let remaining_data =
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
assert_eq!(0, remaining_data.len());
assert_eq!(input, decoder.output_array());
}
}
{
let mut encoder = BlockEncoder::new();
let input = vec!(3u32, 17u32, 187u32);
let encoded_data = encoder.compress_vint_sorted(&input, 0);
assert_eq!(encoded_data.len(), 4);
assert_eq!(encoded_data[0], 3u8 + 128u8);
assert_eq!(encoded_data[1], (17u8 - 3u8) + 128u8);
assert_eq!(encoded_data[2], (187u8 - 17u8 - 128u8));
assert_eq!(encoded_data[3], (1u8 + 128u8));
}
}
#[bench]
fn bench_compress(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = generate_array(NUM_DOCS_PER_BLOCK, 0.1);
b.iter(|| {
encoder.compress_block_sorted(&data, 0u32);
});
let data = tests::generate_array(NUM_DOCS_PER_BLOCK, 0.1);
b.iter(|| { encoder.compress_block_sorted(&data, 0u32); });
}
#[bench]
fn bench_uncompress(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = generate_array(NUM_DOCS_PER_BLOCK, 0.1);
let data = tests::generate_array(NUM_DOCS_PER_BLOCK, 0.1);
let compressed = encoder.compress_block_sorted(&data, 0u32);
let mut decoder = BlockDecoder::new();
b.iter(|| {
decoder.uncompress_block_sorted(compressed, 0u32);
});
b.iter(|| { decoder.uncompress_block_sorted(compressed, 0u32); });
}
const NUM_INTS_BENCH_VINT: usize = 10;
#[bench]
fn bench_compress_vint(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = tests::generate_array(NUM_INTS_BENCH_VINT, 0.001);
b.iter(|| { encoder.compress_vint_sorted(&data, 0u32); });
}
#[bench]
fn bench_uncompress_vint(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = tests::generate_array(NUM_INTS_BENCH_VINT, 0.001);
let compressed = encoder.compress_vint_sorted(&data, 0u32);
let mut decoder = BlockDecoder::new();
b.iter(|| { decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT); });
}
}

View File

@@ -2,12 +2,12 @@ use common::bitpacker::compute_num_bits;
use common::bitpacker::{BitPacker, BitUnpacker};
use std::cmp;
use std::io::Write;
use super::NUM_DOCS_PER_BLOCK;
use super::super::NUM_DOCS_PER_BLOCK;
const COMPRESSED_BLOCK_MAX_SIZE: usize = NUM_DOCS_PER_BLOCK * 4 + 1;
pub fn compress_sorted(vals: &mut [u32], mut output: &mut [u8], offset: u32) -> usize {
let mut max_delta = 0;
{
let mut local_offset = offset;
for i in 0..NUM_DOCS_PER_BLOCK {
@@ -24,7 +24,10 @@ pub fn compress_sorted(vals: &mut [u32], mut output: &mut [u8], offset: u32) ->
for val in vals {
bit_packer.write(*val, &mut output).unwrap();
}
1 + bit_packer.close(&mut output).expect("packing in memory should never fail")
1 +
bit_packer
.close(&mut output)
.expect("packing in memory should never fail")
}
@@ -36,36 +39,40 @@ pub struct BlockEncoder {
}
impl BlockEncoder {
pub fn new() -> BlockEncoder {
BlockEncoder {
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
output_len: 0,
input_buffer: [0u32; NUM_DOCS_PER_BLOCK],
}
}
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
self.input_buffer.clone_from_slice(vals);
let compressed_size = compress_sorted(&mut self.input_buffer, &mut self.output, offset);
&self.output[..compressed_size]
}
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
let compressed_size: usize = {
let mut output: &mut [u8] = &mut self.output;
let max = vals.iter().cloned().max().expect("compress unsorted called with an empty array");
let max = vals.iter()
.cloned()
.max()
.expect("compress unsorted called with an empty array");
let num_bits = compute_num_bits(max);
output.write_all(&[num_bits]).unwrap();
let mut bit_packer = BitPacker::new(num_bits as usize);
for val in vals {
bit_packer.write(*val, &mut output).unwrap();
}
1 + bit_packer.close(&mut output).expect("packing in memory should never fail")
1 +
bit_packer
.close(&mut output)
.expect("packing in memory should never fail")
};
&self.output[..compressed_size]
}
}
pub struct BlockDecoder {
@@ -78,15 +85,18 @@ impl BlockDecoder {
pub fn new() -> BlockDecoder {
BlockDecoder::with_val(0u32)
}
pub fn with_val(val: u32) -> BlockDecoder {
BlockDecoder {
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
output_len: 0,
}
}
pub fn uncompress_block_sorted<'a>(&mut self, compressed_data: &'a [u8], mut offset: u32) -> &'a[u8] {
pub fn uncompress_block_sorted<'a>(&mut self,
compressed_data: &'a [u8],
mut offset: u32)
-> &'a [u8] {
let consumed_size = {
let num_bits = compressed_data[0];
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
@@ -96,13 +106,13 @@ impl BlockDecoder {
self.output[i] = val;
offset = val;
}
1 + (num_bits as usize * NUM_DOCS_PER_BLOCK + 7) / 8
};
self.output_len = NUM_DOCS_PER_BLOCK;
&compressed_data[consumed_size..]
}
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a[u8] {
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a [u8] {
let num_bits = compressed_data[0];
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
for i in 0..NUM_DOCS_PER_BLOCK {
@@ -112,16 +122,14 @@ impl BlockDecoder {
self.output_len = NUM_DOCS_PER_BLOCK;
&compressed_data[consumed_size..]
}
#[inline]
pub fn output_array(&self,) -> &[u32] {
pub fn output_array(&self) -> &[u32] {
&self.output[..self.output_len]
}
#[inline]
pub fn output(&self, idx: usize) -> u32 {
self.output[idx]
}
}

View File

@@ -1,28 +1,21 @@
use super::NUM_DOCS_PER_BLOCK;
use super::super::NUM_DOCS_PER_BLOCK;
const COMPRESSED_BLOCK_MAX_SIZE: usize = NUM_DOCS_PER_BLOCK * 4 + 1;
mod simdcomp {
use libc::size_t;
extern {
pub fn compress_sorted(
data: *const u32,
output: *mut u8,
offset: u32) -> size_t;
extern "C" {
pub fn compress_sorted(data: *const u32, output: *mut u8, offset: u32) -> size_t;
pub fn uncompress_sorted(
compressed_data: *const u8,
output: *mut u32,
offset: u32) -> size_t;
pub fn compress_unsorted(
data: *const u32,
output: *mut u8) -> size_t;
pub fn uncompress_sorted(compressed_data: *const u8,
output: *mut u32,
offset: u32)
-> size_t;
pub fn uncompress_unsorted(
compressed_data: *const u8,
output: *mut u32) -> size_t;
pub fn compress_unsorted(data: *const u32, output: *mut u8) -> size_t;
pub fn uncompress_unsorted(compressed_data: *const u8, output: *mut u32) -> size_t;
}
}
@@ -49,24 +42,22 @@ pub struct BlockEncoder {
}
impl BlockEncoder {
pub fn new() -> BlockEncoder {
BlockEncoder {
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
output_len: 0,
}
}
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
let compressed_size = compress_sorted(vals, &mut self.output, offset);
&self.output[..compressed_size]
}
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
let compressed_size = compress_unsorted(vals, &mut self.output);
&self.output[..compressed_size]
}
}
pub struct BlockDecoder {
@@ -79,31 +70,34 @@ impl BlockDecoder {
pub fn new() -> BlockDecoder {
BlockDecoder::with_val(0u32)
}
pub fn with_val(val: u32) -> BlockDecoder {
BlockDecoder {
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
output_len: 0,
}
}
pub fn uncompress_block_sorted<'a>(&mut self, compressed_data: &'a [u8], offset: u32) -> &'a[u8] {
pub fn uncompress_block_sorted<'a>(&mut self,
compressed_data: &'a [u8],
offset: u32)
-> &'a [u8] {
let consumed_size = uncompress_sorted(compressed_data, &mut self.output, offset);
self.output_len = NUM_DOCS_PER_BLOCK;
&compressed_data[consumed_size..]
}
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a[u8] {
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a [u8] {
let consumed_size = uncompress_unsorted(compressed_data, &mut self.output);
self.output_len = NUM_DOCS_PER_BLOCK;
&compressed_data[consumed_size..]
}
#[inline]
pub fn output_array(&self,) -> &[u32] {
pub fn output_array(&self) -> &[u32] {
&self.output[..self.output_len]
}
#[inline]
pub fn output(&self, idx: usize) -> u32 {
self.output[idx]
@@ -111,3 +105,16 @@ impl BlockDecoder {
}
#[cfg(test)]
mod tests {
use super::BlockEncoder;
#[test]
fn test_all_docs_compression_len() {
let data: Vec<u32> = (0u32..128u32).collect();
let mut encoder = BlockEncoder::new();
let compressed = encoder.compress_block_sorted(&data, 0u32);
assert_eq!(compressed.len(), 17);
}
}

View File

@@ -0,0 +1,88 @@
#[inline(always)]
pub fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], mut offset: u32) -> &'a [u8] {
let mut byte_written = 0;
for &v in input {
let mut to_encode: u32 = v - offset;
offset = v;
loop {
let next_byte: u8 = (to_encode % 128u32) as u8;
to_encode /= 128u32;
if to_encode == 0u32 {
output[byte_written] = next_byte | 128u8;
byte_written += 1;
break;
} else {
output[byte_written] = next_byte;
byte_written += 1;
}
}
}
&output[..byte_written]
}
#[inline(always)]
pub fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
let mut byte_written = 0;
for &v in input {
let mut to_encode: u32 = v;
loop {
let next_byte: u8 = (to_encode % 128u32) as u8;
to_encode /= 128u32;
if to_encode == 0u32 {
output[byte_written] = next_byte | 128u8;
byte_written += 1;
break;
} else {
output[byte_written] = next_byte;
byte_written += 1;
}
}
}
&output[..byte_written]
}
#[inline(always)]
pub fn uncompress_sorted<'a>(compressed_data: &'a [u8],
output: &mut [u32],
offset: u32)
-> &'a [u8] {
let mut read_byte = 0;
let mut result = offset;
let num_els = output.len();
for i in 0..num_els {
let mut shift = 0u32;
loop {
let cur_byte = compressed_data[read_byte];
read_byte += 1;
result += ((cur_byte % 128u8) as u32) << shift;
if cur_byte & 128u8 != 0u8 {
break;
}
shift += 7;
}
output[i] = result;
}
&compressed_data[read_byte..]
}
#[inline(always)]
pub fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> &'a [u8] {
let mut read_byte = 0;
let num_els = output.len();
for i in 0..num_els {
let mut result = 0u32;
let mut shift = 0u32;
loop {
let cur_byte = compressed_data[read_byte];
read_byte += 1;
result += ((cur_byte % 128u8) as u32) << shift;
if cur_byte & 128u8 != 0u8 {
break;
}
shift += 7;
}
output[i] = result;
}
&compressed_data[read_byte..]
}
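Putting the sorted pair together (a sketch using only the functions above): values are delta-encoded against the running offset, and each delta is written with the stop bit on its last byte.

    let input = [3u32, 17u32, 187u32];
    let mut buf = [0u8; 16];
    let compressed: Vec<u8> = compress_sorted(&input, &mut buf, 0).to_owned();
    // deltas 3, 14, 170: the first two fit on one byte each,
    // 170 takes two bytes (42, then 1 carrying the stop bit).
    assert_eq!(compressed, vec![3u8 | 128, 14u8 | 128, 42u8, 1u8 | 128]);
    let mut output = [0u32; 3];
    uncompress_sorted(&compressed, &mut output, 0);
    assert_eq!(output, input);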

View File

@@ -0,0 +1,68 @@
mod streamvbyte {
use libc::size_t;
extern "C" {
pub fn streamvbyte_delta_encode(data: *const u32,
num_els: u32,
output: *mut u8,
offset: u32)
-> size_t;
pub fn streamvbyte_delta_decode(compressed_data: *const u8,
output: *mut u32,
num_els: u32,
offset: u32)
-> size_t;
pub fn streamvbyte_encode(data: *const u32, num_els: u32, output: *mut u8) -> size_t;
pub fn streamvbyte_decode(compressed_data: *const u8,
output: *mut u32,
num_els: usize)
-> size_t;
}
}
#[inline(always)]
pub fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], offset: u32) -> &'a [u8] {
let compress_length = unsafe {
streamvbyte::streamvbyte_delta_encode(input.as_ptr(),
input.len() as u32,
output.as_mut_ptr(),
offset)
};
&output[..compress_length]
}
#[inline(always)]
pub fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
let compress_length = unsafe {
streamvbyte::streamvbyte_encode(input.as_ptr(), input.len() as u32, output.as_mut_ptr())
};
&output[..compress_length]
}
#[inline(always)]
pub fn uncompress_sorted<'a>(compressed_data: &'a [u8],
output: &mut [u32],
offset: u32)
-> &'a [u8] {
let consumed_bytes = unsafe {
streamvbyte::streamvbyte_delta_decode(compressed_data.as_ptr(),
output.as_mut_ptr(),
output.len() as u32,
offset)
};
&compressed_data[consumed_bytes..]
}
#[inline(always)]
pub fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> &'a [u8] {
let consumed_bytes = unsafe {
streamvbyte::streamvbyte_decode(compressed_data.as_ptr(), output.as_mut_ptr(), output.len())
};
&compressed_data[consumed_bytes..]
}

View File

@@ -1,10 +1,10 @@
use Result;
use Error;
use error::{ErrorKind, ResultExt};
use serde_json;
use schema::Schema;
use std::sync::Arc;
use std::borrow::BorrowMut;
use std::fmt;
use rustc_serialize::json;
use core::SegmentId;
use directory::{Directory, MmapDirectory, RAMDirectory};
use indexer::index_writer::open_index_writer;
@@ -29,8 +29,7 @@ const NUM_SEARCHERS: usize = 12;
fn load_metas(directory: &Directory) -> Result<IndexMeta> {
let meta_data = directory.atomic_read(&META_FILEPATH)?;
let meta_string = String::from_utf8_lossy(&meta_data);
json::decode(&meta_string)
.map_err(|e| Error::CorruptedFile(META_FILEPATH.clone(), Box::new(e)))
serde_json::from_str(&meta_string).chain_err(|| ErrorKind::CorruptedFile(META_FILEPATH.clone()))
}
/// Tantivy's Search Index
@@ -48,8 +47,11 @@ impl Index {
/// This should only be used for unit tests.
pub fn create_in_ram(schema: Schema) -> Index {
let ram_directory = RAMDirectory::create();
let directory = ManagedDirectory::new(ram_directory).expect("Creating a managed directory from a brand new RAM directory should never fail.");
Index::from_directory(directory, schema).expect("Creating a RAMDirectory should never fail") // unwrap is ok here
// unwrap is ok here
let directory = ManagedDirectory::new(ram_directory)
.expect("Creating a managed directory from a brand new RAM directory \
should never fail.");
Index::from_directory(directory, schema).expect("Creating a RAMDirectory should never fail")
}
/// Creates a new index in a given filepath.
@@ -153,11 +155,10 @@ impl Index {
/// Returns the list of segments that are searchable
pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
Ok(self
.searchable_segment_metas()?
.into_iter()
.map(|segment_meta| self.segment(segment_meta))
.collect())
Ok(self.searchable_segment_metas()?
.into_iter()
.map(|segment_meta| self.segment(segment_meta))
.collect())
}
#[doc(hidden)]
@@ -186,13 +187,13 @@ impl Index {
pub fn searchable_segment_metas(&self) -> Result<Vec<SegmentMeta>> {
Ok(load_metas(self.directory())?.segments)
}
/// Returns the list of segment ids that are searchable.
pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
Ok(self.searchable_segment_metas()?
.iter()
.map(|segment_meta| segment_meta.id())
.collect())
}
/// Creates a new generation of searchers after
@@ -203,9 +204,9 @@ impl Index {
pub fn load_searchers(&self) -> Result<()> {
let searchable_segments = self.searchable_segments()?;
let segment_readers: Vec<SegmentReader> = try!(searchable_segments
.into_iter()
.map(SegmentReader::open)
.collect());
let searchers = (0..NUM_SEARCHERS)
.map(|_| Searcher::from(segment_readers.clone()))
.collect();

View File

@@ -2,14 +2,14 @@ use schema::Schema;
use core::SegmentMeta;
/// Meta information about the `Index`.
///
/// This object is serialized on disk in the `meta.json` file.
/// It keeps information about
/// * the searchable segments,
/// * the index docstamp
/// * the schema
///
#[derive(Clone,Debug,RustcDecodable,RustcEncodable)]
#[derive(Clone,Debug,Serialize, Deserialize)]
pub struct IndexMeta {
pub segments: Vec<SegmentMeta>,
pub schema: Schema,
@@ -19,7 +19,7 @@ pub struct IndexMeta {
impl IndexMeta {
pub fn with_schema(schema: Schema) -> IndexMeta {
IndexMeta {
segments: vec!(),
segments: vec![],
schema: schema,
opstamp: 0u64,
}

View File

@@ -7,7 +7,6 @@ mod segment;
mod index_meta;
mod pool;
mod segment_meta;
mod term_iterator;
pub use self::searcher::Searcher;
pub use self::segment_component::SegmentComponent;
@@ -18,7 +17,6 @@ pub use self::segment::SerializableSegment;
pub use self::index::Index;
pub use self::segment_meta::SegmentMeta;
pub use self::index_meta::IndexMeta;
pub use self::term_iterator::TermIterator;
use std::path::PathBuf;
@@ -27,7 +25,7 @@ lazy_static! {
/// The meta file contains all the information about the list of segments and the schema
/// of the index.
pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json");
/// The managed file contains a list of files that were created by the tantivy
/// and will therefore be garbage collected when they are deemed useless by tantivy.
///
@@ -40,4 +38,4 @@ lazy_static! {
///
/// If the process is killed and this file remains, it is safe to remove it manually.
pub static ref LOCKFILE_FILEPATH: PathBuf = PathBuf::from(".tantivy-indexer.lock");
}

View File

@@ -10,17 +10,44 @@ pub struct GenerationItem<T> {
item: T,
}
// See https://github.com/crossbeam-rs/crossbeam/issues/91
struct NonLeakingMsQueue<T> {
underlying_queue: MsQueue<T>,
}
impl<T> Default for NonLeakingMsQueue<T> {
fn default() -> NonLeakingMsQueue<T> {
NonLeakingMsQueue { underlying_queue: MsQueue::new() }
}
}
impl<T> NonLeakingMsQueue<T> {
fn pop(&self) -> T {
self.underlying_queue.pop()
}
fn push(&self, el: T) {
self.underlying_queue.push(el);
}
}
impl<T> Drop for NonLeakingMsQueue<T> {
fn drop(&mut self) {
while let Some(_popped_item_to_be_dropped) = self.underlying_queue.try_pop() {}
}
}
pub struct Pool<T> {
queue: Arc<MsQueue<GenerationItem<T>>>,
queue: Arc<NonLeakingMsQueue<GenerationItem<T>>>,
freshest_generation: AtomicUsize,
next_generation: AtomicUsize,
}
impl<T> Pool<T> {
pub fn new() -> Pool<T> {
Pool {
queue: Arc::new(MsQueue::new()),
queue: Arc::default(),
freshest_generation: AtomicUsize::default(),
next_generation: AtomicUsize::default(),
}
@@ -37,70 +64,74 @@ impl<T> Pool<T> {
}
self.advertise_generation(next_generation);
}
/// At the exit of this method,
/// - freshest_generation has a value greater than or equal to generation
/// - freshest_generation has a value that has been advertised
fn advertise_generation(&self, generation: usize) {
// not optimal at all, but the easiest-to-read proof.
loop {
let former_generation = self.freshest_generation.load(Ordering::Acquire);
if former_generation >= generation {
break;
}
self.freshest_generation.compare_and_swap(former_generation, generation, Ordering::SeqCst);
}
self.freshest_generation
.compare_and_swap(former_generation, generation, Ordering::SeqCst);
}
}
fn generation(&self,) -> usize {
fn generation(&self) -> usize {
self.freshest_generation.load(Ordering::Acquire)
}
pub fn acquire(&self,) -> LeasedItem<T> {
pub fn acquire(&self) -> LeasedItem<T> {
let generation = self.generation();
loop {
let gen_item = self.queue.pop();
if gen_item.generation >= generation {
return LeasedItem {
gen_item: Some(gen_item),
recycle_queue: self.queue.clone(),
}
}
else {
gen_item: Some(gen_item),
recycle_queue: self.queue.clone(),
};
} else {
// this searcher is obsolete,
// removing it from the pool.
}
}
}
}
pub struct LeasedItem<T> {
gen_item: Option<GenerationItem<T>>,
recycle_queue: Arc<MsQueue<GenerationItem<T>>>,
recycle_queue: Arc<NonLeakingMsQueue<GenerationItem<T>>>,
}
impl<T> Deref for LeasedItem<T> {
type Target = T;
fn deref(&self) -> &T {
&self.gen_item.as_ref().expect("Unwrapping a leased item should never fail").item // unwrap is safe here
&self.gen_item
.as_ref()
.expect("Unwrapping a leased item should never fail")
.item // unwrap is safe here
}
}
impl<T> DerefMut for LeasedItem<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.gen_item.as_mut().expect("Unwrapping a mut leased item should never fail").item // unwrap is safe here
&mut self.gen_item
.as_mut()
.expect("Unwrapping a mut leased item should never fail")
.item // unwrap is safe here
}
}
impl<T> Drop for LeasedItem<T> {
fn drop(&mut self) {
let gen_item: GenerationItem<T> = mem::replace(&mut self.gen_item, None).expect("Unwrapping a leased item should never fail");
let gen_item: GenerationItem<T> = mem::replace(&mut self.gen_item, None)
.expect("Unwrapping a leased item should never fail");
self.recycle_queue.push(gen_item);
}
}
@@ -127,4 +158,4 @@ mod tests {
assert_eq!(*pool.acquire(), 11);
}
}
}

View File

@@ -7,42 +7,42 @@ use query::Query;
use DocId;
use DocAddress;
use schema::Term;
use core::TermIterator;
use termdict::TermMerger;
use std::fmt;
use postings::TermInfo;
/// Holds a list of `SegmentReader`s ready for search.
///
/// It guarantees that the `Segment` will not be removed before
/// the destruction of the `Searcher`.
///
pub struct Searcher {
segment_readers: Vec<SegmentReader>,
}
impl Searcher {
/// Fetches a document from tantivy's store given a `DocAddress`.
///
/// The searcher uses the segment ordinal to route
/// the request to the right `Segment`.
pub fn doc(&self, doc_address: &DocAddress) -> Result<Document> {
let DocAddress(segment_local_id, doc_id) = *doc_address;
let segment_reader = &self.segment_readers[segment_local_id as usize];
segment_reader.doc(doc_id)
}
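A usage sketch (crate-root re-exports assumed; `DocAddress` packs a segment ordinal and a segment-local `DocId`):

    use tantivy::{DocAddress, Document, Result, Searcher};

    // Fetch the first document of the first segment.
    fn first_doc(searcher: &Searcher) -> Result<Document> {
        searcher.doc(&DocAddress(0u32, 0u32))
    }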
/// Returns the overall number of documents in the index.
pub fn num_docs(&self,) -> DocId {
pub fn num_docs(&self) -> DocId {
self.segment_readers
.iter()
.map(|segment_reader| segment_reader.num_docs())
.fold(0u32, |acc, val| acc + val)
}
/// Return the overall number of documents containing
/// the given term.
pub fn doc_freq(&self, term: &Term) -> u32 {
self.segment_readers
.iter()
@@ -50,39 +50,38 @@ impl Searcher {
.fold(0u32, |acc, val| acc + val)
}
/// Returns a Stream over all of the sorted unique terms of
/// the searcher.
///
/// This includes all of the fields from all of the segment_readers.
/// See [TermIterator](struct.TermIterator.html).
///
/// # Warning
/// This API is very likely to change in the future.
pub fn terms<'a>(&'a self) -> TermIterator<'a> {
TermIterator::from(self.segment_readers())
}
/// Return the list of segment readers
pub fn segment_readers(&self,) -> &[SegmentReader] {
pub fn segment_readers(&self) -> &[SegmentReader] {
&self.segment_readers
}
/// Returns the segment_reader associated with the given segment_ordinal
pub fn segment_reader(&self, segment_ord: usize) -> &SegmentReader {
&self.segment_readers[segment_ord]
pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
&self.segment_readers[segment_ord as usize]
}
/// Runs a query on the segment readers wrapped by the searcher
pub fn search<C: Collector>(&self, query: &Query, collector: &mut C) -> Result<TimerTree> {
query.search(self, collector)
}
/// Returns a Stream over all of the sorted unique terms of
/// the searcher.
///
/// This includes all of the fields from all of the segment_readers.
/// See [`TermIterator`](struct.TermIterator.html).
///
/// # Warning
/// This API is very likely to change in the future.
pub fn terms(&self) -> TermMerger<TermInfo> {
TermMerger::from(self.segment_readers())
}
}
impl From<Vec<SegmentReader>> for Searcher {
fn from(segment_readers: Vec<SegmentReader>) -> Searcher {
Searcher {
segment_readers: segment_readers,
}
Searcher { segment_readers: segment_readers }
}
}
@@ -94,4 +93,4 @@ impl fmt::Debug for Searcher {
.collect::<Vec<_>>();
write!(f, "Searcher({:?})", segment_ids)
}
}
}
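A minimal end-to-end sketch of the `Searcher` API above, based on the 0.4 usage visible in the tests later in this diff (`Index::create_in_ram`, `writer_with_num_threads`, `load_searchers`). The external re-export paths (`tantivy::schema`, `tantivy::Index`) are assumptions.

extern crate tantivy;
use tantivy::schema::{SchemaBuilder, Document, TEXT};
use tantivy::Index;

fn main() {
    // Build a one-field schema and an index living purely in RAM.
    let mut schema_builder = SchemaBuilder::default();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
        let mut doc = Document::default();
        doc.add_text(text_field, "hello tantivy");
        index_writer.add_document(doc);
        index_writer.commit().unwrap();
    }
    // Reload the searcher pool, then query the aggregate counters.
    index.load_searchers().unwrap();
    let searcher = index.searcher();
    assert_eq!(searcher.num_docs(), 1);
    assert_eq!(searcher.segment_readers().len(), 1);
}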

View File

@@ -26,8 +26,8 @@ impl fmt::Debug for Segment {
}
/// Creates a new segment given an `Index` and a `SegmentId`
///
/// The function is here to make it private outside `tantivy`.
pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
Segment {
index: index,
@@ -36,9 +36,8 @@ pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
}
impl Segment {
/// Returns our index's schema.
pub fn schema(&self,) -> Schema {
pub fn schema(&self) -> Schema {
self.index.schema()
}
@@ -53,13 +52,13 @@ impl Segment {
}
/// Returns the segment's id.
pub fn id(&self,) -> SegmentId {
pub fn id(&self) -> SegmentId {
self.meta.id()
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated with a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
self.meta.relative_path(component)
@@ -77,14 +76,18 @@ impl Segment {
}
/// Open one of the component file for a *regular* read.
pub fn open_read(&self, component: SegmentComponent) -> result::Result<ReadOnlySource, OpenReadError> {
pub fn open_read(&self,
component: SegmentComponent)
-> result::Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component);
let source = try!(self.index.directory().open_read(&path));
Ok(source)
}
/// Open one of the component file for *regular* write.
pub fn open_write(&mut self, component: SegmentComponent) -> result::Result<WritePtr, OpenWriteError> {
pub fn open_write(&mut self,
component: SegmentComponent)
-> result::Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component);
let write = try!(self.index.directory_mut().open_write(&path));
Ok(write)
@@ -114,10 +117,10 @@ mod tests {
let mut index = Index::create_in_ram(SchemaBuilder::new().build());
let segment = index.new_segment();
let path = segment.relative_path(SegmentComponent::POSTINGS);
let directory = index.directory_mut();
directory.atomic_write(&*path, &vec!(0u8)).unwrap();
directory.atomic_write(&*path, &vec![0u8]).unwrap();
let living_files = HashSet::new();
{
let _file_protection = segment.protect_from_delete(SegmentComponent::POSTINGS);
@@ -130,4 +133,4 @@ mod tests {
assert!(!directory.exists(&*path));
}
}
}

View File

@@ -1,27 +1,40 @@
/// Enum describing each component of a tantivy segment.
/// Each component is stored in its own file,
/// using the pattern `segment_uuid`.`component_extension`,
/// except the delete component, which uses `segment_uuid`.`delete_opstamp`.`component_extension`.
#[derive(Copy, Clone)]
pub enum SegmentComponent {
/// Postings (or inverted list). Sorted lists of document ids, associated to terms
POSTINGS,
/// Positions of terms in each document.
POSITIONS,
/// Column-oriented random-access storage of fields.
FASTFIELDS,
/// Stores the sum of the length (in terms) of each field for each document.
/// Field norms are stored as a special u64 fast field.
FIELDNORMS,
/// Dictionary associating `Term`s to `TermInfo`s which is
/// simply an address into the `postings` file and the `positions` file.
TERMS,
/// Row-oriented, LZ4-compressed storage of the documents.
/// Accessing a document from the store is relatively slow, as it
/// requires to decompress the entire block it belongs to.
STORE,
DELETE
/// Bitset describing which documents of the segment are deleted.
DELETE,
}
impl SegmentComponent {
/// Iterates through the components.
pub fn iterator() -> impl Iterator<Item = &'static SegmentComponent> {
static SEGMENT_COMPONENTS: [SegmentComponent; 7] = [SegmentComponent::POSTINGS,
SegmentComponent::POSITIONS,
SegmentComponent::FASTFIELDS,
SegmentComponent::FIELDNORMS,
SegmentComponent::TERMS,
SegmentComponent::STORE,
SegmentComponent::DELETE];
SEGMENT_COMPONENTS.into_iter()
}
}
}

View File

@@ -1,20 +1,19 @@
use uuid::Uuid;
use std::fmt;
use rustc_serialize::{Encoder, Decoder, Encodable, Decodable};
use std::cmp::{Ordering, Ord};
#[cfg(test)]
use std::sync::atomic;
/// Tantivy `SegmentId`.
///
/// Tantivy's segments are identified
/// by a UUID which is used to prefix the filenames
/// of all of the files associated with the segment.
///
/// In unit tests, for reproducibility, the `SegmentId`s are
/// simply generated in an autoincrementing fashion.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid);
@@ -53,30 +52,18 @@ impl SegmentId {
/// We are using UUID4, so only 6 bits are fixed,
/// and the rest is random.
///
/// Picking the first 8 chars is ok to identify
/// segments in a display message.
pub fn short_uuid_string(&self,) -> String {
pub fn short_uuid_string(&self) -> String {
(&self.0.simple().to_string()[..8]).to_string()
}
/// Returns a segment uuid string.
pub fn uuid_string(&self,) -> String {
pub fn uuid_string(&self) -> String {
self.0.simple().to_string()
}
}
impl Encodable for SegmentId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.0.encode(s)
}
}
impl Decodable for SegmentId {
fn decode<D: Decoder>(d: &mut D) -> Result<Self, D::Error> {
Uuid::decode(d).map(SegmentId)
}
}
impl fmt::Debug for SegmentId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Seg({:?})", self.short_uuid_string())

View File

@@ -3,26 +3,25 @@ use super::SegmentComponent;
use std::path::PathBuf;
use std::collections::HashSet;
#[derive(Clone, Debug, RustcDecodable,RustcEncodable)]
#[derive(Clone, Debug, Serialize, Deserialize)]
struct DeleteMeta {
num_deleted_docs: u32,
opstamp: u64,
}
/// SegmentMeta contains simple meta information about a segment.
/// `SegmentMeta` contains simple meta information about a segment.
///
/// For instance the number of docs it contains,
/// how many are deleted, etc.
#[derive(Clone, Debug, RustcDecodable,RustcEncodable)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SegmentMeta {
segment_id: SegmentId,
max_doc: u32,
deletes: Option<DeleteMeta>,
}
impl SegmentMeta {
/// Creates a new segment meta for
/// a segment with no deletes and no documents.
pub fn new(segment_id: SegmentId) -> SegmentMeta {
SegmentMeta {
@@ -53,28 +52,28 @@ impl SegmentMeta {
/// and are not used by any segment anymore.
pub fn list_files(&self) -> HashSet<PathBuf> {
SegmentComponent::iterator()
.map(|component| self.relative_path(*component))
.collect::<HashSet<PathBuf>>()
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated with a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(&*match component {
SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::TERMS => ".term".to_string(),
SegmentComponent::STORE => ".store".to_string(),
SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => {
format!(".{}.del", self.delete_opstamp().unwrap_or(0))
}
});
PathBuf::from(path)
}
@@ -95,9 +94,7 @@ impl SegmentMeta {
/// Returns the opstamp of the last delete operation
/// taken in account in this segment.
pub fn delete_opstamp(&self) -> Option<u64> {
self.deletes.as_ref().map(|delete_meta| delete_meta.opstamp)
}
/// Returns true iff the segment meta contains
@@ -114,8 +111,8 @@ impl SegmentMeta {
#[doc(hidden)]
pub fn set_delete_meta(&mut self, num_deleted_docs: u32, opstamp: u64) {
self.deletes = Some(DeleteMeta {
num_deleted_docs: num_deleted_docs,
opstamp: opstamp,
});
}
}
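To make the naming scheme concrete, here is a small self-contained sketch mirroring the `relative_path` match above; the uuid prefix is hypothetical.

fn relative_path_sketch(uuid: &str, extension: &str) -> String {
    format!("{}{}", uuid, extension)
}

fn main() {
    let uuid = "8a9f348c"; // hypothetical (shortened) segment uuid
    // Regular components: `segment_uuid`.`component_extension`
    assert_eq!(relative_path_sketch(uuid, ".idx"), "8a9f348c.idx"); // POSTINGS
    assert_eq!(relative_path_sketch(uuid, ".pos"), "8a9f348c.pos"); // POSITIONS
    // The delete component also embeds the opstamp of the last delete:
    let opstamp = 42u64;
    let del = relative_path_sketch(uuid, &format!(".{}.del", opstamp));
    assert_eq!(del, "8a9f348c.42.del");
}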

View File

@@ -5,24 +5,27 @@ use core::SegmentComponent;
use schema::Term;
use common::HasLen;
use core::SegmentMeta;
use fastfield::delete::DeleteBitSet;
use fastfield::{self, FastFieldNotAvailableError};
use fastfield::DeleteBitSet;
use store::StoreReader;
use schema::Document;
use directory::ReadOnlySource;
use DocId;
use std::str;
use termdict::TermDictionary;
use std::cmp;
use postings::TermInfo;
use datastruct::FstMap;
use termdict::TermDictionaryImpl;
use std::sync::Arc;
use std::fmt;
use schema::Field;
use postings::SegmentPostingsOption;
use postings::SegmentPostings;
use fastfield::{U32FastFieldsReader, U32FastFieldReader};
use postings::{SegmentPostings, BlockSegmentPostings};
use fastfield::{FastFieldsReader, FastFieldReader, U64FastFieldReader};
use schema::Schema;
use schema::FieldType;
use postings::FreqHandler;
use schema::TextIndexingOptions;
/// Entry point to access all of the datastructures of the `Segment`
///
@@ -39,11 +42,11 @@ use schema::TextIndexingOptions;
pub struct SegmentReader {
segment_id: SegmentId,
segment_meta: SegmentMeta,
term_infos: Arc<FstMap<TermInfo>>,
terms: Arc<TermDictionaryImpl>,
postings_data: ReadOnlySource,
store_reader: StoreReader,
fast_fields_reader: Arc<U32FastFieldsReader>,
fieldnorms_reader: Arc<U32FastFieldsReader>,
fast_fields_reader: Arc<FastFieldsReader>,
fieldnorms_reader: Arc<FastFieldsReader>,
delete_bitset: DeleteBitSet,
positions_data: ReadOnlySource,
schema: Schema,
@@ -57,7 +60,7 @@ impl SegmentReader {
pub fn max_doc(&self) -> DocId {
self.segment_meta.max_doc()
}
/// Returns the number of documents.
/// Deleted documents are not counted.
///
@@ -66,60 +69,62 @@ impl SegmentReader {
pub fn num_docs(&self) -> DocId {
self.segment_meta.num_docs()
}
/// Return the number of documents that have been
/// deleted in the segment.
pub fn num_deleted_docs(&self) -> DocId {
self.delete_bitset.len() as DocId
}
#[doc(hidden)]
pub fn fast_fields_reader(&self) -> &FastFieldsReader {
&*self.fast_fields_reader
}
/// Accessor to a segment's fast field reader given a field.
pub fn get_fast_field_reader(&self, field: Field) -> Option<U32FastFieldReader> {
/// Returns the u32 fast value reader if the field
/// is a u32 field indexed as "fast".
///
/// Return None if the field is not a u32 field
/// indexed with the fast option.
///
/// # Panics
/// May panic if the index is corrupted.
///
/// Returns the u64 fast value reader if the field
/// is a u64 field indexed as "fast".
///
/// Return a FastFieldNotAvailableError if the field is not
/// declared as a fast field in the schema.
///
/// # Panics
/// May panic if the index is corrupted.
pub fn get_fast_field_reader<TFastFieldReader: FastFieldReader>
(&self,
field: Field)
-> fastfield::Result<TFastFieldReader> {
let field_entry = self.schema.get_field_entry(field);
match field_entry.field_type() {
&FieldType::Str(_) => {
warn!("Field <{}> is not a fast field. It is a text field, and fast text fields are not supported yet.", field_entry.name());
None
},
&FieldType::U32(ref u32_options) => {
if u32_options.is_fast() {
self.fast_fields_reader.get_field(field)
}
else {
warn!("Field <{}> is not defined as a fast field.", field_entry.name());
None
}
},
if !TFastFieldReader::is_enabled(field_entry.field_type()) {
Err(FastFieldNotAvailableError::new(field_entry))
} else {
Ok(self.fast_fields_reader
.open_reader(field)
.expect("Fast field file corrupted."))
}
}
/// Accessor to the segment's `Field norms`'s reader.
///
/// Field norms are the length (in tokens) of the fields.
/// It is used in the computation of the [TfIdf]
/// (https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
///
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> Option<U32FastFieldReader> {
self.fieldnorms_reader.get_field(field)
pub fn get_fieldnorms_reader(&self, field: Field) -> Option<U64FastFieldReader> {
self.fieldnorms_reader.open_reader(field)
}
/// Returns the number of documents containing the term.
pub fn doc_freq(&self, term: &Term) -> u32 {
match self.get_term_info(term) {
Some(term_info) => term_info.doc_freq,
None => 0,
}
}
}
/// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self) -> &StoreReader {
&self.store_reader
@@ -128,50 +133,51 @@ impl SegmentReader {
/// Open a new segment for reading.
pub fn open(segment: Segment) -> Result<SegmentReader> {
let source = try!(segment.open_read(SegmentComponent::TERMS));
let term_infos = try!(FstMap::from_source(source));
let store_reader = StoreReader::from(try!(segment.open_read(SegmentComponent::STORE)));
let postings_shared_mmap = try!(segment.open_read(SegmentComponent::POSTINGS));
let fast_field_data = try!(segment.open_read(SegmentComponent::FASTFIELDS));
let fast_fields_reader = try!(U32FastFieldsReader::open(fast_field_data));
let fieldnorms_data = try!(segment.open_read(SegmentComponent::FIELDNORMS));
let fieldnorms_reader = try!(U32FastFieldsReader::open(fieldnorms_data));
let source = segment.open_read(SegmentComponent::TERMS)?;
let terms = TermDictionaryImpl::from_source(source)?;
let store_source = segment.open_read(SegmentComponent::STORE)?;
let store_reader = StoreReader::from_source(store_source);
let postings_shared_mmap = segment.open_read(SegmentComponent::POSTINGS)?;
let fast_field_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
let fast_fields_reader = FastFieldsReader::from_source(fast_field_data)?;
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_reader = FastFieldsReader::from_source(fieldnorms_data)?;
let positions_data = segment
.open_read(SegmentComponent::POSITIONS)
.unwrap_or_else(|_| ReadOnlySource::empty());
let delete_bitset =
if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
DeleteBitSet::open(delete_data)
}
else {
DeleteBitSet::empty()
};
let delete_bitset = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
DeleteBitSet::open(delete_data)
} else {
DeleteBitSet::empty()
};
let schema = segment.schema();
Ok(SegmentReader {
segment_meta: segment.meta().clone(),
postings_data: postings_shared_mmap,
term_infos: Arc::new(term_infos),
segment_id: segment.id(),
store_reader: store_reader,
fast_fields_reader: Arc::new(fast_fields_reader),
fieldnorms_reader: Arc::new(fieldnorms_reader),
delete_bitset: delete_bitset,
positions_data: positions_data,
schema: schema,
})
segment_meta: segment.meta().clone(),
postings_data: postings_shared_mmap,
terms: Arc::new(terms),
segment_id: segment.id(),
store_reader: store_reader,
fast_fields_reader: Arc::new(fast_fields_reader),
fieldnorms_reader: Arc::new(fieldnorms_reader),
delete_bitset: delete_bitset,
positions_data: positions_data,
schema: schema,
})
}
/// Return the term dictionary datastructure.
pub fn term_infos(&self) -> &FstMap<TermInfo> {
&self.term_infos
pub fn terms(&self) -> &TermDictionaryImpl {
&self.terms
}
/// Returns the document (or to be accurate, its stored field)
/// bearing the given doc id.
/// This method is slow and should seldom be called from
@@ -182,81 +188,86 @@ impl SegmentReader {
/// Returns the segment postings associated with the term, and with the given option,
/// or `None` if the term has never been encountered and indexed.
///
/// If the field was not indexed with the indexing options that cover
/// the requested options, the method does not fail
/// and returns a `SegmentPostings` with as much information as possible.
///
pub fn read_postings(&self, term: &Term, option: SegmentPostingsOption) -> Option<SegmentPostings> {
/// For instance, requesting `SegmentPostingsOption::FreqAndPositions` for a
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
/// with `DocId`s and frequencies.
pub fn read_postings(&self,
term: &Term,
option: SegmentPostingsOption)
-> Option<SegmentPostings> {
let field = term.field();
let field_entry = self.schema.get_field_entry(field);
let term_info = get!(self.get_term_info(&term));
let term_info = get!(self.get_term_info(term));
let maximum_option = get!(field_entry.field_type().get_segment_postings_option());
let best_effort_option = cmp::min(maximum_option, option);
Some(self.read_postings_from_terminfo(&term_info, best_effort_option))
}
/// Returns a posting object given a `term_info`.
/// This method is for an advanced usage only.
///
/// Most user should prefer using `read_postings` instead.
pub fn read_postings_from_terminfo(&self,
term_info: &TermInfo,
option: SegmentPostingsOption)
-> SegmentPostings {
let block_postings = self.read_block_postings_from_terminfo(term_info, option);
let delete_bitset = self.delete_bitset.clone();
SegmentPostings::from_block_postings(block_postings, delete_bitset)
}
/// Returns a block postings given a `term_info`.
/// This method is for an advanced usage only.
///
/// Most user should prefer using `read_postings` instead.
pub fn read_block_postings_from_terminfo(&self,
term_info: &TermInfo,
option: SegmentPostingsOption)
-> BlockSegmentPostings {
let offset = term_info.postings_offset as usize;
let postings_data = &self.postings_data[offset..];
let freq_handler = match *field_entry.field_type() {
FieldType::Str(ref options) => {
let indexing_options = options.get_indexing_options();
match option {
SegmentPostingsOption::NoFreq => {
FreqHandler::new_without_freq()
}
SegmentPostingsOption::Freq => {
if indexing_options.is_termfreq_enabled() {
FreqHandler::new_with_freq()
}
else {
FreqHandler::new_without_freq()
}
}
SegmentPostingsOption::FreqAndPositions => {
if indexing_options == TextIndexingOptions::TokenizedWithFreqAndPosition {
let offseted_position_data = &self.positions_data[term_info.positions_offset as usize ..];
FreqHandler::new_with_freq_and_position(offseted_position_data)
}
else if indexing_options.is_termfreq_enabled()
{
FreqHandler::new_with_freq()
}
else {
FreqHandler::new_without_freq()
}
}
}
}
_ => {
FreqHandler::new_without_freq()
let freq_handler = match option {
SegmentPostingsOption::NoFreq => FreqHandler::new_without_freq(),
SegmentPostingsOption::Freq => FreqHandler::new_with_freq(),
SegmentPostingsOption::FreqAndPositions => {
let offset = term_info.positions_offset as usize;
let offseted_position_data = &self.positions_data[offset..];
FreqHandler::new_with_freq_and_position(offseted_position_data)
}
};
Some(SegmentPostings::from_data(term_info.doc_freq, postings_data, &self.delete_bitset, freq_handler))
BlockSegmentPostings::from_data(term_info.doc_freq as usize, postings_data, freq_handler)
}
/// Returns the posting list associated with a term.
/// Resets the block segment to another position of the postings
/// file.
///
/// If the term is not found, return None.
/// Even when non-null, because of deletes, the posting object
/// returned by this method may contain no documents.
pub fn read_postings_all_info(&self, term: &Term) -> Option<SegmentPostings> {
let field_entry = self.schema.get_field_entry(term.field());
let segment_posting_option = match *field_entry.field_type() {
FieldType::Str(ref text_options) => {
match text_options.get_indexing_options() {
TextIndexingOptions::TokenizedWithFreq => SegmentPostingsOption::Freq,
TextIndexingOptions::TokenizedWithFreqAndPosition => SegmentPostingsOption::FreqAndPositions,
_ => SegmentPostingsOption::NoFreq,
}
}
FieldType::U32(_) => SegmentPostingsOption::NoFreq
};
self.read_postings(term, segment_posting_option)
/// This is useful for enumerating through a list of terms,
/// and consuming the associated posting lists while avoiding
/// reallocating a `BlockSegmentPostings`.
///
/// # Warning
///
/// This does not reset the positions list.
pub fn reset_block_postings_from_terminfo<'a>(&'a self,
term_info: &TermInfo,
block_postings: &mut BlockSegmentPostings<'a>) {
let offset = term_info.postings_offset as usize;
let postings_data: &'a [u8] = &self.postings_data[offset..];
block_postings.reset(term_info.doc_freq as usize, postings_data);
}
/// Returns the term info associated with the term.
pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
self.term_infos.get(term.as_slice())
self.terms.get(term.as_slice())
}
/// Returns the segment id

View File

@@ -1,185 +0,0 @@
use fst::Streamer;
use std::mem;
use std::collections::BinaryHeap;
use fst::map::Keys;
use schema::Field;
use schema::Term;
use core::SegmentReader;
use std::cmp::Ordering;
#[derive(PartialEq, Eq, Debug)]
struct HeapItem {
term: Term,
segment_ord: usize,
}
impl PartialOrd for HeapItem {
fn partial_cmp(&self, other: &HeapItem) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for HeapItem {
fn cmp(&self, other: &HeapItem) -> Ordering {
(&other.term, &other.segment_ord).cmp(&(&self.term, &self.segment_ord))
}
}
/// Given a list of sorted term streams,
/// returns an iterator over sorted unique terms.
///
/// The item yield is actually a pair with
/// - the term
/// - a slice with the ordinal of the segments containing
/// the terms.
pub struct TermIterator<'a> {
key_streams: Vec<Keys<'a>>,
heap: BinaryHeap<HeapItem>,
// Buffer hosting the list of segment ordinals containing
// the current term.
current_term: Term,
current_segment_ords: Vec<usize>,
}
impl<'a> TermIterator<'a> {
fn new(key_streams: Vec<Keys<'a>>) -> TermIterator<'a> {
let key_streams_len = key_streams.len();
TermIterator {
key_streams: key_streams,
heap: BinaryHeap::new(),
current_term: Term::from_field_text(Field(0), ""),
current_segment_ords: (0..key_streams_len).collect(),
}
}
/// Advance the term iterator to the next term.
/// Returns true if there is indeed another term
/// False if there is none.
pub fn advance(&mut self) -> bool {
self.advance_segments();
if let Some(mut head) = self.heap.pop() {
mem::swap(&mut self.current_term, &mut head.term);
self.current_segment_ords.push(head.segment_ord);
loop {
match self.heap.peek() {
Some(&ref next_heap_it) if next_heap_it.term == self.current_term => {}
_ => { break; }
}
let next_heap_it = self.heap.pop().unwrap(); // safe : we peeked beforehand
self.current_segment_ords.push(next_heap_it.segment_ord);
}
true
}
else {
false
}
}
/// Returns the current term.
///
/// This method may be called
/// iff advance() has been called before
/// and "true" was returned.
pub fn term(&self) -> &Term {
&self.current_term
}
/// Returns the sorted list of segment ordinals
/// that include the current term.
///
/// This method may be called
/// iff advance() has been called before
/// and "true" was returned.
pub fn segment_ords(&self) -> &[usize]{
&self.current_segment_ords[..]
}
fn advance_segments(&mut self) {
for segment_ord in self.current_segment_ords.drain(..) {
if let Some(term) = self.key_streams[segment_ord].next() {
self.heap.push(HeapItem {
term: Term::from_bytes(term),
segment_ord: segment_ord,
});
}
}
}
}
impl<'a, 'f> Streamer<'a> for TermIterator<'f> {
type Item = &'a Term;
fn next(&'a mut self) -> Option<Self::Item> {
if self.advance() {
Some(&self.current_term)
}
else {
None
}
}
}
impl<'a> From<&'a [SegmentReader]> for TermIterator<'a> {
fn from(segment_readers: &'a [SegmentReader]) -> TermIterator<'a> {
TermIterator::new(
segment_readers
.iter()
.map(|reader| reader.term_infos().keys())
.collect()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use schema::{SchemaBuilder, Document, TEXT};
use core::Index;
#[test]
fn test_term_iterator() {
let mut schema_builder = SchemaBuilder::default();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
{
let mut doc = Document::default();
doc.add_text(text_field, "a b d f");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
{
{
let mut doc = Document::default();
doc.add_text(text_field, "a b c d f");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
{
{
let mut doc = Document::default();
doc.add_text(text_field, "e f");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let mut term_it = searcher.terms();
let mut terms = String::new();
while let Some(term) = term_it.next() {
unsafe {
terms.push_str(term.text());
}
}
assert_eq!(terms, "abcdef");
}
}

View File

@@ -1,152 +0,0 @@
#![allow(should_implement_trait)]
use std::io;
use std::io::Write;
use fst;
use fst::raw::Fst;
use directory::ReadOnlySource;
use common::BinarySerializable;
use std::marker::PhantomData;
fn convert_fst_error(e: fst::Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
}
pub struct FstMapBuilder<W: Write, V: BinarySerializable> {
fst_builder: fst::MapBuilder<W>,
data: Vec<u8>,
_phantom_: PhantomData<V>,
}
impl<W: Write, V: BinarySerializable> FstMapBuilder<W, V> {
pub fn new(w: W) -> io::Result<FstMapBuilder<W, V>> {
let fst_builder = try!(fst::MapBuilder::new(w).map_err(convert_fst_error));
Ok(FstMapBuilder {
fst_builder: fst_builder,
data: Vec::new(),
_phantom_: PhantomData,
})
}
/// Horribly unsafe, nobody should ever do that... except me :)
///
/// If used, it must be used by systematically alternating calls
/// to insert_key and insert_value.
///
/// TODO see if I can bend Rust typesystem to enforce that
/// in a nice way.
pub fn insert_key(&mut self, key: &[u8]) -> io::Result<()> {
try!(self.fst_builder
.insert(key, self.data.len() as u64)
.map_err(convert_fst_error));
Ok(())
}
/// Horribly unsafe, nobody should ever do that... except me :)
pub fn insert_value(&mut self, value: &V) -> io::Result<()> {
try!(value.serialize(&mut self.data));
Ok(())
}
#[cfg(test)]
pub fn insert(&mut self, key: &[u8], value: &V) -> io::Result<()> {
try!(self.fst_builder
.insert(key, self.data.len() as u64)
.map_err(convert_fst_error));
try!(value.serialize(&mut self.data));
Ok(())
}
pub fn finish(self,) -> io::Result<W> {
let mut file = try!(
self.fst_builder
.into_inner()
.map_err(convert_fst_error));
let footer_size = self.data.len() as u32;
try!(file.write_all(&self.data));
try!((footer_size as u32).serialize(&mut file));
try!(file.flush());
Ok(file)
}
}
pub struct FstMap<V: BinarySerializable> {
fst_index: fst::Map,
values_mmap: ReadOnlySource,
_phantom_: PhantomData<V>,
}
fn open_fst_index(source: ReadOnlySource) -> io::Result<fst::Map> {
Ok(fst::Map::from(match source {
ReadOnlySource::Anonymous(data) => try!(Fst::from_shared_bytes(data.data, data.start, data.len).map_err(convert_fst_error)),
ReadOnlySource::Mmap(mmap_readonly) => try!(Fst::from_mmap(mmap_readonly).map_err(convert_fst_error)),
}))
}
impl<V: BinarySerializable> FstMap<V> {
pub fn keys(&self,) -> fst::map::Keys {
self.fst_index.keys()
}
pub fn from_source(source: ReadOnlySource) -> io::Result<FstMap<V>> {
let total_len = source.len();
let length_offset = total_len - 4;
let mut split_len_buffer: &[u8] = &source.as_slice()[length_offset..];
let footer_size = try!(u32::deserialize(&mut split_len_buffer)) as usize;
let split_len = length_offset - footer_size;
let fst_source = source.slice(0, split_len);
let values_source = source.slice(split_len, length_offset);
let fst_index = try!(open_fst_index(fst_source));
Ok(FstMap {
fst_index: fst_index,
values_mmap: values_source,
_phantom_: PhantomData,
})
}
fn read_value(&self, offset: u64) -> V {
let buffer = self.values_mmap.as_slice();
let mut cursor = &buffer[(offset as usize)..];
V::deserialize(&mut cursor).expect("Data in FST is corrupted")
}
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Option<V> {
self.fst_index
.get(key)
.map(|offset| self.read_value(offset))
}
}
#[cfg(test)]
mod tests {
use super::*;
use directory::{RAMDirectory, Directory};
use std::path::PathBuf;
use fst::Streamer;
#[test]
fn test_fstmap() {
let mut directory = RAMDirectory::create();
let path = PathBuf::from("fstmap");
{
let write = directory.open_write(&path).unwrap();
let mut fstmap_builder = FstMapBuilder::new(write).unwrap();
fstmap_builder.insert("abc".as_bytes(), &34u32).unwrap();
fstmap_builder.insert("abcd".as_bytes(), &346u32).unwrap();
fstmap_builder.finish().unwrap();
}
let source = directory.open_read(&path).unwrap();
let fstmap: FstMap<u32> = FstMap::from_source(source).unwrap();
assert_eq!(fstmap.get("abc"), Some(34u32));
assert_eq!(fstmap.get("abcd"), Some(346u32));
let mut keys = fstmap.keys();
assert_eq!(keys.next().unwrap(), "abc".as_bytes());
assert_eq!(keys.next().unwrap(), "abcd".as_bytes());
assert_eq!(keys.next(), None);
}
}

View File

@@ -1,7 +1,4 @@
mod fstmap;
mod skip;
pub mod stacker;
pub use self::fstmap::FstMapBuilder;
pub use self::fstmap::FstMap;
pub use self::skip::{SkipListBuilder, SkipList};

View File

@@ -114,9 +114,9 @@ mod tests {
let mut skip_list: SkipList<()> = SkipList::from(output.as_slice());
assert_eq!(skip_list.next().unwrap(), (0, ()));
skip_list.seek(431);
assert_eq!(skip_list.next().unwrap(), (431, ()));
skip_list.seek(1003);
assert_eq!(skip_list.next().unwrap(), (1004, ()));
assert_eq!(skip_list.next(), None);
}

View File

@@ -13,14 +13,12 @@ struct Layer<'a, T> {
}
impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
type Item = (DocId, T);
fn next(&mut self,)-> Option<(DocId, T)> {
fn next(&mut self) -> Option<(DocId, T)> {
if self.next_id == u32::max_value() {
None
} else {
let cur_val = T::deserialize(&mut self.cursor).unwrap();
let cur_id = self.next_id;
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
@@ -31,7 +29,7 @@ impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
impl<'a, T: BinarySerializable> From<&'a [u8]> for Layer<'a, T> {
fn from(data: &'a [u8]) -> Layer<'a, T> {
let mut cursor = data;
let next_id = u32::deserialize(&mut cursor).unwrap_or(u32::max_value());
Layer {
data: data,
@@ -43,7 +41,6 @@ impl<'a, T: BinarySerializable> From<&'a [u8]> for Layer<'a, T> {
}
impl<'a, T: BinarySerializable> Layer<'a, T> {
fn empty() -> Layer<'a, T> {
Layer {
data: &EMPTY,
@@ -53,11 +50,11 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
}
}
fn seek_offset(&mut self, offset: usize) {
self.cursor = &self.data[offset..];
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
}
// Returns the last element (key, val)
// such that (key < doc_id)
//
@@ -67,8 +64,12 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
let mut val = None;
while self.next_id < doc_id {
match self.next() {
None => {
break;
}
v => {
val = v;
}
}
}
val
@@ -82,16 +83,14 @@ pub struct SkipList<'a, T: BinarySerializable> {
}
impl<'a, T: BinarySerializable> Iterator for SkipList<'a, T> {
type Item = (DocId, T);
fn next(&mut self,)-> Option<(DocId, T)> {
fn next(&mut self) -> Option<(DocId, T)> {
self.data_layer.next()
}
}
impl<'a, T: BinarySerializable> SkipList<'a, T> {
pub fn seek(&mut self, doc_id: DocId) -> Option<(DocId, T)> {
let mut next_layer_skip: Option<(DocId, u32)> = None;
for skip_layer in &mut self.skip_layers {
@@ -99,39 +98,33 @@ impl<'a, T: BinarySerializable> SkipList<'a, T> {
skip_layer.seek_offset(offset as usize);
}
next_layer_skip = skip_layer.seek(doc_id);
}
if let Some((_, offset)) = next_layer_skip {
self.data_layer.seek_offset(offset as usize);
}
self.data_layer.seek(doc_id)
}
}
impl<'a, T: BinarySerializable> From<&'a [u8]> for SkipList<'a, T> {
fn from(mut data: &'a [u8]) -> SkipList<'a, T> {
let offsets: Vec<u32> = Vec::deserialize(&mut data).unwrap();
let num_layers = offsets.len();
let layers_data: &[u8] = data;
let data_layer: Layer<'a, T> = if num_layers == 0 {
Layer::empty()
} else {
let first_layer_data: &[u8] = &layers_data[..offsets[0] as usize];
Layer::from(first_layer_data)
};
let skip_layers = (0..max(1, num_layers) - 1)
.map(|i| (offsets[i] as usize, offsets[i + 1] as usize))
.map(|(start, stop)| Layer::from(&layers_data[start..stop]))
.collect();
SkipList {
skip_layers: skip_layers,
data_layer: data_layer,
}
}
}

View File

@@ -13,13 +13,12 @@ struct LayerBuilder<T: BinarySerializable> {
}
impl<T: BinarySerializable> LayerBuilder<T> {
fn written_size(&self,) -> usize {
fn written_size(&self) -> usize {
self.buffer.len()
}
fn write(&self, output: &mut Write) -> Result<(), io::Error> {
try!(output.write_all(&self.buffer));
output.write_all(&self.buffer)?;
Ok(())
}
@@ -37,13 +36,14 @@ impl<T: BinarySerializable> LayerBuilder<T> {
self.remaining -= 1;
self.len += 1;
let offset = self.written_size() as u32;
try!(doc_id.serialize(&mut self.buffer));
try!(value.serialize(&mut self.buffer));
doc_id.serialize(&mut self.buffer)?;
value.serialize(&mut self.buffer)?;
Ok(if self.remaining == 0 {
self.remaining = self.period;
Some((doc_id, offset))
} else {
None
})
}
}
@@ -56,7 +56,6 @@ pub struct SkipListBuilder<T: BinarySerializable> {
impl<T: BinarySerializable> SkipListBuilder<T> {
pub fn new(period: usize) -> SkipListBuilder<T> {
SkipListBuilder {
period: period,
@@ -78,17 +77,19 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
let mut skip_pointer = try!(self.data_layer.insert(doc_id, dest));
loop {
skip_pointer = match skip_pointer {
Some((skip_doc_id, skip_offset)) => {
try!(self.get_skip_layer(layer_id)
.insert(skip_doc_id, &skip_offset))
}
None => {
return Ok(());
}
};
layer_id += 1;
}
}
pub fn write<W: Write>(self, output: &mut Write) -> io::Result<()> {
pub fn write<W: Write>(self, output: &mut W) -> io::Result<()> {
let mut size: u32 = 0;
let mut layer_sizes: Vec<u32> = Vec::new();
size += self.data_layer.buffer.len() as u32;
@@ -97,10 +98,10 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
size += layer.buffer.len() as u32;
layer_sizes.push(size);
}
try!(layer_sizes.serialize(output));
try!(self.data_layer.write(output));
layer_sizes.serialize(output)?;
self.data_layer.write(output)?;
for layer in self.skip_layers.iter().rev() {
try!(layer.write(output));
layer.write(output)?;
}
Ok(())
}
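An end-to-end sketch of the builder/reader pair, written as a crate-internal test. The call sequence is inferred from the unit test earlier in this diff and from the signatures above; the exact `insert` signature and the `period` argument are assumptions.

#[test]
fn skip_list_round_trip_sketch() {
    // Build: ascending doc ids, with a skip pointer every 4 entries (assumed API).
    let mut output: Vec<u8> = Vec::new();
    let mut builder: SkipListBuilder<()> = SkipListBuilder::new(4);
    for doc_id in &[0u32, 431, 1004] {
        builder.insert(*doc_id, &()).unwrap();
    }
    builder.write(&mut output).unwrap();
    // Read: seek leaves the cursor on the first key >= target.
    let mut skip_list: SkipList<()> = SkipList::from(output.as_slice());
    skip_list.seek(431);
    assert_eq!(skip_list.next().unwrap(), (431, ()));
}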

View File

@@ -9,11 +9,11 @@ pub fn is_power_of_2(val: u32) -> bool {
#[inline]
pub fn jump_needed(val: u32) -> bool {
val > 3 && is_power_of_2(val)
}
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct ExpUnrolledLinkedList {
len: u32,
end: u32,
@@ -24,7 +24,6 @@ pub struct ExpUnrolledLinkedList {
}
impl ExpUnrolledLinkedList {
pub fn iter<'a>(&self, addr: u32, heap: &'a Heap) -> ExpUnrolledLinkedListIterator<'a> {
ExpUnrolledLinkedListIterator {
heap: heap,
@@ -42,10 +41,10 @@ impl ExpUnrolledLinkedList {
// the next block as a size of (length so far),
// and we need to add 1u32 to store the pointer
// to the next element.
let new_block_size: usize = (self.len as usize + 1) * mem::size_of::<u32>();
let new_block_addr: u32 = heap.allocate_space(new_block_size);
heap.set(self.end, &new_block_addr);
self.end = new_block_addr;
}
heap.set(self.end, &val);
self.end += mem::size_of::<u32>() as u32;
@@ -53,6 +52,12 @@ impl ExpUnrolledLinkedList {
}
impl HeapAllocable for u32 {
fn with_addr(_addr: u32) -> u32 {
0u32
}
}
impl HeapAllocable for ExpUnrolledLinkedList {
fn with_addr(addr: u32) -> ExpUnrolledLinkedList {
let last_addr = addr + mem::size_of::<u32>() as u32 * 2u32;
@@ -77,23 +82,21 @@ pub struct ExpUnrolledLinkedListIterator<'a> {
impl<'a> Iterator for ExpUnrolledLinkedListIterator<'a> {
type Item = u32;
fn next(&mut self,) -> Option<u32> {
fn next(&mut self) -> Option<u32> {
if self.consumed == self.len {
None
} else {
let addr: u32;
self.consumed += 1;
if jump_needed(self.consumed) {
addr = *self.heap.get_mut_ref(self.addr);
} else {
addr = self.addr;
}
self.addr = addr + mem::size_of::<u32>() as u32;
Some(*self.heap.get_mut_ref(addr))
}
}
}
}
@@ -103,7 +106,7 @@ impl<'a> Iterator for ExpUnrolledLinkedListIterator<'a> {
#[cfg(test)]
mod tests {
use super::*;
use super::super::heap::Heap;
use test::Bencher;
@@ -147,7 +150,7 @@ mod tests {
#[bench]
fn bench_push_stack(bench: &mut Bencher) {
let heap = Heap::with_capacity(64_000_000);
bench.iter(|| {
let mut stacks = Vec::with_capacity(100);
for _ in 0..NUM_STACK {
@@ -163,4 +166,4 @@ mod tests {
heap.clear();
});
}
}
}
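A small illustration of the growth rule above: a fresh block is allocated each time the element count crosses a power of two (past 3), so block capacity roughly doubles per jump. The `is_power_of_2` body below is the usual bit trick, assumed to match the crate's definition.

fn is_power_of_2(val: u32) -> bool {
    val & (val - 1) == 0 // usual bit trick; fine here since jump_needed guards val > 3
}

fn jump_needed(val: u32) -> bool {
    val > 3 && is_power_of_2(val)
}

fn main() {
    // Jumps happen when pushing the 4th, 8th, 16th, ... element.
    let jumps: Vec<u32> = (1..20).filter(|&v| jump_needed(v)).collect();
    assert_eq!(jumps, vec![4, 8, 16]);
}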

View File

@@ -1,55 +1,105 @@
use std::iter;
use std::marker::PhantomData;
use std::mem;
use super::heap::{Heap, HeapAllocable, BytesRef};
mod murmurhash2 {
const SEED: u32 = 3_242_157_231u32;
#[inline(always)]
pub fn murmurhash2(key: &[u8]) -> u32 {
let mut key_ptr: *const u32 = key.as_ptr() as *const u32;
let m: u32 = 0x5bd1e995;
let r = 24;
let len = key.len() as u32;
/// djb2 hash function
fn djb2(key: &[u8]) -> u64 {
let mut state: u64 = 5381;
for &b in key {
state = (state << 5).wrapping_add(state).wrapping_add(b as u64);
}
state
}
impl Default for BytesRef {
fn default() -> BytesRef {
BytesRef {
start: 0u32,
stop: 0u32,
let mut h: u32 = SEED ^ len;
let num_blocks = len >> 2;
for _ in 0..num_blocks {
let mut k: u32 = unsafe { *key_ptr };
k = k.wrapping_mul(m);
k ^= k >> r;
k = k.wrapping_mul(m);
h = h.wrapping_mul(m);
h ^= k;
key_ptr = key_ptr.wrapping_offset(1);
}
// Handle the last few bytes of the input array
let remaining = len & 3;
let key_ptr_u8: *const u8 = key_ptr as *const u8;
match remaining {
3 => {
h ^= unsafe { *key_ptr_u8.wrapping_offset(2) as u32 } << 16;
h ^= unsafe { *key_ptr_u8.wrapping_offset(1) as u32 } << 8;
h ^= unsafe { *key_ptr_u8 as u32 };
h = h.wrapping_mul(m);
}
2 => {
h ^= unsafe { *key_ptr_u8.wrapping_offset(1) as u32 } << 8;
h ^= unsafe { *key_ptr_u8 as u32 };
h = h.wrapping_mul(m);
}
1 => {
h ^= unsafe { *key_ptr_u8 as u32 };
h = h.wrapping_mul(m);
}
_ => {}
}
h ^= h >> 13;
h = h.wrapping_mul(m);
h ^ (h >> 15)
}
}
/// Split the thread memory budget into
/// - the heap size
/// - the hash table "table" itself.
///
/// Returns (the heap size in bytes, the hash table size in number of bits)
pub(crate) fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
let table_size_limit: usize = per_thread_memory_budget / 3;
let compute_table_size = |num_bits: usize| {
let table_size: usize = (1 << num_bits) * mem::size_of::<KeyValue>();
table_size * mem::size_of::<KeyValue>()
};
let table_num_bits: usize = (1..)
.into_iter()
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_size_limit)
.last()
.expect(&format!("Per thread memory is too small: {}", per_thread_memory_budget));
let table_size = compute_table_size(table_num_bits);
let heap_size = per_thread_memory_budget - table_size;
(heap_size, table_num_bits)
}
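A worked example of the split for a 1 MB per-thread budget, matching `test_hashmap_size` below. Note that the sizing rule above multiplies by `size_of::<KeyValue>()` twice (8 × 8 = 64 bytes per bucket with the packed 8-byte `KeyValue`); the constants here assume that layout.

fn main() {
    let budget = 1_000_000usize;
    let limit = budget / 3; // 333_333
    let bytes_per_bucket = 8 * 8; // size_of::<KeyValue>() applied twice
    let num_bits = (1..)
        .take_while(|&b| (1usize << b) * bytes_per_bucket < limit)
        .last()
        .unwrap(); // 12
    let table_size = (1usize << num_bits) * bytes_per_bucket; // 262_144
    assert_eq!(num_bits, 12);
    assert_eq!(budget - table_size, 737_856); // the heap gets the rest
}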
/// `KeyValue` is the item stored in the hash table.
/// The key is actually a `BytesRef` object stored in an external heap.
/// The `value_addr` also points to an address in the heap.
///
/// The key and the value are actually stored contiguously.
/// For this reason, the (start, stop) information is actually redundant
/// and can be simplified in the future
#[derive(Copy, Clone, Default)]
#[repr(packed)]
struct KeyValue {
key: BytesRef,
value_addr: u32,
key_value_addr: BytesRef,
hash: u32,
}
impl KeyValue {
fn is_empty(&self,) -> bool {
self.key.stop == 0u32
fn is_empty(&self) -> bool {
self.key_value_addr.is_null()
}
}
pub enum Entry {
Vacant(usize),
Occupied(u32),
}
/// Customized `HashMap` with string keys
///
/// This `HashMap` takes strings as keys. Keys are
/// stored in a user-defined heap.
///
@@ -57,100 +107,102 @@ pub enum Entry {
/// the computation of the hash of the key twice,
/// or copying the key as long as there is no insert.
///
pub struct HashMap<'a, V> where V: HeapAllocable {
pub struct HashMap<'a> {
table: Box<[KeyValue]>,
heap: &'a Heap,
_phantom: PhantomData<V>,
mask: usize,
occupied: Vec<usize>,
}
impl<'a, V> HashMap<'a, V> where V: HeapAllocable {
pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> HashMap<'a, V> {
struct QuadraticProbing {
hash: usize,
i: usize,
mask: usize,
}
impl QuadraticProbing {
fn compute(hash: usize, mask: usize) -> QuadraticProbing {
QuadraticProbing {
hash: hash,
i: 0,
mask: mask,
}
}
#[inline]
fn next_probe(&mut self) -> usize {
self.i += 1;
(self.hash + self.i * self.i) & self.mask
}
}
impl<'a> HashMap<'a> {
pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> HashMap<'a> {
let table_size = 1 << num_bucket_power_of_2;
let table: Vec<KeyValue> = iter::repeat(KeyValue::default()).take(table_size).collect();
HashMap {
table: table.into_boxed_slice(),
heap: heap,
_phantom: PhantomData,
mask: table_size - 1,
occupied: Vec::with_capacity(table_size / 2),
}
}
#[inline]
fn bucket(&self, key: &[u8]) -> usize {
let hash: u64 = djb2(key);
(hash as usize) & self.mask
fn probe(&self, hash: u32) -> QuadraticProbing {
QuadraticProbing::compute(hash as usize, self.mask)
}
fn get_key(&self, bytes_ref: BytesRef) -> &[u8] {
self.heap.get_slice(bytes_ref)
pub fn is_saturated(&self) -> bool {
self.table.len() < self.occupied.len() * 3
}
pub fn set_bucket(&mut self, key_bytes: &[u8], bucket: usize, addr: u32) -> u32 {
#[inline(never)]
fn get_key_value(&self, bytes_ref: BytesRef) -> (&[u8], u32) {
let key_bytes: &[u8] = self.heap.get_slice(bytes_ref);
let expull_addr: u32 = bytes_ref.addr() + 2 + key_bytes.len() as u32;
(key_bytes, expull_addr)
}
pub fn set_bucket(&mut self, hash: u32, key_bytes_ref: BytesRef, bucket: usize) {
self.occupied.push(bucket);
self.table[bucket] = KeyValue {
key: self.heap.allocate_and_set(key_bytes),
value_addr: addr,
key_value_addr: key_bytes_ref,
hash: hash,
};
addr
}
pub fn iter<'b: 'a>(&'b self,) -> impl Iterator<Item=(&'a [u8], (u32, &'a V))> + 'b {
let heap: &'a Heap = self.heap;
let table: &'b [KeyValue] = &self.table;
pub fn iter<'b: 'a>(&'b self) -> impl Iterator<Item = (&'a [u8], u32)> + 'b {
self.occupied
.iter()
.cloned()
.map(move |bucket: usize| {
let kv = table[bucket];
let addr = kv.value_addr;
let v: &V = heap.get_mut_ref::<V>(addr);
(heap.get_slice(kv.key), (addr, v))
})
// .map(move |addr: u32| (heap.get_mut_ref::<V>(addr)) )
let kv = self.table[bucket];
self.get_key_value(kv.key_value_addr)
})
}
pub fn values_mut<'b: 'a>(&'b self,) -> impl Iterator<Item=&'a mut V> + 'b {
let heap: &'a Heap = self.heap;
let table: &'b [KeyValue] = &self.table;
self.occupied
.iter()
.cloned()
.map(move |bucket: usize| table[bucket].value_addr)
.map(move |addr: u32| heap.get_mut_ref::<V>(addr))
}
pub fn get_or_create<S: AsRef<[u8]>>(&mut self, key: S) -> &mut V {
let entry = self.lookup(key.as_ref());
match entry {
Entry::Occupied(addr) => {
self.heap.get_mut_ref(addr)
}
Entry::Vacant(bucket) => {
let (addr, val): (u32, &mut V) = self.heap.allocate_object();
self.set_bucket(key.as_ref(), bucket, addr);
val
}
}
}
pub fn lookup<S: AsRef<[u8]>>(&self, key: S) -> Entry {
pub fn get_or_create<S: AsRef<[u8]>, V: HeapAllocable>(&mut self, key: S) -> &mut V {
let key_bytes: &[u8] = key.as_ref();
let mut bucket = self.bucket(key_bytes);
let hash = murmurhash2::murmurhash2(key.as_ref());
let mut probe = self.probe(hash);
loop {
let bucket = probe.next_probe();
let kv: KeyValue = self.table[bucket];
if kv.is_empty() {
return Entry::Vacant(bucket);
let key_bytes_ref = self.heap.allocate_and_set(key_bytes);
let (addr, val): (u32, &mut V) = self.heap.allocate_object();
assert_eq!(addr, key_bytes_ref.addr() + 2 + key_bytes.len() as u32);
self.set_bucket(hash, key_bytes_ref, bucket);
return val;
} else if kv.hash == hash {
let (stored_key, expull_addr): (&[u8], u32) = self.get_key_value(kv.key_value_addr);
if stored_key == key_bytes {
return self.heap.get_mut_ref(expull_addr);
}
}
if self.get_key(kv.key) == key_bytes {
return Entry::Occupied(kv.value_addr);
}
bucket = (bucket + 1) & self.mask;
}
}
}
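A toy trace of the probing sequence implemented by `QuadraticProbing` above: probe i visits bucket `(hash + i*i) & mask`, starting at i = 1 because `next_probe` increments before computing.

fn main() {
    let mask = 7usize; // an 8-bucket table
    let hash = 3usize;
    let probes: Vec<usize> = (1..5usize).map(|i| (hash + i * i) & mask).collect();
    assert_eq!(probes, vec![4, 7, 4, 3]);
}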
@@ -158,13 +210,14 @@ impl<'a, V> HashMap<'a, V> where V: HeapAllocable {
#[cfg(test)]
mod tests {
use super::*;
use super::super::heap::{Heap, HeapAllocable};
use super::djb2;
use super::murmurhash2::murmurhash2;
use test::Bencher;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::collections::HashSet;
use super::split_memory;
struct TestValue {
val: u32,
@@ -180,17 +233,22 @@ mod tests {
}
}
#[test]
fn test_hashmap_size() {
assert_eq!(split_memory(100_000), (67232, 9));
assert_eq!(split_memory(1_000_000), (737856, 12));
assert_eq!(split_memory(10_000_000), (7902848, 15));
}
#[test]
fn test_hash_map() {
let heap = Heap::with_capacity(2_000_000);
let mut hash_map: HashMap<TestValue> = HashMap::new(18, &heap);
let mut hash_map: HashMap = HashMap::new(18, &heap);
{
{
let v: &mut TestValue = hash_map.get_or_create("abc");
assert_eq!(v.val, 0u32);
v.val = 3u32;
}
}
{
let v: &mut TestValue = hash_map.get_or_create("abcd");
@@ -205,29 +263,54 @@ mod tests {
let v: &mut TestValue = hash_map.get_or_create("abcd");
assert_eq!(v.val, 4u32);
}
let mut iter_values = hash_map.values_mut();
assert_eq!(iter_values.next().unwrap().val, 3u32);
assert_eq!(iter_values.next().unwrap().val, 4u32);
assert!(!iter_values.next().is_some());
let mut iter_values = hash_map.iter();
{
let (_, addr) = iter_values.next().unwrap();
let val: &TestValue = heap.get_ref(addr);
assert_eq!(val.val, 3u32);
}
{
let (_, addr) = iter_values.next().unwrap();
let val: &TestValue = heap.get_ref(addr);
assert_eq!(val.val, 4u32);
}
assert!(iter_values.next().is_none());
}
#[test]
fn test_murmur() {
let s1 = "abcdef";
let s2 = "abcdeg";
for i in 0..5 {
assert_eq!(murmurhash2(&s1[i..5].as_bytes()),
murmurhash2(&s2[i..5].as_bytes()));
}
}
#[test]
fn test_murmur_collisions() {
let mut set: HashSet<u32> = HashSet::default();
for i in 0..10_000 {
let s = format!("hash{}", i);
let hash = murmurhash2(s.as_bytes());
set.insert(hash);
}
assert_eq!(set.len(), 10_000);
}
#[bench]
fn bench_djb2(bench: &mut Bencher) {
let v = String::from("abwer");
bench.iter(|| {
djb2(v.as_bytes())
});
fn bench_murmurhash_2(b: &mut Bencher) {
let keys: Vec<&'static str> =
vec!["wer qwe qwe qwe ", "werbq weqweqwe2 ", "weraq weqweqwe3 "];
b.iter(|| {
keys.iter()
.map(|&s| s.as_bytes())
.map(murmurhash2::murmurhash2)
.map(|h| h as u64)
.last()
.unwrap()
});
}
#[bench]
fn bench_siphasher(bench: &mut Bencher) {
let v = String::from("abwer");
bench.iter(|| {
let mut h = DefaultHasher::new();
h.write(v.as_bytes());
h.finish()
});
}
}

View File

@@ -1,13 +1,29 @@
use std::cell::UnsafeCell;
use std::mem;
use common::allocate_vec;
use std::ptr;
use byteorder::{NativeEndian, ByteOrder};
/// `BytesRef` refers to a slice in tantivy's custom `Heap`.
///
/// The slice encodes the length of the `&[u8]` slice
/// on 16 bits, followed by the data itself.
#[derive(Copy, Clone)]
pub struct BytesRef {
pub start: u32,
pub stop: u32,
pub struct BytesRef(u32);
impl BytesRef {
pub fn is_null(&self) -> bool {
self.0 == u32::max_value()
}
pub fn addr(&self) -> u32 {
self.0
}
}
impl Default for BytesRef {
fn default() -> BytesRef {
BytesRef(u32::max_value())
}
}
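A self-contained sketch of the length-prefixed layout described above, outside the heap: two bytes of native-endian length, then the payload. This mirrors `allocate_and_set` and `get_slice` further down; the helper names are hypothetical.

extern crate byteorder;
use byteorder::{ByteOrder, NativeEndian};

fn write_slice(buf: &mut Vec<u8>, data: &[u8]) -> usize {
    assert!(data.len() < u16::max_value() as usize);
    let addr = buf.len();
    let mut len_prefix = [0u8; 2];
    NativeEndian::write_u16(&mut len_prefix, data.len() as u16);
    buf.extend_from_slice(&len_prefix);
    buf.extend_from_slice(data);
    addr // plays the role of `BytesRef(addr)`
}

fn read_slice(buf: &[u8], addr: usize) -> &[u8] {
    let len = NativeEndian::read_u16(&buf[addr..addr + 2]) as usize;
    &buf[addr + 2..addr + 2 + len]
}

fn main() {
    let mut buf = Vec::new();
    let addr = write_slice(&mut buf, b"tantivy");
    assert_eq!(read_slice(&buf, addr), b"tantivy");
}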
/// Object that can be allocated in tantivy's custom `Heap`.
@@ -20,20 +36,17 @@ pub struct Heap {
inner: UnsafeCell<InnerHeap>,
}
#[cfg_attr(feature = "cargo-clippy", allow(mut_from_ref))]
impl Heap {
/// Creates a new heap with a given capacity
pub fn with_capacity(num_bytes: usize) -> Heap {
Heap { inner: UnsafeCell::new(InnerHeap::with_capacity(num_bytes)) }
}
fn inner(&self) -> &mut InnerHeap {
unsafe { &mut *self.inner.get() }
}
/// Clears the heap. All the underlying data is lost.
///
/// This heap does not support deallocation.
@@ -41,19 +54,9 @@ impl Heap {
pub fn clear(&self) {
self.inner().clear();
}
/// Return the heap capacity.
pub fn capacity(&self,) -> u32 {
self.inner().capacity()
}
/// Return the amount of memory that has been allocated so far.
pub fn len(&self,) -> u32 {
self.inner().len()
}
/// Return amount of free space, in bytes.
pub fn num_free_bytes(&self,) -> u32 {
pub fn num_free_bytes(&self) -> u32 {
self.inner().num_free_bytes()
}
@@ -62,38 +65,40 @@ impl Heap {
pub fn allocate_space(&self, num_bytes: usize) -> u32 {
self.inner().allocate_space(num_bytes)
}
/// Allocate an object in the heap
pub fn allocate_object<V: HeapAllocable>(&self,) -> (u32, &mut V) {
pub fn allocate_object<V: HeapAllocable>(&self) -> (u32, &mut V) {
let addr = self.inner().allocate_space(mem::size_of::<V>());
let v: V = V::with_addr(addr);
self.inner().set(addr, &v);
(addr, self.inner().get_mut_ref(addr))
}
/// Stores a `&[u8]` in the heap and returns the destination BytesRef.
pub fn allocate_and_set(&self, data: &[u8]) -> BytesRef {
self.inner().allocate_and_set(data)
}
/// Fetches the `&[u8]` stored in the slice defined by the `BytesRef`
/// given as argument.
pub fn get_slice(&self, bytes_ref: BytesRef) -> &[u8] {
self.inner().get_slice(bytes_ref.start, bytes_ref.stop)
self.inner().get_slice(bytes_ref)
}
/// Stores an item's data in the heap, at the given `address`.
pub fn set<Item>(&self, addr: u32, val: &Item) {
self.inner().set(addr, val);
}
/// Returns a mutable reference for an object at a given Item.
pub fn get_mut_ref<Item>(&self, addr: u32) -> &mut Item {
self.inner().get_mut_ref(addr)
}
pub fn get_ref<Item>(&self, addr: u32) -> &Item {
self.inner().get_mut_ref(addr)
/// Returns a mutable reference to an `Item` at a given `addr`.
#[cfg(test)]
pub fn get_ref<Item>(&self, addr: u32) -> &mut Item {
self.get_mut_ref(addr)
}
}
@@ -107,9 +112,8 @@ struct InnerHeap {
impl InnerHeap {
pub fn with_capacity(num_bytes: usize) -> InnerHeap {
let buffer: Vec<u8> = allocate_vec(num_bytes);
let buffer: Vec<u8> = vec![0u8; num_bytes];
InnerHeap {
buffer: buffer,
buffer_len: num_bytes as u32,
@@ -123,23 +127,14 @@ impl InnerHeap {
self.next_heap = None;
}
pub fn capacity(&self,) -> u32 {
self.buffer.len() as u32
}
pub fn len(&self,) -> u32 {
self.used
}
// Returns the number of free bytes. If the buffer
// has reached its capacity and overflowed to another buffer, returns 0.
pub fn num_free_bytes(&self,) -> u32 {
pub fn num_free_bytes(&self) -> u32 {
if self.next_heap.is_some() {
0u32
} else {
self.buffer_len - self.used
}
}
}
pub fn allocate_space(&mut self, num_bytes: usize) -> u32 {
@@ -147,74 +142,82 @@ impl InnerHeap {
self.used += num_bytes as u32;
if self.used <= self.buffer_len {
addr
} else {
if self.next_heap.is_none() {
warn!("Exceeded heap size. The margin was apparently unsufficient. The segment will be committed right after indexing this very last document.");
info!(r#"Exceeded heap size. The segment will be committed right after indexing this document."#,);
self.next_heap = Some(Box::new(InnerHeap::with_capacity(self.buffer_len as usize)));
}
self.next_heap.as_mut().unwrap().allocate_space(num_bytes) + self.buffer_len
}
}
fn get_slice(&self, start: u32, stop: u32) -> &[u8] {
fn get_slice(&self, bytes_ref: BytesRef) -> &[u8] {
let start = bytes_ref.0;
if start >= self.buffer_len {
self.next_heap.as_ref().unwrap().get_slice(start - self.buffer_len, stop - self.buffer_len)
}
else {
&self.buffer[start as usize..stop as usize]
self.next_heap
.as_ref()
.unwrap()
.get_slice(BytesRef(start - self.buffer_len))
} else {
let start = start as usize;
let len = NativeEndian::read_u16(&self.buffer[start..start + 2]) as usize;
&self.buffer[start + 2..start + 2 + len]
}
}
fn get_mut_slice(&mut self, start: u32, stop: u32) -> &mut [u8] {
if start >= self.buffer_len {
self.next_heap
.as_mut()
.unwrap()
.get_mut_slice(start - self.buffer_len, stop - self.buffer_len)
} else {
&mut self.buffer[start as usize..stop as usize]
}
}
fn allocate_and_set(&mut self, data: &[u8]) -> BytesRef {
let start = self.allocate_space(data.len());
let stop = start + data.len() as u32;
self.get_mut_slice(start, stop).clone_from_slice(data);
BytesRef {
start: start as u32,
stop: stop as u32,
}
assert!(data.len() < u16::max_value() as usize);
let total_len = 2 + data.len();
let start = self.allocate_space(total_len);
let total_buff = self.get_mut_slice(start, start + total_len as u32);
NativeEndian::write_u16(&mut total_buff[0..2], data.len() as u16);
total_buff[2..].clone_from_slice(data);
BytesRef(start)
}
fn get_mut(&mut self, addr: u32) -> *mut u8 {
if addr >= self.buffer_len {
self.next_heap
.as_mut()
.unwrap()
.get_mut(addr - self.buffer_len)
} else {
let addr_isize = addr as isize;
unsafe { self.buffer.as_mut_ptr().offset(addr_isize) }
}
}
fn get_mut_ref<Item>(&mut self, addr: u32) -> &mut Item {
if addr >= self.buffer_len {
self.next_heap
.as_mut()
.unwrap()
.get_mut_ref(addr - self.buffer_len)
} else {
let v_ptr_u8 = self.get_mut(addr) as *mut u8;
let v_ptr = v_ptr_u8 as *mut Item;
unsafe { &mut *v_ptr }
}
}
fn set<Item>(&mut self, addr: u32, val: &Item) {
pub fn set<Item>(&mut self, addr: u32, val: &Item) {
if addr >= self.buffer_len {
self.next_heap
.as_mut()
.unwrap()
.set(addr - self.buffer_len, val);
} else {
let v_ptr: *const Item = val as *const Item;
let v_ptr_u8: *const u8 = v_ptr as *const u8;
debug_assert!(addr + mem::size_of::<Item>() as u32 <= self.used);
@@ -224,4 +227,4 @@ impl InnerHeap {
}
}
}
}
}

View File

@@ -1,46 +1,44 @@
mod hashmap;
pub(crate) mod hashmap;
mod heap;
mod expull;
pub use self::heap::{Heap, HeapAllocable};
pub use self::expull::ExpUnrolledLinkedList;
pub use self::hashmap::{HashMap, Entry};
pub use self::hashmap::HashMap;
#[test]
fn test_unrolled_linked_list() {
use std::collections;
let heap = Heap::with_capacity(30_000_000);
{
heap.clear();
let mut ks: Vec<usize> = (1..5).map(|k| k * 100).collect();
ks.push(2);
ks.push(3);
let mut hashmap: HashMap<ExpUnrolledLinkedList> = HashMap::new(10, &heap);
for k in (1..5).map(|k| k * 100) {
let mut hashmap: HashMap = HashMap::new(10, &heap);
for j in 0..k {
for i in 0..500 {
let mut list = hashmap.get_or_create(i.to_string());
list.push(i*j, &heap);
let v: &mut ExpUnrolledLinkedList = hashmap.get_or_create(i.to_string());
v.push(i * j, &heap);
}
}
let mut map_addr: collections::HashMap<Vec<u8>, u32> = collections::HashMap::new();
for (key, addr) in hashmap.iter() {
map_addr.insert(Vec::from(key), addr);
}
for i in 0..500 {
match hashmap.lookup(i.to_string()) {
Entry::Occupied(addr) => {
let v: &mut ExpUnrolledLinkedList = heap.get_mut_ref(addr);
let mut it = v.iter(addr, &heap);
for j in 0..k {
assert_eq!(it.next().unwrap(), i*j);
}
assert!(!it.next().is_some());
}
_ => {
panic!("should never happen");
}
let key: String = i.to_string();
let addr: u32 = *map_addr.get(key.as_bytes()).unwrap();
let exp_pull: &ExpUnrolledLinkedList = heap.get_ref(addr);
let mut it = exp_pull.iter(addr, &heap);
for j in 0..k {
assert_eq!(it.next().unwrap(), i * j);
}
assert!(!it.next().is_some());
}
}
}
}
}
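The rewritten test above goes through an extra indirection: the hashmap iterator yields raw heap addresses, and typed values are then recovered from the `Heap` by address. A reduced sketch of that pattern using only `std` types (the `Vec` arena here stands in for tantivy's `Heap`; all names are illustrative):

use std::collections::HashMap;

fn main() {
    // Values live in one arena; the map stores offsets into it,
    // just as the stacker hashmap stores `u32` heap addresses.
    let mut arena: Vec<u64> = Vec::new();
    let mut addrs: HashMap<String, u32> = HashMap::new();

    for i in 0..5u64 {
        addrs.insert(i.to_string(), arena.len() as u32);
        arena.push(i * 100);
    }

    // Lookup resolves key -> address -> value, mirroring
    // `map_addr.get(..)` followed by `heap.get_ref(addr)` above.
    let addr = addrs["3"];
    assert_eq!(arena[addr as usize], 300);
}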

View File

@@ -8,31 +8,30 @@ use std::io;
use std::marker::Sync;
/// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored.
///
/// There are currently two implementations of `Directory`
///
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
///
pub trait Directory: fmt::Debug + Send + Sync + 'static {
/// Opens a virtual file for read.
///
/// Once a virtual file is open, its data may not
/// change.
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file
///
/// Removing a file will not affect an eventual
/// existing ReadOnlySource pointing to it.
///
/// Removing a nonexistent file yields a
/// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
@@ -40,18 +39,18 @@ pub trait Directory: fmt::Debug + Send + Sync + 'static {
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Opens a writer for the *virtual file* associated with
/// a Path.
///
/// Right after this call, the file should be created
/// and any subsequent call to `open_read` for the
/// same path should return a `ReadOnlySource`.
///
/// Write operations may be aggressively buffered.
/// The client of this trait is responsible for calling flush
/// to ensure that subsequent `read` operations
/// will take into account preceding `write` operations.
///
/// Flush operation should also be persistent.
///
/// The user shall not rely on `Drop` triggering `flush`.
@@ -60,7 +59,7 @@ pub trait Directory: fmt::Debug + Send + Sync + 'static {
///
/// The file may not previously exist.
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
/// Reads the full content of a file that has been written using
/// atomic_write.
///
@@ -68,17 +67,13 @@ pub trait Directory: fmt::Debug + Send + Sync + 'static {
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data.
///
/// This call ensures that reads can never *observe*
/// a partially written file.
///
/// The file may or may not previously exist.
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
/// Clones the directory and boxes the clone
fn box_clone(&self) -> Box<Directory>;
}
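A short usage sketch of the atomic read/write contract documented above, run against the in-memory implementation; it assumes tantivy 0.4 exposes `RAMDirectory` and the `Directory` trait under `tantivy::directory`.

extern crate tantivy;

use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory};

fn main() {
    let mut dir = RAMDirectory::create();
    let path = Path::new("meta.json");
    // atomic_write may create or replace the file, and readers can
    // never observe a partially written state.
    dir.atomic_write(path, b"{}").unwrap();
    assert!(dir.exists(path));
    assert_eq!(dir.atomic_read(path).unwrap(), b"{}".to_vec());
}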

View File

@@ -1,52 +1,218 @@
use std::error::Error as StdError;
use std::path::PathBuf;
use std::io;
use std::fmt;
/// General IO error with an optional path to the offending file.
#[derive(Debug)]
pub struct IOError {
path: Option<PathBuf>,
err: io::Error,
}
impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err),
}
}
}
impl StdError for IOError {
fn description(&self) -> &str {
"io error occurred"
}
fn cause(&self) -> Option<&StdError> {
Some(&self.err)
}
}
impl IOError {
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
IOError {
path: Some(path),
err: err,
}
}
}
impl From<io::Error> for IOError {
fn from(err: io::Error) -> IOError {
IOError {
path: None,
err: err,
}
}
}
/// Error that may occur when opening a directory
#[derive(Debug)]
pub enum OpenDirectoryError {
/// The underlying directory does not exist.
DoesNotExist(PathBuf),
/// The path exists but is not a directory.
NotADirectory(PathBuf),
}
impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path)
}
OpenDirectoryError::NotADirectory(ref path) => {
write!(f, "the path '{:?}' exists but is not a directory", path)
}
}
}
}
impl StdError for OpenDirectoryError {
fn description(&self) -> &str {
"error occurred while opening a directory"
}
fn cause(&self) -> Option<&StdError> {
None
}
}
/// Error that may occur when starting to write in a file
#[derive(Debug)]
pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden.
/// Check out the `Directory` documentation.
FileAlreadyExists(PathBuf),
/// Any kind of IO error that happens when
/// writing in the underlying IO device.
IOError(IOError),
}
impl From<IOError> for OpenWriteError {
fn from(err: IOError) -> OpenWriteError {
OpenWriteError::IOError(err)
}
}
impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path)
}
OpenWriteError::IOError(ref err) => {
write!(f,
"an io error occurred while opening a file for writing: '{}'",
err)
}
}
}
}
impl StdError for OpenWriteError {
fn description(&self) -> &str {
"error occurred while opening a file for writing"
}
fn cause(&self) -> Option<&StdError> {
match *self {
OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err),
}
}
}
/// Error that may occur when opening a file for reading
#[derive(Debug)]
pub enum OpenReadError {
/// The file does not exist.
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
}
impl From<IOError> for OpenReadError {
fn from(err: IOError) -> OpenReadError {
OpenReadError::IOError(err)
}
}
impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
OpenReadError::IOError(ref err) => {
write!(f,
"an io error occurred while opening a file for reading: '{}'",
err)
}
}
}
}
impl StdError for OpenReadError {
fn description(&self) -> &str {
"error occurred while opening a file for reading"
}
fn cause(&self) -> Option<&StdError> {
match *self {
OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err),
}
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug)]
pub enum DeleteError {
/// The file does not exist.
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
/// The file may not be deleted because it is
/// protected.
FileProtected(PathBuf),
}
impl From<IOError> for DeleteError {
fn from(err: IOError) -> DeleteError {
DeleteError::IOError(err)
}
}
impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
DeleteError::FileProtected(ref path) => {
write!(f, "the file '{:?}' is protected and can't be deleted", path)
}
DeleteError::IOError(ref err) => {
write!(f, "an io error occurred while deleting a file: '{}'", err)
}
}
}
}
impl StdError for DeleteError {
fn description(&self) -> &str {
"error occurred while deleting a file"
}
fn cause(&self) -> Option<&StdError> {
match *self {
DeleteError::FileDoesNotExist(_) |
DeleteError::FileProtected(_) => None,
DeleteError::IOError(ref err) => Some(err),
}
}
}
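One pattern repeats throughout this file: wrap the raw `io::Error` in an `IOError` that carries the offending path, and give each operation-specific enum a `From<IOError>` impl so `?` converts automatically. A self-contained reduction of that pattern (a sketch that mirrors the names above, not the crate's code):

use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};

#[derive(Debug)]
struct IOError {
    path: Option<PathBuf>,
    err: io::Error,
}

#[derive(Debug)]
enum OpenReadError {
    FileDoesNotExist(PathBuf),
    IOError(IOError),
}

impl From<IOError> for OpenReadError {
    fn from(err: IOError) -> OpenReadError {
        OpenReadError::IOError(err)
    }
}

fn read_all(path: &Path) -> Result<Vec<u8>, OpenReadError> {
    // NotFound gets its own variant; everything else keeps the path context.
    let mut file = File::open(path)
        .map_err(|e| if e.kind() == io::ErrorKind::NotFound {
            OpenReadError::FileDoesNotExist(path.to_owned())
        } else {
            OpenReadError::IOError(IOError { path: Some(path.to_owned()), err: e })
        })?;
    let mut buffer = Vec::new();
    // Here the `?` goes through `From<IOError> for OpenReadError`.
    file.read_to_end(&mut buffer)
        .map_err(|e| IOError { path: Some(path.to_owned()), err: e })?;
    Ok(buffer)
}

fn main() {
    assert!(read_all(Path::new("no_such_file")).is_err());
}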

View File

@@ -1,23 +1,23 @@
use std::path::{Path, PathBuf};
use serde_json;
use directory::error::{IOError, OpenReadError, DeleteError, OpenWriteError};
use directory::{ReadOnlySource, WritePtr};
use std::result;
use std::io;
use Directory;
use std::sync::{Arc, RwLock};
use std::collections::HashSet;
use std::sync::RwLockWriteGuard;
use std::io::Write;
use core::MANAGED_FILEPATH;
use std::collections::HashMap;
use std::fmt;
use error::{Result, ErrorKind, ResultExt};
/// Wrapper of directories that keeps track of files created by Tantivy.
///
/// A managed directory is just a wrapper of a directory
/// that keeps a (persisted) list of the files that
/// have been created (and not deleted) by tantivy so far.
///
/// Thanks to this list, it implements a `garbage_collect` method
@@ -45,19 +45,18 @@ pub struct FileProtection {
}
fn unprotect_file_from_delete(directory: &ManagedDirectory, path: &Path) {
let mut meta_informations_wlock = directory
    .meta_informations
    .write()
    .expect("Managed file lock poisoned");
if let Some(counter_ref_mut) = meta_informations_wlock.protected_files.get_mut(path) {
(*counter_ref_mut) -= 1;
}
}
impl fmt::Debug for FileProtection {
fn fmt(&self, formatter: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(formatter, "FileProtectionFor({:?})", self.path)
}
}
@@ -67,33 +66,42 @@ impl Drop for FileProtection {
}
}
/// Saves the file containing the list of existing files
/// that were created by tantivy.
fn save_managed_paths(directory: &mut Directory,
                      wlock: &RwLockWriteGuard<MetaInformation>)
                      -> io::Result<()> {
    let mut w = serde_json::to_vec(&wlock.managed_paths)?;
    write!(&mut w, "\n")?;
    directory.atomic_write(&MANAGED_FILEPATH, &w[..])?;
    Ok(())
}
impl ManagedDirectory {
/// Wraps a directory as managed directory.
pub fn new<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
match directory.atomic_read(&MANAGED_FILEPATH) {
Ok(data) => {
let managed_files_json = String::from_utf8_lossy(&data);
let managed_files: HashSet<PathBuf> =
    serde_json::from_str(&managed_files_json)
        .chain_err(|| ErrorKind::CorruptedFile(MANAGED_FILEPATH.clone()))?;
Ok(ManagedDirectory {
    directory: box directory,
    meta_informations: Arc::new(RwLock::new(MetaInformation {
        managed_paths: managed_files,
        protected_files: HashMap::default(),
    })),
})
}
Err(OpenReadError::FileDoesNotExist(_)) => {
    Ok(ManagedDirectory {
        directory: box directory,
        meta_informations: Arc::default(),
    })
}
Err(OpenReadError::IOError(e)) => Err(From::from(e)),
}
}
@@ -101,7 +109,7 @@ impl ManagedDirectory {
///
/// Removes the files that were created by `tantivy` and are not
/// used by any segment anymore.
///
/// * `living_files` - List of files that are still used by the index.
///
/// This method does not panic nor return errors.
@@ -109,19 +117,22 @@ impl ManagedDirectory {
/// an error is simply logged, and the file remains in the list of managed
/// files.
pub fn garbage_collect(&mut self, living_files: HashSet<PathBuf>) {
info!("Garbage collect");
let mut files_to_delete = vec![];
{
    // releasing the lock as .delete() will use it too.
    let meta_informations_rlock =
        self.meta_informations
            .read()
            .expect("Managed directory rlock poisoned in garbage collect.");
for managed_path in &meta_informations_rlock.managed_paths {
if !living_files.contains(managed_path) {
files_to_delete.push(managed_path.clone());
}
}
}
let mut deleted_files = vec![];
{
for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) {
@@ -130,13 +141,14 @@ impl ManagedDirectory {
deleted_files.push(file_to_delete);
}
Err(file_error) => {
    match file_error {
        DeleteError::FileDoesNotExist(_) => {
            deleted_files.push(file_to_delete);
        }
        DeleteError::IOError(_) => {
            if !cfg!(target_os = "windows") {
                // On windows, delete is expected to fail if the file
                // is mmapped.
                error!("Failed to delete {:?}", file_to_delete);
            }
        }
@@ -144,7 +156,7 @@ impl ManagedDirectory {
// this is expected.
}
}
}
}
}
@@ -152,18 +164,18 @@ impl ManagedDirectory {
if !deleted_files.is_empty() {
// update the list of managed files by removing
// the file that were removed.
let mut meta_informations_wlock = self.meta_informations
    .write()
    .expect("Managed directory wlock poisoned (2).");
{
let managed_paths_write = &mut meta_informations_wlock.managed_paths;
for delete_file in &deleted_files {
managed_paths_write.remove(delete_file);
}
}
if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
error!("Failed to save the list of managed files.");
}
}
@@ -175,7 +187,7 @@ impl ManagedDirectory {
///
/// The method returns a `FileProtection` object.
/// The file will not be garbage collected as long as the
/// `FileProtection` object is kept alive.
pub fn protect_file_from_delete(&self, path: &Path) -> FileProtection {
let pathbuf = path.to_owned();
{
@@ -183,9 +195,9 @@ impl ManagedDirectory {
.write()
.expect("Managed file lock poisoned on protect");
*meta_informations_wlock
    .protected_files
    .entry(pathbuf.clone())
    .or_insert(0) += 1;
}
FileProtection {
directory: self.clone(),
@@ -193,52 +205,33 @@ impl ManagedDirectory {
}
}
/// Registers a file as managed
///
/// This method must be called before the file is
/// actually created to ensure that a failure between
/// registering the filepath and creating the file
/// will not lead to garbage files that will
/// never get removed.
fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
let mut meta_wlock = self.meta_informations
    .write()
    .expect("Managed file lock poisoned");
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
if has_changed {
save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
}
Ok(())
}
}
impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.directory.open_read(path)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path)
    .map_err(|e| IOError::with_path(path.to_owned(), e))?;
self.directory.open_write(path)
}
@@ -258,7 +251,7 @@ impl Directory for ManagedDirectory {
.expect("poisoned lock in managed directory meta");
if let Some(counter) = metas_rlock.protected_files.get(path) {
if *counter > 0 {
return Err(DeleteError::FileProtected(path.to_owned()));
}
}
}
@@ -268,11 +261,10 @@ impl Directory for ManagedDirectory {
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn box_clone(&self) -> Box<Directory> {
box self.clone()
}
}
impl Clone for ManagedDirectory {
@@ -292,10 +284,10 @@ mod tests {
use super::*;
use directory::MmapDirectory;
use std::path::Path;
use std::io::Write;
use tempdir::TempDir;
lazy_static! {
static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test");
static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
@@ -313,17 +305,17 @@ mod tests {
write_file.flush().unwrap();
}
{
managed_directory
    .atomic_write(*TEST_PATH2, &vec![0u8, 1u8])
    .unwrap();
}
{
assert!(managed_directory.exists(*TEST_PATH1));
assert!(managed_directory.exists(*TEST_PATH2));
}
{
let living_files: HashSet<PathBuf> =
    [TEST_PATH1.to_owned()].into_iter().cloned().collect();
managed_directory.garbage_collect(living_files);
}
{
@@ -346,7 +338,7 @@ mod tests {
assert!(!managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
}
}
}
}
#[test]
@@ -357,10 +349,12 @@ mod tests {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap();
managed_directory
    .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
    .unwrap();
assert!(managed_directory.exists(*TEST_PATH1));
let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
managed_directory.garbage_collect(living_files.clone());
if cfg!(target_os = "windows") {
// On Windows, gc should try and fail to delete the file, as it is mmapped.
@@ -371,8 +365,7 @@ mod tests {
// eventually be deleted once mmap is released.
managed_directory.garbage_collect(living_files);
assert!(!managed_directory.exists(*TEST_PATH1));
} else {
assert!(!managed_directory.exists(*TEST_PATH1));
}
@@ -387,7 +380,9 @@ mod tests {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap();
managed_directory
    .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
    .unwrap();
assert!(managed_directory.exists(*TEST_PATH1));
{
@@ -398,7 +393,7 @@ mod tests {
managed_directory.garbage_collect(living_files.clone());
assert!(!managed_directory.exists(*TEST_PATH1));
}
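A minimal sketch of the selection step `garbage_collect` performs before any deletion: every managed file absent from `living_files` becomes a candidate. The helper below is illustrative, not the crate's API.

use std::collections::HashSet;
use std::path::PathBuf;

fn files_to_delete(managed: &HashSet<PathBuf>,
                   living: &HashSet<PathBuf>)
                   -> Vec<PathBuf> {
    // Set difference: managed minus living.
    managed
        .iter()
        .filter(|path| !living.contains(*path))
        .cloned()
        .collect()
}

fn main() {
    let managed: HashSet<PathBuf> =
        ["seg1.idx", "seg2.idx"].iter().map(|s| PathBuf::from(*s)).collect();
    let living: HashSet<PathBuf> =
        ["seg1.idx"].iter().map(|s| PathBuf::from(*s)).collect();
    assert_eq!(files_to_delete(&managed, &living),
               vec![PathBuf::from("seg2.idx")]);
}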

View File

@@ -1,7 +1,7 @@
use atomicwrites;
use common::make_io_err;
use directory::Directory;
use directory::error::{IOError, OpenWriteError, OpenReadError, DeleteError, OpenDirectoryError};
use directory::ReadOnlySource;
use directory::shared_vec_slice::SharedVecSlice;
use directory::WritePtr;
@@ -24,36 +24,29 @@ use std::sync::Weak;
use tempdir::TempDir;
fn open_mmap(full_path: &PathBuf) -> result::Result<Option<Arc<Mmap>>, OpenReadError> {
let file = File::open(&full_path)
    .map_err(|e| if e.kind() == io::ErrorKind::NotFound {
        OpenReadError::FileDoesNotExist(full_path.clone())
    } else {
        OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
    })?;
let meta_data = file.metadata()
    .map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
if meta_data.len() == 0 {
// if the file size is 0, it will not be possible
// to mmap the file, so we return an anonymous mmap_cache
// instead.
return Ok(None);
}
match Mmap::open(&file, Protection::Read) {
Ok(mmap) => Ok(Some(Arc::new(mmap))),
Err(e) => Err(IOError::with_path(full_path.to_owned(), e))?,
}
}
#[derive(Default,Clone,Debug,Serialize,Deserialize)]
pub struct CacheCounters {
// Number of times the cache prevented a call to `mmap`
pub hit: usize,
@@ -65,7 +58,7 @@ pub struct CacheCounters {
pub miss_weak: usize,
}
#[derive(Clone,Debug,Serialize,Deserialize)]
pub struct CacheInfo {
pub counters: CacheCounters,
pub mmapped: Vec<PathBuf>,
@@ -91,8 +84,7 @@ impl Default for MmapCache {
impl MmapCache {
fn cleanup(&mut self) {
let previous_cache_size = self.cache.len();
let mut new_cache = HashMap::new();
mem::swap(&mut new_cache, &mut self.cache);
@@ -107,9 +99,7 @@ impl MmapCache {
fn get_info(&mut self) -> CacheInfo {
self.cleanup();
let paths: Vec<PathBuf> = self.cache.keys().cloned().collect();
CacheInfo {
counters: self.counters.clone(),
mmapped: paths,
@@ -123,68 +113,63 @@ impl MmapCache {
self.cleanup();
}
Ok(match self.cache.entry(full_path.clone()) {
HashMapEntry::Occupied(mut occupied_entry) => {
    if let Some(mmap_arc) = occupied_entry.get().upgrade() {
        self.counters.hit += 1;
        Some(mmap_arc.clone())
    } else {
        // The entry exists but the weak ref has been destroyed.
        self.counters.miss_weak += 1;
        if let Some(mmap_arc) = open_mmap(&full_path)? {
            occupied_entry.insert(Arc::downgrade(&mmap_arc));
            Some(mmap_arc)
        } else {
            None
        }
    }
}
HashMapEntry::Vacant(vacant_entry) => {
    self.counters.miss_empty += 1;
    if let Some(mmap_arc) = open_mmap(&full_path)? {
        vacant_entry.insert(Arc::downgrade(&mmap_arc));
        Some(mmap_arc)
    } else {
        None
    }
}
})
}
}
/// Directory storing data in files, read via mmap.
///
/// The Mmap objects are cached to limit the
/// number of system calls.
#[derive(Clone)]
pub struct MmapDirectory {
root_path: PathBuf,
mmap_cache: Arc<RwLock<MmapCache>>,
_temp_directory: Arc<Option<TempDir>>,
}
impl fmt::Debug for MmapDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "MmapDirectory({:?})", self.root_path)
}
write!(f, "MmapDirectory({:?})", self.root_path)
}
}
impl MmapDirectory {
/// Creates a new MmapDirectory in a temporary directory.
///
/// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RAMDirectory.
pub fn create_from_tempdir() -> io::Result<MmapDirectory> {
let tempdir = try!(TempDir::new("index"));
let tempdir_path = PathBuf::from(tempdir.path());
let directory = MmapDirectory {
root_path: PathBuf::from(tempdir_path),
mmap_cache: Arc::new(RwLock::new(MmapCache::default())),
_temp_directory: Arc::new(Some(tempdir)),
};
Ok(directory)
}
@@ -196,16 +181,14 @@ impl MmapDirectory {
pub fn open(directory_path: &Path) -> Result<MmapDirectory, OpenDirectoryError> {
if !directory_path.exists() {
Err(OpenDirectoryError::DoesNotExist(PathBuf::from(directory_path)))
} else if !directory_path.is_dir() {
    Err(OpenDirectoryError::NotADirectory(PathBuf::from(directory_path)))
} else {
    Ok(MmapDirectory {
        root_path: PathBuf::from(directory_path),
        mmap_cache: Arc::new(RwLock::new(MmapCache::default())),
        _temp_directory: Arc::new(None),
    })
}
}
}
@@ -232,7 +215,8 @@ impl MmapDirectory {
use std::os::windows::fs::OpenOptionsExt;
use winapi::winbase;
open_opts
    .write(true)
.custom_flags(winbase::FILE_FLAG_BACKUP_SEMANTICS);
}
@@ -242,8 +226,8 @@ impl MmapDirectory {
}
/// Returns some statistical information
/// about the Mmap cache.
///
/// The `MmapDirectory` embeds an `MmapCache`
/// to avoid multiplying the `mmap` system calls.
pub fn get_cache_info(&mut self) -> CacheInfo {
self.mmap_cache
@@ -251,12 +235,10 @@ impl MmapDirectory {
.expect("Mmap cache lock is poisoned.")
.get_info()
}
}
/// This Write wraps a File, but has the specificity of
/// calling `sync_all` on flush.
struct SafeFileWriter(File);
impl SafeFileWriter {
@@ -266,7 +248,6 @@ impl SafeFileWriter {
}
impl Write for SafeFileWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
@@ -285,51 +266,51 @@ impl Seek for SafeFileWriter {
impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.mmap_cache
.write()
.map_err(|_| {
    let msg = format!("Failed to acquire write lock \
                       on mmap cache while reading {:?}",
                      path);
    IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
    .get_mmap(full_path)?
    .map(MmapReadOnly::from)
    .map(ReadOnlySource::Mmap)
    .unwrap_or_else(|| ReadOnlySource::Anonymous(SharedVecSlice::empty())))
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path);
let open_res = OpenOptions::new()
.write(true)
.create_new(true)
.open(full_path);
let mut file = open_res
    .map_err(|err| if err.kind() == io::ErrorKind::AlreadyExists {
        OpenWriteError::FileAlreadyExists(path.to_owned())
    } else {
        IOError::with_path(path.to_owned(), err).into()
    })?;
// making sure the file is created.
file.flush()
    .map_err(|e| IOError::with_path(path.to_owned(), e))?;
// Apparently, on some filesystems, syncing the parent
// directory is required.
self.sync_directory()
    .map_err(|e| IOError::with_path(path.to_owned(), e))?;
let writer = SafeFileWriter::new(file);
Ok(BufWriter::new(Box::new(writer)))
}
@@ -337,11 +318,14 @@ impl Directory for MmapDirectory {
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
debug!("Deleting file {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.mmap_cache
    .write()
    .map_err(|_| {
        let msg = format!("Failed to acquire write lock \
                           on mmap cache while deleting {:?}",
                          path);
        IOError::with_path(path.to_owned(), make_io_err(msg))
    })?;
// Removing the entry in the MMap cache.
// The munmap will appear on Drop,
// when the last reference is gone.
@@ -349,14 +333,13 @@ impl Directory for MmapDirectory {
match fs::remove_file(&full_path) {
Ok(_) => {
self.sync_directory()
.map_err(|e| IOError::with_path(path.to_owned(), e).into())
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(DeleteError::FileDoesNotExist(path.to_owned()))
} else {
    Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
@@ -373,32 +356,29 @@ impl Directory for MmapDirectory {
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
    Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path);
let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
try!(meta_file.write(|f| f.write_all(data)));
Ok(())
}
fn box_clone(&self) -> Box<Directory> {
Box::new(self.clone())
}
}
@@ -457,9 +437,9 @@ mod tests {
}
}
assert_eq!(mmap_directory.get_cache_info().counters.miss_empty, 10);
{
// test weak miss
// the first pass create the weak refs.
for path in &paths {
@@ -475,7 +455,7 @@ mod tests {
}
{
let mut saved_readmmaps = vec![];
// Keeps reference alive
for (i, path) in paths.iter().enumerate() {
let r = mmap_directory.open_read(path).unwrap();
@@ -483,7 +463,6 @@ mod tests {
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), i + 1);
}
let cache_info = mmap_directory.get_cache_info();
assert_eq!(cache_info.counters.miss_empty, 30);
assert_eq!(cache_info.counters.miss_weak, 10);
assert_eq!(cache_info.mmapped.len(), 10);
@@ -494,7 +473,7 @@ mod tests {
}
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
}
}
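The cache logic above distinguishes the three counter outcomes: `hit` when the weak reference upgrades, `miss_weak` when an entry exists but every `Arc` has been dropped, and `miss_empty` when there is no entry at all. A reduced sketch of the `Weak`-handle pattern using only `std` (illustrative, not the crate's `MmapCache`):

use std::collections::HashMap;
use std::sync::{Arc, Weak};

fn main() {
    let mut cache: HashMap<&'static str, Weak<Vec<u8>>> = HashMap::new();

    let data = Arc::new(vec![1u8, 2, 3]);
    cache.insert("file", Arc::downgrade(&data));

    // Hit: a strong reference is still alive, so upgrade() succeeds.
    assert!(cache["file"].upgrade().is_some());

    drop(data);
    // Weak miss: the entry survives but can no longer be upgraded,
    // which is when the real cache re-opens the mmap.
    assert!(cache["file"].upgrade().is_none());
}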

View File

@@ -1,3 +1,8 @@
/*!
WORM directory abstraction.
*/
mod mmap_directory;
mod ram_directory;
mod directory;
@@ -31,7 +36,7 @@ pub type WritePtr = BufWriter<Box<SeekableWrite>>;
mod tests {
use super::*;
use std::path::Path;
use std::io::{Write, Seek, SeekFrom};
lazy_static! {
@@ -65,7 +70,7 @@ mod tests {
assert!(directory.exists(*TEST_PATH));
write_file.write_all(&[4]).unwrap();
write_file.write_all(&[3]).unwrap();
write_file.write_all(&[7, 3, 5]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
@@ -81,9 +86,9 @@ mod tests {
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[4, 3, 7, 3, 5]).unwrap();
write_file.seek(SeekFrom::Start(0)).unwrap();
write_file.write_all(&[3, 1]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
@@ -98,7 +103,7 @@ mod tests {
{
directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
}
{
assert!(directory.open_write(*TEST_PATH).is_err());

View File

@@ -6,19 +6,19 @@ use std::result;
use std::sync::{Arc, RwLock};
use common::make_io_err;
use directory::{Directory, ReadOnlySource};
use directory::error::{IOError, OpenWriteError, OpenReadError, DeleteError};
use directory::WritePtr;
use super::shared_vec_slice::SharedVecSlice;
/// Writer associated with the `RAMDirectory`
///
/// The Writer just writes a buffer.
///
/// # Panics
///
/// On drop, if the writer was left in a *dirty* state.
/// That is, if flush was not called after the last call
/// to write.
///
struct VecWriter {
path: PathBuf,
@@ -40,8 +40,9 @@ impl VecWriter {
impl Drop for VecWriter {
fn drop(&mut self) {
if !self.is_flushed {
    panic!("You forgot to flush {:?} before its writer got Drop. Do not rely on drop.",
           self.path)
}
}
}
@@ -61,7 +62,8 @@ impl Write for VecWriter {
fn flush(&mut self) -> io::Result<()> {
self.is_flushed = true;
try!(self.shared_directory
    .write(self.path.clone(), self.data.get_ref()));
Ok(())
}
}
@@ -72,35 +74,36 @@ struct InnerDirectory(Arc<RwLock<HashMap<PathBuf, Arc<Vec<u8>>>>>);
impl InnerDirectory {
fn new() -> InnerDirectory {
InnerDirectory(Arc::new(RwLock::new(HashMap::new())))
}
fn write(&self, path: PathBuf, data: &[u8]) -> io::Result<bool> {
let mut map = try!(self.0
    .write()
    .map_err(|_| {
        make_io_err(format!("Failed to lock the directory, when trying to write {:?}",
                            path))
    }));
let prev_value = map.insert(path, Arc::new(Vec::from(data)));
Ok(prev_value.is_some())
}
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.0
.read()
.map_err(|_| {
let msg = format!("Failed to acquire read lock for the \
                   directory when trying to read {:?}",
                  path);
let io_err = make_io_err(msg);
OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err))
})
.and_then(|readable_map| {
    readable_map
        .get(path)
        .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
        .map(|data| ReadOnlySource::Anonymous(SharedVecSlice::new(data.clone())))
})
}
@@ -108,19 +111,16 @@ impl InnerDirectory {
self.0
.write()
.map_err(|_| {
let msg = format!("Failed to acquire write lock for the \
                   directory when trying to delete {:?}",
                  path);
let io_err = make_io_err(msg);
DeleteError::IOError(IOError::with_path(path.to_owned(), io_err))
})
.and_then(|mut writable_map| match writable_map.remove(path) {
    Some(_) => Ok(()),
    None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
})
}
fn exists(&self, path: &Path) -> bool {
@@ -129,13 +129,12 @@ impl InnerDirectory {
.expect("Failed to get read lock directory.")
.contains_key(path)
}
}
impl fmt::Debug for RAMDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RAMDirectory")
}
write!(f, "RAMDirectory")
}
}
@@ -150,12 +149,9 @@ pub struct RAMDirectory {
}
impl RAMDirectory {
/// Constructor
pub fn create() -> RAMDirectory {
RAMDirectory { fs: InnerDirectory::new() }
}
}
@@ -163,15 +159,19 @@ impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.open_read(path)
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let path_buf = PathBuf::from(path);
let vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());
// force the creation of the file to mimic the MMap directory.
let exists = self.fs
    .write(path_buf.clone(), &Vec::new())
    .map_err(|err| IOError::with_path(path.to_owned(), err))?;
if exists {
    Err(OpenWriteError::FileAlreadyExists(path_buf))
} else {
Ok(BufWriter::new(Box::new(vec_writer)))
}
}
@@ -180,15 +180,14 @@ impl Directory for RAMDirectory {
self.fs.delete(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.exists(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let read = self.open_read(path)?;
Ok(read.as_slice().to_owned())
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -200,8 +199,7 @@ impl Directory for RAMDirectory {
Ok(())
}
fn box_clone(&self) -> Box<Directory> {
Box::new(self.clone())
}
}
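The panic in `VecWriter::drop` enforces the contract stated in the `Directory` docs: clients must flush and must not rely on `Drop`. A stripped-down sketch of that guard (illustrative type, not the crate's):

struct ToyWriter {
    is_flushed: bool,
}

impl ToyWriter {
    fn write(&mut self, _buf: &[u8]) {
        // Any write leaves the writer dirty until the next flush.
        self.is_flushed = false;
    }
    fn flush(&mut self) {
        self.is_flushed = true;
    }
}

impl Drop for ToyWriter {
    fn drop(&mut self) {
        // Mirrors VecWriter: a dirty drop is a programming error.
        if !self.is_flushed {
            panic!("writer dropped before flush; do not rely on Drop");
        }
    }
}

fn main() {
    let mut w = ToyWriter { is_flushed: true };
    w.write(b"data");
    w.flush(); // omitting this call would panic when `w` is dropped
}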

View File

@@ -2,10 +2,10 @@ use fst::raw::MmapReadOnly;
use std::ops::Deref;
use super::shared_vec_slice::SharedVecSlice;
use common::HasLen;
use stable_deref_trait::StableDeref;
/// Read object that represents files in tantivy.
///
/// These read objects are only in charge to deliver
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
@@ -13,12 +13,13 @@ use common::HasLen;
pub enum ReadOnlySource {
/// Mmap source of data
Mmap(MmapReadOnly),
/// Wrapping a `Vec<u8>`
Anonymous(SharedVecSlice),
}
unsafe impl StableDeref for ReadOnlySource {}
impl Deref for ReadOnlySource {
type Target = [u8];
fn deref(&self) -> &[u8] {
@@ -27,35 +28,30 @@ impl Deref for ReadOnlySource {
}
impl ReadOnlySource {
/// Creates an empty ReadOnlySource
pub fn empty() -> ReadOnlySource {
ReadOnlySource::Anonymous(SharedVecSlice::empty())
}
/// Returns the data underlying the ReadOnlySource object.
pub fn as_slice(&self) -> &[u8] {
match *self {
ReadOnlySource::Mmap(ref mmap_read_only) => unsafe { mmap_read_only.as_slice() },
ReadOnlySource::Anonymous(ref shared_vec) => shared_vec.as_slice(),
}
}
/// Creates a ReadOnlySource that is just a
/// view over a slice of the data.
///
/// Keep in mind that any living slice extends
/// the lifetime of the original ReadOnlySource.
///
/// For instance, if `ReadOnlySource` wraps 500MB
/// worth of data in anonymous memory, and only a
/// 1KB slice is remaining, the whole `500MB`
/// are retained in memory.
pub fn slice(&self, from_offset: usize, to_offset: usize) -> ReadOnlySource {
match *self {
ReadOnlySource::Mmap(ref mmap_read_only) => {
let sliced_mmap = mmap_read_only.range(from_offset, to_offset - from_offset);
@@ -63,13 +59,13 @@ impl ReadOnlySource {
}
ReadOnlySource::Anonymous(ref shared_vec) => {
ReadOnlySource::Anonymous(shared_vec.slice(from_offset, to_offset))
}
}
}
}
impl HasLen for ReadOnlySource {
fn len(&self) -> usize {
self.as_slice().len()
}
}
@@ -79,3 +75,10 @@ impl Clone for ReadOnlySource {
self.slice(0, self.len())
}
}
impl From<Vec<u8>> for ReadOnlySource {
fn from(data: Vec<u8>) -> ReadOnlySource {
let shared_data = SharedVecSlice::from(data);
ReadOnlySource::Anonymous(shared_data)
}
}
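The retention caveat documented on `slice` follows directly from the `Arc` sharing in `SharedVecSlice`: a sub-slice clones the `Arc`, so the whole backing buffer stays allocated while any view is alive. A sketch with a deliberately small buffer (std types only, illustrative):

use std::sync::Arc;

fn main() {
    // Stand-in for the anonymous buffer behind a ReadOnlySource.
    let big: Arc<Vec<u8>> = Arc::new(vec![0u8; 1 << 20]);
    // A "slice" keeps its own Arc clone, exactly like SharedVecSlice::slice.
    let view = Arc::clone(&big);
    drop(big);
    // Only 1KB is ever read, yet the whole 1MB allocation is still alive.
    assert_eq!(view[..1024].len(), 1024);
    assert_eq!(Arc::strong_count(&view), 1);
}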

View File

@@ -4,12 +4,11 @@ use std::sync::Arc;
#[derive(Clone)]
pub struct SharedVecSlice {
pub data: Arc<Vec<u8>>,
pub start: usize,
pub len: usize,
}
impl SharedVecSlice {
pub fn empty() -> SharedVecSlice {
SharedVecSlice::new(Arc::new(Vec::new()))
}
@@ -23,11 +22,11 @@ impl SharedVecSlice {
}
}
pub fn as_slice(&self) -> &[u8] {
&self.data[self.start..self.start + self.len]
}
pub fn slice(&self, from_offset: usize, to_offset: usize) -> SharedVecSlice {
SharedVecSlice {
data: self.data.clone(),
start: self.start + from_offset,
@@ -35,3 +34,10 @@ impl SharedVecSlice {
}
}
}
impl From<Vec<u8>> for SharedVecSlice {
fn from(data: Vec<u8>) -> SharedVecSlice {
SharedVecSlice::new(Arc::new(data))
}
}

View File

@@ -1,97 +1,142 @@
#![allow(enum_variant_names)]
//! Definition of Tantivy's error and result.
use std::io;
use std::path::PathBuf;
use std::error;
use std::sync::PoisonError;
use directory::error::{IOError, OpenReadError, OpenWriteError, OpenDirectoryError};
use query;
use schema;
use fastfield::FastFieldNotAvailableError;
use serde_json;
error_chain!(
errors {
/// Path does not exist.
PathDoesNotExist(buf: PathBuf) {
description("path does not exist")
display("path does not exist: '{:?}'", buf)
}
/// File already exists, this is a problem when we try to write into a new file.
FileAlreadyExists(buf: PathBuf) {
description("file already exists")
display("file already exists: '{:?}'", buf)
}
/// IO Error.
IOError(err: IOError) {
description("an IO error occurred")
display("an IO error occurred: '{}'", err)
}
/// The data within is corrupted.
///
/// For instance, it contains invalid JSON.
CorruptedFile(buf: PathBuf) {
description("file contains corrupted data")
display("file contains corrupted data: '{:?}'", buf)
}
/// A thread holding the lock panicked and poisoned the lock.
Poisoned {
description("a thread holding the lock panicked and poisoned the lock")
}
/// Invalid argument was passed by the user.
InvalidArgument(arg: String) {
description("an invalid argument was passed")
display("an invalid argument was passed: '{}'", arg)
}
/// An Error happened in one of the threads.
ErrorInThread(err: String) {
description("an error occurred in a thread")
display("an error occurred in a thread: '{}'", err)
}
/// An Error appeared related to the lack of a field.
SchemaError(field: String) {
description("a schema field is missing")
display("a schema field is missing: '{}'", field)
}
/// Tried to access a fastfield reader for a field not configured accordingly.
FastFieldError(err: FastFieldNotAvailableError) {
description("fast field not available")
display("fast field not available: '{:?}'", err)
}
}
);
impl From<FastFieldNotAvailableError> for Error {
fn from(fastfield_error: FastFieldNotAvailableError) -> Error {
ErrorKind::FastFieldError(fastfield_error).into()
}
}
impl From<IOError> for Error {
fn from(io_error: IOError) -> Error {
ErrorKind::IOError(io_error).into()
}
}
impl From<io::Error> for Error {
fn from(io_error: io::Error) -> Error {
ErrorKind::IOError(io_error.into()).into()
}
}
impl From<query::QueryParserError> for Error {
fn from(parsing_error: query::QueryParserError) -> Error {
Error::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
ErrorKind::InvalidArgument(format!("Query is invalid. {:?}", parsing_error)).into()
}
}
impl<Guard> From<PoisonError<Guard>> for Error {
fn from(_: PoisonError<Guard>) -> Error {
ErrorKind::Poisoned.into()
}
}
impl From<OpenReadError> for Error {
fn from(error: OpenReadError) -> Error {
match error {
OpenReadError::FileDoesNotExist(filepath) => {
    ErrorKind::PathDoesNotExist(filepath).into()
}
OpenReadError::IOError(io_error) => ErrorKind::IOError(io_error).into(),
}
}
}
impl From<schema::DocParsingError> for Error {
fn from(error: schema::DocParsingError) -> Error {
Error::InvalidArgument(format!("Failed to parse document {:?}", error))
ErrorKind::InvalidArgument(format!("Failed to parse document {:?}", error)).into()
}
}
impl From<OpenWriteError> for Error {
fn from(error: OpenWriteError) -> Error {
match error {
OpenWriteError::FileAlreadyExists(filepath) => {
ErrorKind::FileAlreadyExists(filepath)
}
OpenWriteError::IOError(io_error) => ErrorKind::IOError(io_error),
}
.into()
}
}
impl From<OpenDirectoryError> for Error {
fn from(error: OpenDirectoryError) -> Error {
match error {
OpenDirectoryError::DoesNotExist(directory_path) => {
ErrorKind::PathDoesNotExist(directory_path).into()
}
OpenDirectoryError::NotADirectory(directory_path) => {
ErrorKind::InvalidArgument(format!("{:?} is not a directory", directory_path))
.into()
}
}
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Error {
let io_err = io::Error::from(error);
ErrorKind::IOError(io_err.into()).into()
}
}

View File

@@ -6,6 +6,9 @@ use directory::ReadOnlySource;
use DocId;
use common::HasLen;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
let max_doc = delete_bitset.capacity();
let mut byte = 0u8;
@@ -18,8 +21,7 @@ pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io:
writer.write_all(&[byte])?;
shift = 0;
byte = 0;
}
else {
} else {
shift += 1;
}
}
@@ -29,17 +31,18 @@ pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io:
writer.flush()
}
/// Set of deleted `DocId`s.
#[derive(Clone)]
pub struct DeleteBitSet {
data: ReadOnlySource,
len: usize,
}
impl DeleteBitSet {
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data.as_slice()
.iter()
.map(|b| b.count_ones() as usize)
.sum();
@@ -49,6 +52,7 @@ impl DeleteBitSet {
}
}
/// Returns an empty delete bit set.
pub fn empty() -> DeleteBitSet {
DeleteBitSet {
data: ReadOnlySource::empty(),
@@ -56,22 +60,23 @@ impl DeleteBitSet {
}
}
/// Returns true iff the segment has some deleted documents.
pub fn has_deletes(&self) -> bool {
self.len() > 0
}
/// Returns true iff the document is deleted.
#[inline]
pub fn is_deleted(&self, doc: DocId) -> bool {
if self.len == 0 {
false
} else {
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift) != 0
}
}
}
@@ -124,4 +129,4 @@ mod tests {
test_delete_bitset_helper(&bitset);
}
}
}
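A worked example of the bit addressing used by `is_deleted` above: `doc / 8` selects the byte and `doc & 7` selects the bit within it. This is a free-standing sketch, not the crate's code.

fn is_deleted(bitset: &[u8], doc: u32) -> bool {
    let byte_offset = (doc / 8) as usize; // which byte
    let shift = (doc & 7) as u8;          // which bit within the byte
    bitset[byte_offset] & (1u8 << shift) != 0
}

fn main() {
    // One byte with bit 3 set: doc 3 is deleted, its neighbors are not.
    let bitset = [0b0000_1000u8];
    assert!(is_deleted(&bitset, 3));
    assert!(!is_deleted(&bitset, 2));
    assert!(!is_deleted(&bitset, 4));
}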

src/fastfield/error.rs Normal file
View File

@@ -0,0 +1,23 @@
use std::result;
use schema::FieldEntry;
/// `FastFieldNotAvailableError` is returned when the
/// user requests a fast field reader, and the field was not
/// defined in the schema as a fast field.
#[derive(Debug)]
pub struct FastFieldNotAvailableError {
field_name: String,
}
impl FastFieldNotAvailableError {
/// Creates a `FastFieldNotAvailable` error.
/// `field_entry` is the configuration of the field
/// for which fast fields are not available.
pub fn new(field_entry: &FieldEntry) -> FastFieldNotAvailableError {
FastFieldNotAvailableError { field_name: field_entry.name().to_string() }
}
}
/// Result when trying to access a fast field reader.
pub type Result<R> = result::Result<R, FastFieldNotAvailableError>;

View File

@@ -1,23 +1,41 @@
/*!
Fast fields are a column-oriented storage of `tantivy`.
It is the equivalent of `Lucene`'s `DocValues`.
Fast fields are designed for fast random access to some document
fields given a document id.
`FastField`s are useful when a field is required for all or most of
the `DocSet`: for instance for scoring, grouping, filtering, or faceting.
Fields have to be declared as `FAST` in the schema.
Currently only 64-bit integers (signed or unsigned) are
supported.
They are stored in a bitpacked fashion so that their
memory usage is directly linear with the amplitude of the
values stored.
Read access performance is comparable to that of an array lookup.
*/
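A back-of-envelope sketch of the "linear in the amplitude" claim in the module docs: the bitpacked representation only needs enough bits per value to cover `max - min`. The helper below is illustrative, not tantivy's serializer.

// Number of bits needed to represent any value in 0..=amplitude.
fn num_bits(amplitude: u64) -> u32 {
    64 - amplitude.leading_zeros()
}

fn main() {
    // 10_000 docs all equal to 100_000: amplitude 0, ~0 bits per value,
    // which is why the null-amplitude test below serializes so small.
    assert_eq!(num_bits(0), 0);
    // Values spanning 0..=100_000: 17 bits per value instead of 64.
    assert_eq!(num_bits(100_000), 17);
}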
mod reader;
mod writer;
mod serializer;
mod error;
mod delete;
pub use self::delete::write_delete_bitset;
pub use self::delete::DeleteBitSet;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
pub use self::reader::{FastFieldsReader, U64FastFieldReader, I64FastFieldReader};
pub use self::reader::FastFieldReader;
pub use self::serializer::FastFieldSerializer;
pub use self::error::{Result, FastFieldNotAvailableError};
#[cfg(test)]
mod tests {
@@ -30,6 +48,7 @@ mod tests {
use schema::FAST;
use test::Bencher;
use test;
use fastfield::FastFieldReader;
use rand::Rng;
use rand::SeedableRng;
use rand::XorShiftRng;
@@ -37,26 +56,26 @@ mod tests {
lazy_static! {
static ref SCHEMA: Schema = {
let mut schema_builder = SchemaBuilder::default();
schema_builder.add_u64_field("field", FAST);
schema_builder.build()
};
static ref FIELD: Field = {
SCHEMA.get_field("field").unwrap()
};
}
fn add_single_field_doc(fast_field_writers: &mut FastFieldsWriter, field: Field, value: u64) {
let mut doc = Document::default();
doc.add_u64(field, value);
fast_field_writers.add_document(&doc);
}
#[test]
pub fn test_fastfield() {
let test_fastfield = U64FastFieldReader::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get(0), 100);
assert_eq!(test_fastfield.get(1), 200);
assert_eq!(test_fastfield.get(2), 300);
}
#[test]
@@ -66,23 +85,24 @@ mod tests {
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
add_single_field_doc(&mut fast_field_writers, *FIELD, 13u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 14u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 2u64);
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
assert_eq!(source.len(), 38 as usize);
}
{
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
assert_eq!(fast_field_reader.get(0), 13u64);
assert_eq!(fast_field_reader.get(1), 14u64);
assert_eq!(fast_field_reader.get(2), 2u64);
}
}
@@ -93,40 +113,41 @@ mod tests {
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
add_single_field_doc(&mut fast_field_writers, *FIELD, 4u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 14_082_001u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 3_052u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 9002u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 15_001u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 777u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 1_002u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 1_501u64);
add_single_field_doc(&mut fast_field_writers, *FIELD, 215u64);
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
assert_eq!(source.len(), 63 as usize);
}
{
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
assert_eq!(fast_field_reader.get(0), 4u64);
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
assert_eq!(fast_field_reader.get(2), 3_052u64);
assert_eq!(fast_field_reader.get(3), 9002u64);
assert_eq!(fast_field_reader.get(4), 15_001u64);
assert_eq!(fast_field_reader.get(5), 777u64);
assert_eq!(fast_field_reader.get(6), 1_002u64);
assert_eq!(fast_field_reader.get(7), 1_501u64);
assert_eq!(fast_field_reader.get(8), 215u64);
}
}
#[test]
fn test_intfastfield_null_amplitude() {
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
@@ -134,30 +155,134 @@ mod tests {
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = U32FastFieldsWriter::from_schema(&SCHEMA);
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for _ in 0..10_000 {
add_single_field_doc(&mut fast_field_writers, *FIELD, 100_000u32);
add_single_field_doc(&mut fast_field_writers, *FIELD, 100_000u64);
}
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
assert_eq!(source.len(), 18 as usize);
assert_eq!(source.len(), 36 as usize);
}
{
let fast_field_readers = U32FastFieldsReader::open(source).unwrap();
let fast_field_reader = fast_field_readers.get_field(*FIELD).unwrap();
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
for doc in 0..10_000 {
assert_eq!(fast_field_reader.get(doc), 100_000u32);
assert_eq!(fast_field_reader.get(doc), 100_000u64);
}
}
}
fn generate_permutation() -> Vec<u32> {
#[test]
fn test_intfastfield_large_numbers() {
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
// forcing the amplitude to be high
add_single_field_doc(&mut fast_field_writers, *FIELD, 0u64);
for i in 0u64..10_000u64 {
add_single_field_doc(&mut fast_field_writers,
*FIELD,
5_000_000_000_000_000_000u64 + i);
}
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
assert_eq!(source.len(), 80044 as usize);
}
{
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
assert_eq!(fast_field_reader.get(0), 0u64);
for doc in 1..10_001 {
assert_eq!(fast_field_reader.get(doc),
5_000_000_000_000_000_000u64 + doc as u64 - 1u64);
}
}
}
#[test]
fn test_signed_intfastfield() {
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = SchemaBuilder::new();
let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
for i in -100i64..10_000i64 {
let mut doc = Document::default();
doc.add_i64(i64_field, i);
fast_field_writers.add_document(&doc);
}
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
assert_eq!(source.len(), 17711 as usize);
}
{
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: I64FastFieldReader =
fast_field_readers.open_reader(i64_field).unwrap();
assert_eq!(fast_field_reader.min_value(), -100i64);
assert_eq!(fast_field_reader.max_value(), 9_999i64);
for (doc, i) in (-100i64..10_000i64).enumerate() {
assert_eq!(fast_field_reader.get(doc as u32), i);
}
let mut buffer = vec![0i64; 100];
fast_field_reader.get_range(53, &mut buffer[..]);
for i in 0..100 {
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
}
}
}
#[test]
fn test_signed_intfastfield_default_val() {
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = SchemaBuilder::new();
let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
let doc = Document::default();
fast_field_writers.add_document(&doc);
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: I64FastFieldReader =
fast_field_readers.open_reader(i64_field).unwrap();
assert_eq!(fast_field_reader.get(0u32), 0i64);
}
}
fn generate_permutation() -> Vec<u64> {
let seed: &[u32; 4] = &[1, 2, 3, 4];
let mut rng = XorShiftRng::from_seed(*seed);
let mut permutation: Vec<u32> = (0u32..1_000_000u32).collect();
let mut permutation: Vec<u64> = (0u64..1_000_000u64).collect();
rng.shuffle(&mut permutation);
permutation
}
@@ -171,7 +296,7 @@ mod tests {
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = U32FastFieldsWriter::from_schema(&SCHEMA);
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for x in &permutation {
add_single_field_doc(&mut fast_field_writers, *FIELD, *x);
}
@@ -180,9 +305,10 @@ mod tests {
}
let source = directory.open_read(&path).unwrap();
{
let fast_field_readers = U32FastFieldsReader::open(source).unwrap();
let fast_field_reader = fast_field_readers.get_field(*FIELD).unwrap();
let mut a = 0u32;
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
let mut a = 0u64;
for _ in 0..n {
assert_eq!(fast_field_reader.get(a as u32), permutation[a as usize]);
a = fast_field_reader.get(a as u32);
@@ -195,8 +321,8 @@ mod tests {
let permutation = generate_permutation();
b.iter(|| {
let n = test::black_box(7000u32);
let mut a = 0u32;
for i in (0u32..n).step_by(7) {
let mut a = 0u64;
for i in Iterator::step_by((0u32..n), 7) {
a ^= permutation[i as usize];
}
a
@@ -207,13 +333,13 @@ mod tests {
fn bench_intfastfield_veclookup(b: &mut Bencher) {
let permutation = generate_permutation();
b.iter(|| {
let n = test::black_box(1000u32);
let mut a = 0u32;
for _ in 0u32..n {
a = permutation[a as usize];
}
a
});
let n = test::black_box(1000u32);
let mut a = 0u64;
for _ in 0u32..n {
a = permutation[a as usize];
}
a
});
}
#[bench]
@@ -224,7 +350,7 @@ mod tests {
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = U32FastFieldsWriter::from_schema(&SCHEMA);
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for x in &permutation {
add_single_field_doc(&mut fast_field_writers, *FIELD, *x);
}
@@ -233,12 +359,13 @@ mod tests {
}
let source = directory.open_read(&path).unwrap();
{
let fast_field_readers = U32FastFieldsReader::open(source).unwrap();
let fast_field_reader = fast_field_readers.get_field(*FIELD).unwrap();
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
b.iter(|| {
let n = test::black_box(7000u32);
let mut a = 0u32;
for i in (0u32..n).step_by(7) {
let mut a = 0u64;
for i in Iterator::step_by((0u32..n), 7) {
a ^= fast_field_reader.get(i);
}
a
@@ -254,7 +381,7 @@ mod tests {
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = U32FastFieldsWriter::from_schema(&SCHEMA);
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for x in &permutation {
add_single_field_doc(&mut fast_field_writers, *FIELD, *x);
}
@@ -263,16 +390,17 @@ mod tests {
}
let source = directory.open_read(&path).unwrap();
{
let fast_field_readers = U32FastFieldsReader::open(source).unwrap();
let fast_field_reader = fast_field_readers.get_field(*FIELD).unwrap();
let fast_field_readers = FastFieldsReader::from_source(source).unwrap();
let fast_field_reader: U64FastFieldReader =
fast_field_readers.open_reader(*FIELD).unwrap();
b.iter(|| {
let n = test::black_box(1000u32);
let mut a = 0u32;
for _ in 0u32..n {
a = fast_field_reader.get(a);
}
a
});
let n = test::black_box(1000u32);
let mut a = 0u32;
for _ in 0u32..n {
a = fast_field_reader.get(a) as u32;
}
a
});
}
}
}

View File

@@ -1,7 +1,5 @@
use std::io;
use std::collections::HashMap;
use std::ops::Deref;
use directory::ReadOnlySource;
use common::BinarySerializable;
use DocId;
@@ -10,83 +8,139 @@ use std::path::Path;
use schema::FAST;
use directory::{WritePtr, RAMDirectory, Directory};
use fastfield::FastFieldSerializer;
use fastfield::U32FastFieldsWriter;
use fastfield::FastFieldsWriter;
use common::bitpacker::compute_num_bits;
use common::bitpacker::BitUnpacker;
use schema::FieldType;
use error::ResultExt;
use std::mem;
use common;
use owning_ref::OwningRef;
/// Trait for accessing a fastfield.
///
/// Depending on the field type, a different
/// fast field is required.
pub trait FastFieldReader: Sized {
/// Type of the value stored in the fastfield.
type ValueType;
lazy_static! {
static ref U32_FAST_FIELD_EMPTY: ReadOnlySource = {
let u32_fast_field = U32FastFieldReader::from(Vec::new());
u32_fast_field._data.clone()
};
/// Return the value associated to the given document.
///
/// This accessor should return as fast as possible.
///
/// # Panics
///
/// May panic if `doc` is greater than the segment's
/// `maxdoc`.
fn get(&self, doc: DocId) -> Self::ValueType;
/// Fills an output buffer with the fast field values
/// associated with the `DocId` going from
/// `start` to `start + output.len()`.
///
/// # Panics
///
/// May panic if `start + output.len()` is greater than
/// the segment's `maxdoc`.
fn get_range(&self, start: u32, output: &mut [Self::ValueType]);
/// Opens a fast field given a source.
fn open(source: ReadOnlySource) -> Self;
/// Returns true iff the given field_type makes
/// it possible to access the field values via a
/// fastfield.
fn is_enabled(field_type: &FieldType) -> bool;
}
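For orientation, a minimal usage sketch of this trait, assuming a `U64FastFieldReader` already opened through a `FastFieldsReader` as in the tests above:
// Minimal sketch, assuming `reader: U64FastFieldReader` is in scope.
let first_value: u64 = reader.get(0u32);
// Fill `buffer` with the values of docs 10..(10 + buffer.len()).
let mut buffer = vec![0u64; 16];
reader.get_range(10u32, &mut buffer[..]);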
pub struct U32FastFieldReader {
_data: ReadOnlySource,
bit_unpacker: BitUnpacker,
min_val: u32,
max_val: u32,
/// `FastFieldReader` for unsigned 64-bit integers.
pub struct U64FastFieldReader {
bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
min_value: u64,
max_value: u64,
}
impl U32FastFieldReader {
pub fn empty() -> U32FastFieldReader {
U32FastFieldReader::open(U32_FAST_FIELD_EMPTY.clone())
impl U64FastFieldReader {
/// Returns the minimum value for this fast field.
///
/// The min value does not take deleted documents
/// into account, and should be considered a lower bound
/// of the actual minimum value.
pub fn min_value(&self) -> u64 {
self.min_value
}
pub fn min_val(&self,) -> u32 {
self.min_val
/// Returns the maximum value for this fast field.
///
/// The max value does not take deleted documents
/// into account, and should be considered an upper bound
/// of the actual maximum value.
pub fn max_value(&self) -> u64 {
self.max_value
}
}
impl FastFieldReader for U64FastFieldReader {
type ValueType = u64;
fn get(&self, doc: DocId) -> u64 {
self.min_value + self.bit_unpacker.get(doc as usize)
}
pub fn max_val(&self,) -> u32 {
self.max_val
fn is_enabled(field_type: &FieldType) -> bool {
match *field_type {
FieldType::U64(ref integer_options) => integer_options.is_fast(),
_ => false,
}
}
fn get_range(&self, start: u32, output: &mut [Self::ValueType]) {
self.bit_unpacker.get_range(start, output);
for out in output.iter_mut() {
*out += self.min_value;
}
}
/// Opens a new fast field reader given a read only source.
///
/// # Panics
/// Panics if the data is corrupted.
pub fn open(data: ReadOnlySource) -> U32FastFieldReader {
let min_val;
let amplitude;
let max_val;
fn open(data: ReadOnlySource) -> U64FastFieldReader {
let min_value: u64;
let amplitude: u64;
{
let mut cursor = data.as_slice();
min_val = u32::deserialize(&mut cursor).unwrap();
amplitude = u32::deserialize(&mut cursor).unwrap();
max_val = min_val + amplitude;
}
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = {
let data_arr = &(data.deref()[8..]);
BitUnpacker::new(data_arr, num_bits as usize)
};
U32FastFieldReader {
_data: data,
bit_unpacker: bit_unpacker,
min_val: min_val,
max_val: max_val,
}
}
min_value = u64::deserialize(&mut cursor)
.expect("Failed to read the min_value of fast field.");
amplitude = u64::deserialize(&mut cursor)
.expect("Failed to read the amplitude of fast field.");
pub fn get(&self, doc: DocId) -> u32 {
self.min_val + self.bit_unpacker.get(doc as usize)
}
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
let bit_unpacker = BitUnpacker::new(owning_ref, num_bits as usize);
U64FastFieldReader {
min_value: min_value,
max_value: max_value,
bit_unpacker: bit_unpacker,
}
}
}
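To recap the layout `open` expects: a 16-byte header holding `min_value` and then `amplitude`, followed by the values bit-packed as deltas from `min_value`. A self-contained sketch of the delta arithmetic, assuming `compute_num_bits` returns the smallest width able to represent `amplitude`:
// Sketch of the bit-width and delta logic mirrored by `open`/`get` above
// (an assumption about `compute_num_bits`, not the real implementation).
fn num_bits_for(amplitude: u64) -> u8 {
    (64 - amplitude.leading_zeros()) as u8 // 0 when amplitude == 0
}

fn encode(val: u64, min_value: u64) -> u64 {
    val - min_value // the delta that gets bit-packed
}

fn decode(delta: u64, min_value: u64) -> u64 {
    min_value + delta // what `get` reconstructs
}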
impl From<Vec<u32>> for U32FastFieldReader {
fn from(vals: Vec<u32>) -> U32FastFieldReader {
impl From<Vec<u64>> for U64FastFieldReader {
fn from(vals: Vec<u64>) -> U64FastFieldReader {
let mut schema_builder = SchemaBuilder::default();
let field = schema_builder.add_u32_field("field", FAST);
let field = schema_builder.add_u64_field("field", FAST);
let schema = schema_builder.build();
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::new(write).unwrap();
let mut fast_field_writers = U32FastFieldsWriter::from_schema(&schema);
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
for val in vals {
let mut fast_field_writer = fast_field_writers.get_field_writer(field).unwrap();
fast_field_writer.add_val(val);
@@ -94,62 +148,150 @@ impl From<Vec<u32>> for U32FastFieldReader {
fast_field_writers.serialize(&mut serializer).unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
let fast_field_readers = U32FastFieldsReader::open(source).unwrap();
fast_field_readers.get_field(field).unwrap()
}
directory
.open_read(path)
.chain_err(|| "Failed to open the file")
.and_then(|source| {
FastFieldsReader::from_source(source)
.chain_err(|| "Failed to read the file.")
})
.and_then(|ff_readers| {
ff_readers
.open_reader(field)
.ok_or_else(|| "Failed to find the requested field".into())
})
.expect("This should never happen, please report.")
}
}
pub struct U32FastFieldsReader {
/// `FastFieldReader` for signed 64-bit integers.
pub struct I64FastFieldReader {
underlying: U64FastFieldReader,
}
impl I64FastFieldReader {
/// Returns the minimum value for this fast field.
///
/// The min value does not take deleted documents
/// into account, and should be considered a lower bound
/// of the actual minimum value.
pub fn min_value(&self) -> i64 {
common::u64_to_i64(self.underlying.min_value())
}
/// Returns the maximum value for this fast field.
///
/// The max value does not take deleted documents
/// into account, and should be considered an upper bound
/// of the actual maximum value.
pub fn max_value(&self) -> i64 {
common::u64_to_i64(self.underlying.max_value())
}
}
impl FastFieldReader for I64FastFieldReader {
type ValueType = i64;
///
///
/// # Panics
///
/// May panic or return a wrong result if `doc`
/// is greater than or equal to the segment's `maxdoc`.
fn get(&self, doc: DocId) -> i64 {
common::u64_to_i64(self.underlying.get(doc))
}
///
/// # Panics
///
/// May panic or return a wrong result if `doc`
/// is greater than or equal to the segment's `maxdoc`.
fn get_range(&self, start: u32, output: &mut [Self::ValueType]) {
let output_u64: &mut [u64] = unsafe { mem::transmute(output) };
self.underlying.get_range(start, output_u64);
for mut_val in output_u64.iter_mut() {
*mut_val ^= 1 << 63;
}
}
/// Opens a new fast field reader given a read only source.
///
/// # Panics
/// Panics if the data is corrupted.
fn open(data: ReadOnlySource) -> I64FastFieldReader {
I64FastFieldReader { underlying: U64FastFieldReader::open(data) }
}
fn is_enabled(field_type: &FieldType) -> bool {
match *field_type {
FieldType::I64(ref integer_options) => integer_options.is_fast(),
_ => false,
}
}
}
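The i64 support rests entirely on an order-preserving remapping to u64. Consistent with the sign-bit flip (`^= 1 << 63`) in `get_range` above, a sketch of what `common::i64_to_u64` and `common::u64_to_i64` presumably compute:
// Order-preserving i64 <-> u64 mapping (a sketch inferred from the
// sign-bit flip above; i64::min_value() maps to 0u64).
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

// e.g. i64_to_u64(-1) < i64_to_u64(0), and the round-trip is lossless.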
/// The `FastFieldsReader` is the data structure containing
/// all of the fast fields' data.
///
/// It contains a mapping that associates these fields to
/// the proper slice in the fastfield reader file.
pub struct FastFieldsReader {
source: ReadOnlySource,
field_offsets: HashMap<Field, (u32, u32)>,
}
impl U32FastFieldsReader {
pub fn open(source: ReadOnlySource) -> io::Result<U32FastFieldsReader> {
impl FastFieldsReader {
/// Opens a `FastFieldsReader`
///
/// When opening the fast field reader,
/// the list of offsets is read (as a footer of the
/// data file).
pub fn from_source(source: ReadOnlySource) -> io::Result<FastFieldsReader> {
let header_offset;
let field_offsets: Vec<(Field, u32)>;
{
let buffer = source.as_slice();
{
let mut cursor = buffer;
header_offset = try!(u32::deserialize(&mut cursor));
header_offset = u32::deserialize(&mut cursor)?;
}
{
let mut cursor = &buffer[header_offset as usize..];
field_offsets = try!(Vec::deserialize(&mut cursor));
field_offsets = Vec::deserialize(&mut cursor)?;
}
}
let mut end_offsets: Vec<u32> = field_offsets
.iter()
.map(|&(_, offset)| offset)
.collect();
let mut end_offsets: Vec<u32> = field_offsets.iter().map(|&(_, offset)| offset).collect();
end_offsets.push(header_offset);
let mut field_offsets_map: HashMap<Field, (u32, u32)> = HashMap::new();
for (field_start_offsets, stop_offset) in field_offsets.iter().zip(end_offsets.iter().skip(1)) {
for (field_start_offsets, stop_offset) in
field_offsets.iter().zip(end_offsets.iter().skip(1)) {
let (field, start_offset) = *field_start_offsets;
field_offsets_map.insert(field, (start_offset, *stop_offset));
}
Ok(U32FastFieldsReader {
field_offsets: field_offsets_map,
source: source,
})
Ok(FastFieldsReader {
field_offsets: field_offsets_map,
source: source,
})
}
/// Returns the u32 fast value reader if the field
/// is a u32 field indexed as "fast".
/// Returns the u64 fast value reader if the field
/// is a u64 field indexed as "fast".
///
/// Return None if the field is not a u32 field
/// Returns `None` if the field is not a u64 field
/// indexed with the fast option.
///
/// # Panics
/// May panic if the index is corrupted.
pub fn get_field(&self, field: Field) -> Option<U32FastFieldReader> {
pub fn open_reader<FFReader: FastFieldReader>(&self, field: Field) -> Option<FFReader> {
self.field_offsets
.get(&field)
.map(|&(start, stop)| {
let field_source = self.source.slice(start as usize, stop as usize);
U32FastFieldReader::open(field_source)
})
let field_source = self.source.slice(start as usize, stop as usize);
FFReader::open(field_source)
})
}
}

View File

@@ -2,34 +2,33 @@ use common::BinarySerializable;
use directory::WritePtr;
use schema::Field;
use common::bitpacker::{compute_num_bits, BitPacker};
use common::CountingWriter;
use std::io::{self, Write, Seek, SeekFrom};
/// `FastFieldSerializer` is in charge of serializing
/// fastfields on disk.
///
///
/// Fast fields are encoded using bit-packing.
///
///
/// `FastFieldWriter`s are in charge of pushing the data to
/// the serializer.
/// The serializer expects to receive the following calls.
///
/// * `new_u32_fast_field(...)`
/// * `new_u64_fast_field(...)`
/// * `add_val(...)`
/// * `add_val(...)`
/// * `add_val(...)`
/// * ...
/// * `close_field()`
/// * `new_u32_fast_field(...)`
/// * `new_u64_fast_field(...)`
/// * `add_val(...)`
/// * ...
/// * `close_field()`
/// * `close()`
pub struct FastFieldSerializer {
write: WritePtr,
written_size: usize,
write: CountingWriter<WritePtr>,
fields: Vec<(Field, u32)>,
min_value: u32,
min_value: u64,
field_open: bool,
bit_packer: BitPacker,
}
@@ -37,46 +36,50 @@ pub struct FastFieldSerializer {
impl FastFieldSerializer {
/// Constructor
pub fn new(mut write: WritePtr) -> io::Result<FastFieldSerializer> {
pub fn new(write: WritePtr) -> io::Result<FastFieldSerializer> {
// just making room for the pointer to the header.
let written_size: usize = try!(0u32.serialize(&mut write));
let mut counting_writer = CountingWriter::wrap(write);
0u32.serialize(&mut counting_writer)?;
Ok(FastFieldSerializer {
write: write,
written_size: written_size,
fields: Vec::new(),
min_value: 0,
field_open: false,
bit_packer: BitPacker::new(0),
})
write: counting_writer,
fields: Vec::new(),
min_value: 0,
field_open: false,
bit_packer: BitPacker::new(0),
})
}
/// Start serializing a new u32 fast field
pub fn new_u32_fast_field(&mut self, field: Field, min_value: u32, max_value: u32) -> io::Result<()> {
/// Start serializing a new u64 fast field
pub fn new_u64_fast_field(&mut self,
field: Field,
min_value: u64,
max_value: u64)
-> io::Result<()> {
if self.field_open {
return Err(io::Error::new(io::ErrorKind::Other, "Previous field not closed"));
}
self.min_value = min_value;
self.field_open = true;
self.fields.push((field, self.written_size as u32));
let write: &mut Write = &mut self.write;
self.written_size += try!(min_value.serialize(write));
self.fields.push((field, self.write.written_bytes() as u32));
let write = &mut self.write;
min_value.serialize(write)?;
let amplitude = max_value - min_value;
self.written_size += try!(amplitude.serialize(write));
amplitude.serialize(write)?;
let num_bits = compute_num_bits(amplitude);
self.bit_packer = BitPacker::new(num_bits as usize);
Ok(())
}
/// Pushes a new value to the currently open u32 fast field.
pub fn add_val(&mut self, val: u32) -> io::Result<()> {
let val_to_write: u32 = val - self.min_value;
/// Pushes a new value to the currently open u64 fast field.
pub fn add_val(&mut self, val: u64) -> io::Result<()> {
let val_to_write: u64 = val - self.min_value;
self.bit_packer.write(val_to_write, &mut self.write)?;
Ok(())
}
/// Close the u32 fast field.
pub fn close_field(&mut self,) -> io::Result<()> {
/// Close the u64 fast field.
pub fn close_field(&mut self) -> io::Result<()> {
if !self.field_open {
return Err(io::Error::new(io::ErrorKind::Other, "Current field is already closed"));
}
@@ -84,23 +87,24 @@ impl FastFieldSerializer {
// adding some padding to make sure we
// can read the last elements with our u64
// cursor
self.written_size += self.bit_packer.close(&mut self.write)?;
self.bit_packer.close(&mut self.write)?;
Ok(())
}
/// Closes the serializer
///
///
/// After this call the data must be persistently saved on disk.
pub fn close(mut self,) -> io::Result<usize> {
pub fn close(self) -> io::Result<usize> {
if self.field_open {
return Err(io::Error::new(io::ErrorKind::Other, "Last field not closed"));
}
let header_offset: usize = self.written_size;
self.written_size += try!(self.fields.serialize(&mut self.write));
try!(self.write.seek(SeekFrom::Start(0)));
try!((header_offset as u32).serialize(&mut self.write));
try!(self.write.flush());
Ok(self.written_size)
let header_offset: usize = self.write.written_bytes() as usize;
let (mut write, written_size) = self.write.finish()?;
self.fields.serialize(&mut write)?;
write.seek(SeekFrom::Start(0))?;
(header_offset as u32).serialize(&mut write)?;
write.flush()?;
Ok(written_size)
}
}
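Putting the documented call sequence together, a minimal sketch of serializing a single field, assuming `write: WritePtr` and `field: Field` are already in hand:
// Sketch of the call sequence from the doc comment above.
let mut serializer = FastFieldSerializer::new(write)?;
serializer.new_u64_fast_field(field, 2u64, 9u64)?; // min_value, max_value
serializer.add_val(2u64)?;
serializer.add_val(9u64)?;
serializer.close_field()?;
let num_bytes_written = serializer.close()?;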

View File

@@ -3,119 +3,218 @@ use fastfield::FastFieldSerializer;
use std::io;
use schema::Value;
use DocId;
use schema::FieldType;
use common;
use common::VInt;
use common::BinarySerializable;
pub struct U32FastFieldsWriter {
field_writers: Vec<U32FastFieldWriter>,
/// The `FastFieldsWriter` groups all of the fast field writers.
pub struct FastFieldsWriter {
field_writers: Vec<IntFastFieldWriter>,
}
impl U32FastFieldsWriter {
pub fn from_schema(schema: &Schema) -> U32FastFieldsWriter {
let u32_fields: Vec<Field> = schema.fields()
impl FastFieldsWriter {
/// Creates all of the `FastFieldWriter`s required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
let field_writers: Vec<IntFastFieldWriter> = schema
.fields()
.iter()
.enumerate()
.filter(|&(_, field_entry)| field_entry.is_u32_fast())
.map(|(field_id, _)| Field(field_id as u8))
.flat_map(|(field_id, field_entry)| {
let field = Field(field_id as u32);
match *field_entry.field_type() {
FieldType::I64(ref int_options) => {
if int_options.is_fast() {
let mut fast_field_writer = IntFastFieldWriter::new(field);
fast_field_writer.set_val_if_missing(common::i64_to_u64(0i64));
Some(fast_field_writer)
} else {
None
}
}
FieldType::U64(ref int_options) => {
if int_options.is_fast() {
Some(IntFastFieldWriter::new(field))
} else {
None
}
}
_ => None,
}
})
.collect();
U32FastFieldsWriter::new(u32_fields)
FastFieldsWriter { field_writers: field_writers }
}
pub fn new(fields: Vec<Field>) -> U32FastFieldsWriter {
U32FastFieldsWriter {
field_writers: fields
.into_iter()
.map(U32FastFieldWriter::new)
.collect(),
/// Returns a `FastFieldsWriter`
/// with an `IntFastFieldWriter` for each
/// of the fields given as argument.
pub fn new(fields: Vec<Field>) -> FastFieldsWriter {
FastFieldsWriter {
field_writers: fields.into_iter().map(IntFastFieldWriter::new).collect(),
}
}
pub fn get_field_writer(&mut self, field: Field) -> Option<&mut U32FastFieldWriter> {
/// Get the `FastFieldWriter` associated to a field.
pub fn get_field_writer(&mut self, field: Field) -> Option<&mut IntFastFieldWriter> {
// TODO optimize
self.field_writers
.iter_mut()
.find(|field_writer| field_writer.field == field)
}
/// Indexes all of the fastfields of a new document.
pub fn add_document(&mut self, doc: &Document) {
for field_writer in &mut self.field_writers {
field_writer.add_document(doc);
}
}
/// Serializes all of the `FastFieldWriter`s by pushing them in
/// order to the fast field serializer.
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
for field_writer in &self.field_writers {
try!(field_writer.serialize(serializer));
field_writer.serialize(serializer)?;
}
Ok(())
}
/// Ensures all of the fast field writers have
/// reached `doc` (inclusive).
///
/// The missing values will be filled with 0.
pub fn fill_val_up_to(&mut self, doc: DocId) {
for field_writer in &mut self.field_writers {
field_writer.fill_val_up_to(doc);
}
}
}
pub struct U32FastFieldWriter {
/// Fast field writer for ints.
/// The fast field writer just keeps the values in memory.
///
/// Only once the segment writer is closed and
/// persisted on disk is the fast field writer
/// sent to a `FastFieldSerializer` via the `.serialize(...)`
/// method.
///
/// We cannot serialize earlier, as the values are
/// bitpacked and the number of bits required for bitpacking
/// can only be known once we have seen all of the values.
///
/// Both u64 and i64 use the same writer.
/// i64 values are just remapped to the `0..2^64 - 1` range
/// using `common::i64_to_u64`.
pub struct IntFastFieldWriter {
field: Field,
vals: Vec<u32>,
vals: Vec<u8>,
val_count: usize,
val_if_missing: u64,
val_min: u64,
val_max: u64,
}
impl U32FastFieldWriter {
pub fn new(field: Field) -> U32FastFieldWriter {
U32FastFieldWriter {
impl IntFastFieldWriter {
/// Creates a new `IntFastFieldWriter`
pub fn new(field: Field) -> IntFastFieldWriter {
IntFastFieldWriter {
field: field,
vals: Vec::new(),
val_count: 0,
val_if_missing: 0u64,
val_min: u64::max_value(),
val_max: 0,
}
}
/// Sets the default value.
///
/// This default value is recorded for a document
/// if it does not have any value.
fn set_val_if_missing(&mut self, val_if_missing: u64) {
self.val_if_missing = val_if_missing;
}
/// Ensures all of the fast field writers have
/// reached `doc` (inclusive).
///
/// The missing values will be filled with 0.
fn fill_val_up_to(&mut self, doc: DocId) {
let target = doc as usize + 1;
debug_assert!(self.vals.len() <= target);
while self.vals.len() < target {
self.add_val(0u32)
debug_assert!(self.val_count <= target);
let val_if_missing = self.val_if_missing;
while self.val_count < target {
self.add_val(val_if_missing);
}
}
pub fn add_val(&mut self, val: u32) {
self.vals.push(val);
/// Records a new value.
///
/// The n-th value being recorded is implicitly
/// associated with the document of `DocId` n.
/// (Well, `n-1` actually, because of 0-indexing.)
pub fn add_val(&mut self, val: u64) {
VInt(val)
.serialize(&mut self.vals)
.expect("unable to serialize VInt to Vec");
if val > self.val_max {
self.val_max = val;
}
if val < self.val_min {
self.val_min = val;
}
self.val_count += 1;
}
fn extract_val(&self, doc: &Document) -> u32 {
/// Extracts the value associated with the fast field for
/// this document.
///
/// i64 are remapped to u64 using the logic
/// in `common::i64_to_u64`.
///
/// If the value is missing, then the default value is used
/// instead.
/// If the document has more than one value for the given field,
/// only the first one is taken into account.
fn extract_val(&self, doc: &Document) -> u64 {
match doc.get_first(self.field) {
Some(v) => {
match *v {
Value::U32(ref val) => { *val }
_ => { panic!("Expected a u32field, got {:?} ", v) }
Value::U64(ref val) => *val,
Value::I64(ref val) => common::i64_to_u64(*val),
_ => panic!("Expected a u64field, got {:?} ", v),
}
},
None => {
0u32
}
}
None => self.val_if_missing,
}
}
/// Extracts the fast field value from the document
/// (or uses the default value) and records it.
pub fn add_document(&mut self, doc: &Document) {
let val = self.extract_val(doc);
self.add_val(val);
}
/// Pushes the fast field values to the `FastFieldSerializer`.
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
let zero = 0;
let min = *self.vals.iter().min().unwrap_or(&zero);
let max = *self.vals.iter().max().unwrap_or(&min);
try!(serializer.new_u32_fast_field(self.field, min, max));
for &val in &self.vals {
try!(serializer.add_val(val));
let (min, max) = if self.val_min > self.val_max {
(0, 0)
} else {
(self.val_min, self.val_max)
};
serializer.new_u64_fast_field(self.field, min, max)?;
let mut cursor = self.vals.as_slice();
while let Ok(VInt(val)) = VInt::deserialize(&mut cursor) {
serializer.add_val(val)?;
}
serializer.close_field()
}
}
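A short sketch of this writer in isolation: values are buffered as VInts in memory and only replayed into the serializer once `val_min` and `val_max` (and hence the bit width) are known. Assuming `field` and `serializer` are in scope:
// Values are buffered, then replayed at serialization time.
let mut writer = IntFastFieldWriter::new(field);
writer.add_val(13u64);
writer.add_val(2u64);
writer.serialize(&mut serializer)?; // opens the field, replays values, closes it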

View File

@@ -6,7 +6,7 @@ use Index;
use Searcher;
use rand::distributions::{IndependentSample, Range};
fn check_index_content(searcher: &Searcher, vals: &HashSet<u32>) {
fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
assert!(searcher.segment_readers().len() < 20);
assert_eq!(searcher.num_docs() as usize, vals.len());
}
@@ -17,19 +17,19 @@ fn test_indexing() {
let mut schema_builder = SchemaBuilder::default();
let id_field = schema_builder.add_u32_field("id", U32_INDEXED);
let multiples_field = schema_builder.add_u32_field("multiples", U32_INDEXED);
let id_field = schema_builder.add_u64_field("id", INT_INDEXED);
let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap();
let universe = Range::new(0u32, 20u32);
let universe = Range::new(0u64, 20u64);
let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
let mut committed_docs: HashSet<u32> = HashSet::new();
let mut uncommitted_docs: HashSet<u32> = HashSet::new();
let mut committed_docs: HashSet<u64> = HashSet::new();
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..200 {
let random_val = universe.ind_sample(&mut rng);
@@ -41,19 +41,16 @@ fn test_indexing() {
let searcher = index.searcher();
// check that everything is correct.
check_index_content(&searcher, &committed_docs);
}
else {
if committed_docs.remove(&random_val) ||
uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u32(id_field, random_val);
} else {
if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u64(id_field, random_val);
index_writer.delete_term(doc_id_term);
}
else {
} else {
uncommitted_docs.insert(random_val);
let mut doc = Document::new();
doc.add_u32(id_field, random_val);
for i in 1u32..10u32 {
doc.add_u32(multiples_field, random_val * i);
doc.add_u64(id_field, random_val);
for i in 1u64..10u64 {
doc.add_u64(multiples_field, random_val * i);
}
index_writer.add_document(doc);
}

View File

@@ -6,17 +6,17 @@ use std::ops::DerefMut;
// The DeleteQueue is conceptually similar to a
// multiple-consumer, single-producer broadcast channel.
//
// All consumers will receive all messages.
//
// Consumers of the delete queue hold a `DeleteCursor`,
// which points to a specific place of the `DeleteQueue`.
//
// New consumers can be created in two ways
// - calling `delete_queue.cursor()` returns a cursor, that
// will include all future delete operations (and no past operations).
// - cloning an existing cursor returns a new cursor, that
// is at the exact same position, and can now advance independently
// from the original cursor.
#[derive(Default)]
struct InnerDeleteQueue {
@@ -31,32 +31,27 @@ pub struct DeleteQueue {
impl DeleteQueue {
// Creates a new delete queue.
pub fn new() -> DeleteQueue {
let delete_queue = DeleteQueue {
inner: Arc::default(),
};
let delete_queue = DeleteQueue { inner: Arc::default() };
let next_block = NextBlock::from(delete_queue.clone());
{
let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
delete_queue_wlock.last_block = Some(
Arc::new(Block {
operations: Arc::default(),
next: next_block,
})
);
delete_queue_wlock.last_block = Some(Arc::new(Block {
operations: Arc::default(),
next: next_block,
}));
}
delete_queue
}
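A small sketch of the broadcast semantics described above, with `delete_operation` standing for a `DeleteOperation` built as in the tests at the bottom of this file:
// Every cursor independently replays all operations pushed after
// its creation point.
let delete_queue = DeleteQueue::new();
let mut cursor_a = delete_queue.cursor();
delete_queue.push(delete_operation);
let mut cursor_b = cursor_a.clone(); // same position, advances independently
assert_eq!(cursor_a.get().unwrap().opstamp,
           cursor_b.get().unwrap().opstamp);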
// Creates a new cursor that makes it possible to
// consume future delete operations.
//
// Past delete operations are not accessible.
pub fn cursor(&self) -> DeleteCursor {
let last_block = self.inner
@@ -85,40 +80,37 @@ impl DeleteQueue {
// DeleteQueue is a linked list of blocks of
// delete operations.
//
// Writing happens by simply appending to a vec.
// `.flush()` takes this pending delete operations vec,
// creates a new read-only block from it,
// and appends it to the linked list.
//
// `.flush()` happens when, for instance,
// a consumer reaches the last read-only operations.
// It then asks the delete queue if there happen to
// be some unflushed operations.
//
fn flush(&self) -> Option<Arc<Block>> {
let mut self_wlock = self
.inner
let mut self_wlock = self.inner
.write()
.expect("Failed to acquire write lock on delete queue writer");
let delete_operations;
{
let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
if writer.is_empty() {
return None;
}
delete_operations = mem::replace(writer, vec!());
delete_operations = mem::replace(writer, vec![]);
}
let next_block = NextBlock::from(self.clone());
{
self_wlock.last_block = Some(
Arc::new(Block {
operations: Arc::new(delete_operations),
next: next_block,
})
);
self_wlock.last_block = Some(Arc::new(Block {
operations: Arc::new(delete_operations),
next: next_block,
}));
}
self_wlock.last_block.clone()
}
@@ -137,17 +129,14 @@ impl From<DeleteQueue> for NextBlock {
}
}
impl NextBlock {
fn next_block(&self) -> Option<Arc<Block>> {
{
let next_read_lock = self.0
.read()
.expect("Failed to acquire write lock in delete queue");
match *next_read_lock {
InnerNextBlock::Closed(ref block) => {
return Some(block.clone());
}
_ => {}
if let InnerNextBlock::Closed(ref block) = *next_read_lock {
return Some(block.clone());
}
}
let next_block;
@@ -170,8 +159,8 @@ impl NextBlock {
}
}
}
*next_write_lock.deref_mut() = InnerNextBlock::Closed(next_block.clone()); // TODO fix
return Some(next_block)
*next_write_lock.deref_mut() = InnerNextBlock::Closed(next_block.clone());
Some(next_block)
}
}
}
@@ -189,32 +178,31 @@ pub struct DeleteCursor {
}
impl DeleteCursor {
/// Skips operations and positions the cursor so that
/// - either all of the delete operations currently in the
/// queue are consumed and the next get will return None.
/// - the next get will return the first operation with an
/// `opstamp >= target_opstamp`.
pub fn skip_to(&mut self, target_opstamp: u64) {
// TODO Can be optimize as we work with block.
#[cfg_attr(feature = "cargo-clippy", allow(while_let_loop))]
loop {
if let Some(operation) = self.get() {
if operation.opstamp >= target_opstamp {
break;
}
}
else {
} else {
break;
}
self.advance();
}
}
/// If the current block has been entirely
/// consumed, try to load the next one.
///
/// Returns `true` if, after this attempt,
/// the cursor is on a block that has not
/// been entirely consumed.
/// Returns `false` if we have reached the end of the queue.
@@ -229,24 +217,20 @@ impl DeleteCursor {
self.pos = 0;
true
}
None => {
false
}
None => false,
}
}
else {
} else {
true
}
}
/// Advance to the next delete operation.
/// Returns true iff there is such an operation.
pub fn advance(&mut self) -> bool {
if self.load_block_if_required() {
self.pos += 1;
true
}
else {
} else {
false
}
}
@@ -256,12 +240,10 @@ impl DeleteCursor {
pub fn get(&mut self) -> Option<&DeleteOperation> {
if self.load_block_if_required() {
Some(&self.block.operations[self.pos])
}
else {
} else {
None
}
}
}
@@ -278,12 +260,12 @@ mod tests {
#[test]
fn test_deletequeue() {
let delete_queue = DeleteQueue::new();
let make_op = |i: usize| {
let field = Field(1u8);
let field = Field(1u32);
DeleteOperation {
opstamp: i as u64,
term: Term::from_field_u32(field, i as u32)
term: Term::from_field_u64(field, i as u64),
}
};
@@ -299,7 +281,7 @@ mod tests {
operations_it.advance();
assert!(operations_it.get().is_none());
operations_it.advance();
let mut snapshot2 = delete_queue.cursor();
assert!(snapshot2.get().is_none());
delete_queue.push(make_op(3));
@@ -310,7 +292,7 @@ mod tests {
assert!(operations_it.get().is_none());
operations_it.advance();
}
{
let mut operations_it = snapshot.clone();
assert_eq!(operations_it.get().unwrap().opstamp, 1);
operations_it.advance();
@@ -320,6 +302,6 @@ mod tests {
operations_it.advance();
assert!(operations_it.get().is_none());
}
}
}
}

View File

@@ -26,4 +26,4 @@ impl Drop for DirectoryLock {
error!("Failed to remove the lock file. {:?}", e);
}
}
}
}

View File

@@ -4,7 +4,7 @@ use DocId;
// Doc to opstamp is used to identify which
// documents should be deleted.
//
//
// Since the docset matching the query of a delete operation
// is not computed right when the delete operation is received,
// we need to find a way to evaluate, for each document,
@@ -14,13 +14,13 @@ use DocId;
//
// The doc to opstamp mapping stores precisely an array
// indexed by doc id and storing the opstamp of the document.
//
//
// This mapping is (for the moment) strictly increasing
// because of the way document ids are allocated.
#[derive(Clone)]
pub enum DocToOpstampMapping {
WithMap(Arc<Vec<u64>>),
None
None,
}
impl From<Vec<u64>> for DocToOpstampMapping {
@@ -30,9 +30,8 @@ impl From<Vec<u64>> for DocToOpstampMapping {
}
impl DocToOpstampMapping {
/// Given an opstamp, returns the limit doc id L
/// such that for all doc ids D,
/// D >= L iff opstamp(D) >= `target_opstamp`.
//
// The edge case opstamp = some doc opstamp is in practice
@@ -41,8 +40,7 @@ impl DocToOpstampMapping {
match *self {
DocToOpstampMapping::WithMap(ref doc_opstamps) => {
match doc_opstamps.binary_search(&target_opstamp) {
Ok(doc_id) => doc_id as DocId,
Err(doc_id) => doc_id as DocId,
Ok(doc_id) | Err(doc_id) => doc_id as DocId,
}
}
DocToOpstampMapping::None => DocId::max_value(),
@@ -58,23 +56,24 @@ mod tests {
#[test]
fn test_doc_to_opstamp_mapping_none() {
let doc_to_opstamp_mapping = DocToOpstampMapping::None;
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(1), u32::max_value());
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(1),
u32::max_value());
}
#[test]
fn test_doc_to_opstamp_mapping_complex() {
{
let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec!());
let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 0);
}
{
let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec!(1u64));
let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![1u64]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 1);
}
{
let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec!(1u64, 12u64, 17u64, 23u64));
let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![1u64, 12u64, 17u64, 23u64]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
for i in 2u64..13u64 {
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 1);
@@ -90,4 +89,4 @@ mod tests {
}
}
}
}
}

View File

@@ -9,11 +9,12 @@ use core::SegmentReader;
use indexer::stamper::Stamper;
use datastruct::stacker::Heap;
use directory::FileProtection;
use Error;
use error::{Error, ErrorKind, Result, ResultExt};
use Directory;
use fastfield::delete::write_delete_bitset;
use fastfield::write_delete_bitset;
use indexer::delete_queue::{DeleteCursor, DeleteQueue};
use futures::Canceled;
use datastruct::stacker::hashmap::split_memory;
use futures::Future;
use indexer::doc_opstamp_mapping::DocToOpstampMapping;
use indexer::MergePolicy;
@@ -22,12 +23,11 @@ use indexer::SegmentEntry;
use indexer::SegmentWriter;
use postings::DocSet;
use postings::SegmentPostingsOption;
use Result;
use schema::Document;
use schema::Schema;
use schema::Term;
use std::mem;
use std::mem::swap;
use std::mem::swap;
use std::thread::JoinHandle;
use super::directory_lock::DirectoryLock;
use super::operation::AddOperation;
@@ -36,12 +36,13 @@ use std::thread;
// Size of the margin for the heap. A segment is closed when the remaining memory
// in the heap goes below MARGIN_IN_BYTES.
pub const MARGIN_IN_BYTES: u32 = 10_000_000u32;
pub const MARGIN_IN_BYTES: u32 = 1_000_000u32;
// We impose the memory per thread to be at least 30 MB.
// We impose the memory per thread to be at least 3 MB.
pub const HEAP_SIZE_LIMIT: u32 = MARGIN_IN_BYTES * 3u32;
// Add document will block if the number of docs waiting in the queue to be indexed reaches PIPELINE_MAX_SIZE_IN_DOCS
// Add document will block if the number of docs waiting in the queue to be indexed
// reaches `PIPELINE_MAX_SIZE_IN_DOCS`
const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
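As a reminder of how these limits surface to users, a minimal indexing sketch, grounded in the tests further below (`40_000_000` comfortably exceeds `HEAP_SIZE_LIMIT`, and `doc: Document` is assumed populated):
let mut index_writer = index.writer(40_000_000).expect("writer");
index_writer.add_document(doc); // may block if the pipeline is full
index_writer.commit().expect("commit");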
type DocumentSender = chan::Sender<AddOperation>;
@@ -54,11 +55,10 @@ type DocumentReceiver = chan::Receiver<AddOperation>;
/// Each indexing thread builds its own independent `Segment`, via
/// a `SegmentWriter` object.
pub struct IndexWriter {
// the lock is just used to bind the
// lifetime of the lock with that of the IndexWriter.
_directory_lock: DirectoryLock,
index: Index,
heap_size_in_bytes_per_thread: usize,
@@ -96,40 +96,40 @@ impl !Sync for IndexWriter {}
/// `IndexWriter` on the system is accessing the index directory,
/// it is safe to manually delete the lockfile.
///
/// num_threads specifies the number of indexing workers that
/// `num_threads` specifies the number of indexing workers that
/// should work at the same time.
/// # Errors
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
/// # Panics
/// If the heap size per thread is too small, panics.
pub fn open_index_writer(
index: &Index,
num_threads: usize,
heap_size_in_bytes_per_thread: usize) -> Result<IndexWriter> {
pub fn open_index_writer(index: &Index,
num_threads: usize,
heap_size_in_bytes_per_thread: usize)
-> Result<IndexWriter> {
if heap_size_in_bytes_per_thread <= HEAP_SIZE_LIMIT as usize {
panic!(format!("The heap size per thread needs to be at least {}.",
HEAP_SIZE_LIMIT));
HEAP_SIZE_LIMIT));
}
let directory_lock = try!(DirectoryLock::lock(index.directory().box_clone()));
let directory_lock = DirectoryLock::lock(index.directory().box_clone())?;
let (document_sender, document_receiver): (DocumentSender, DocumentReceiver) =
chan::sync(PIPELINE_MAX_SIZE_IN_DOCS);
let delete_queue = DeleteQueue::new();
let stamper = Stamper::new(index.opstamp());
let segment_updater = SegmentUpdater::new(index.clone(),
stamper.clone(),
delete_queue.cursor())?;
let current_opstamp = index.opstamp();
let stamper = Stamper::new(current_opstamp);
let segment_updater =
SegmentUpdater::new(index.clone(), stamper.clone(), delete_queue.cursor())?;
let mut index_writer = IndexWriter {
_directory_lock: directory_lock,
heap_size_in_bytes_per_thread: heap_size_in_bytes_per_thread,
index: index.clone(),
@@ -138,46 +138,47 @@ pub fn open_index_writer(
segment_updater: segment_updater,
workers_join_handle: vec!(),
workers_join_handle: vec![],
num_threads: num_threads,
delete_queue: delete_queue,
committed_opstamp: index.opstamp(),
committed_opstamp: current_opstamp,
stamper: stamper,
generation: 0,
worker_id: 0,
};
try!(index_writer.start_workers());
index_writer.start_workers()?;
Ok(index_writer)
}
pub fn compute_deleted_bitset(
delete_bitset: &mut BitSet,
segment_reader: &SegmentReader,
delete_cursor: &mut DeleteCursor,
doc_opstamps: DocToOpstampMapping,
target_opstamp: u64) -> Result<bool> {
pub fn compute_deleted_bitset(delete_bitset: &mut BitSet,
segment_reader: &SegmentReader,
delete_cursor: &mut DeleteCursor,
doc_opstamps: &DocToOpstampMapping,
target_opstamp: u64)
-> Result<bool> {
let mut might_have_changed = false;
#[cfg_attr(feature = "cargo-clippy", allow(while_let_loop))]
loop {
if let Some(delete_op) = delete_cursor.get() {
if delete_op.opstamp > target_opstamp {
break;
}
else {
} else {
// A delete operation should only affect
// document that were inserted after it.
//
//
// Limit doc helps identify the first document
// that may be affected by the delete operation.
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
if let Some(mut docset) = segment_reader.read_postings(&delete_op.term, SegmentPostingsOption::NoFreq) {
if let Some(mut docset) =
segment_reader.read_postings(&delete_op.term, SegmentPostingsOption::NoFreq) {
while docset.advance() {
let deleted_doc = docset.doc();
if deleted_doc < limit_doc {
@@ -187,8 +188,7 @@ pub fn compute_deleted_bitset(
}
}
}
}
else {
} else {
break;
}
delete_cursor.advance();
@@ -196,16 +196,12 @@ pub fn compute_deleted_bitset(
Ok(might_have_changed)
}
// TODO skip delete operations before the
// last delete opstamp
/// Advance delete for the given segment up
/// to the target opstamp.
pub fn advance_deletes(
mut segment: Segment,
segment_entry: &mut SegmentEntry,
target_opstamp: u64) -> Result<Option<FileProtection>> {
pub fn advance_deletes(mut segment: Segment,
segment_entry: &mut SegmentEntry,
target_opstamp: u64)
-> Result<Option<FileProtection>> {
let mut file_protect: Option<FileProtection> = None;
@@ -218,24 +214,20 @@ pub fn advance_deletes(
}
let segment_reader = SegmentReader::open(segment.clone())?;
let max_doc = segment_reader.max_doc();
let mut delete_bitset: BitSet =
match segment_entry.delete_bitset() {
Some(ref previous_delete_bitset) =>
(*previous_delete_bitset).clone(),
None =>
BitSet::with_capacity(max_doc as usize)
};
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_capacity(max_doc as usize),
};
let delete_cursor = segment_entry.delete_cursor();
compute_deleted_bitset(
&mut delete_bitset,
&segment_reader,
delete_cursor,
DocToOpstampMapping::None,
target_opstamp)?;
compute_deleted_bitset(&mut delete_bitset,
&segment_reader,
delete_cursor,
&DocToOpstampMapping::None,
target_opstamp)?;
for doc in 0u32..max_doc {
if segment_reader.is_deleted(doc) {
delete_bitset.insert(doc as usize);
@@ -256,29 +248,43 @@ pub fn advance_deletes(
}
fn index_documents(heap: &mut Heap,
table_size: usize,
segment: Segment,
schema: &Schema,
generation: usize,
document_iterator: &mut Iterator<Item=AddOperation>,
document_iterator: &mut Iterator<Item = AddOperation>,
segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor)
-> Result<bool> {
heap.clear();
let segment_id = segment.id();
let mut segment_writer = SegmentWriter::for_segment(heap, segment.clone(), &schema)?;
let mut segment_writer = SegmentWriter::for_segment(heap, table_size, segment.clone(), schema)?;
for doc in document_iterator {
try!(segment_writer.add_document(&doc, &schema));
try!(segment_writer.add_document(&doc, schema));
// There are two possible conditions to close the segment.
// One is that the memory arena dedicated to the segment is
// getting full.
if segment_writer.is_buffer_full() {
info!("Buffer limit reached, flushing segment with maxdoc={}.",
segment_writer.max_doc());
break;
}
// The second is that the term dictionary hash table
// is reaching saturation.
//
// Tantivy does not resize its hashtable. When it reaches
// capacity, we simply stop indexing new documents.
if segment_writer.is_term_saturated() {
info!("Term dic saturated, flushing segment with maxdoc={}.",
segment_writer.max_doc());
break;
}
}
let num_docs = segment_writer.max_doc();
// this is ensured by the call to peek before starting
// the worker thread.
assert!(num_docs > 0);
let doc_opstamps: Vec<u64> = segment_writer.finalize()?;
@@ -286,65 +292,70 @@ fn index_documents(heap: &mut Heap,
segment_meta.set_max_doc(num_docs);
let last_docstamp: u64 = *(doc_opstamps.last().unwrap());
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let segment_reader = SegmentReader::open(segment)?;
let mut deleted_bitset = BitSet::with_capacity(num_docs as usize);
let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset,
&segment_reader,
&mut delete_cursor,
doc_to_opstamps,
last_docstamp,
)?;
let may_have_deletes = compute_deleted_bitset(&mut deleted_bitset,
&segment_reader,
&mut delete_cursor,
&doc_to_opstamps,
last_docstamp)?;
let segment_entry = SegmentEntry::new(
segment_meta,
delete_cursor,
{ if may_have_deletes { Some(deleted_bitset) }
else { None } }
);
Ok(
segment_updater
.add_segment(generation, segment_entry)
)
}
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, {
if may_have_deletes {
Some(deleted_bitset)
} else {
None
}
});
Ok(segment_updater.add_segment(generation, segment_entry))
}
impl IndexWriter {
/// The index writer
pub fn wait_merging_threads(mut self) -> Result<()> {
// this will stop the indexing thread,
// dropping the last reference to the segment_updater.
drop(self.document_sender);
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec!());
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
for join_handle in former_workers_handles {
try!(join_handle.join()
join_handle
.join()
.expect("Indexing Worker thread panicked")
.map_err(|e| {
Error::ErrorInThread(format!("Error in indexing worker thread. {:?}", e))
}));
.chain_err(|| ErrorKind::ErrorInThread("Error in indexing worker thread.".into()))?;
}
drop(self.workers_join_handle);
let result = self.segment_updater
.wait_merging_thread()
.map_err(|_|
Error::ErrorInThread("Failed to join merging thread.".to_string())
);
if let &Err(ref e) = &result {
let result =
self.segment_updater
.wait_merging_thread()
.chain_err(|| ErrorKind::ErrorInThread("Failed to join merging thread.".into()));
if let Err(ref e) = result {
error!("Some merging thread failed {:?}", e);
}
result
}
#[doc(hidden)]
pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
let delete_cursor = self.delete_queue.cursor();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
self.segment_updater
.add_segment(self.generation, segment_entry);
}
#[doc(hidden)]
pub fn new_segment(&self) -> Segment {
self.segment_updater.new_segment()
}
/// Spawns a new worker thread for indexing.
/// The thread consumes documents from the pipeline.
///
@@ -352,48 +363,48 @@ impl IndexWriter {
let schema = self.index.schema();
let document_receiver_clone = self.document_receiver.clone();
let mut segment_updater = self.segment_updater.clone();
let mut heap = Heap::with_capacity(self.heap_size_in_bytes_per_thread);
let (heap_size, table_size) = split_memory(self.heap_size_in_bytes_per_thread);
info!("heap size {}, table_size {}", heap_size, table_size);
let mut heap = Heap::with_capacity(heap_size);
let generation = self.generation;
let mut delete_cursor = self.delete_queue.cursor();
let join_handle: JoinHandle<Result<()>> =
thread::Builder::new()
let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!("indexing thread {} for gen {}", self.worker_id, generation))
.spawn(move || {
loop {
let mut document_iterator = document_receiver_clone.clone()
.into_iter()
.peekable();
let mut document_iterator =
document_receiver_clone.clone().into_iter().peekable();
// the peeking here is to avoid
// creating a new segment's files
// if no documents are available.
//
// this is a valid guarantee as the
// this is a valid guarantee as the
// peeked document now belongs to
// our local iterator.
if let Some(operation) = document_iterator.peek() {
delete_cursor.skip_to(operation.opstamp);
}
else {
} else {
// No more documents.
// Happens when there is a commit, or if the `IndexWriter`
// was dropped.
return Ok(())
return Ok(());
}
let segment = segment_updater.new_segment();
index_documents(&mut heap,
table_size,
segment,
&schema,
generation,
&mut document_iterator,
&mut segment_updater,
delete_cursor.clone())?;
}
})?;
self.worker_id += 1;
@@ -410,7 +421,7 @@ impl IndexWriter {
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy);
}
fn start_workers(&mut self) -> Result<()> {
for _ in 0..self.num_threads {
try!(self.add_indexing_worker());
@@ -425,7 +436,9 @@ impl IndexWriter {
}
/// Merges a given list of segments
pub fn merge(&mut self, segment_ids: &[SegmentId]) -> impl Future<Item=SegmentMeta, Error=Canceled> {
pub fn merge(&mut self,
segment_ids: &[SegmentId])
-> impl Future<Item = SegmentMeta, Error = Canceled> {
self.segment_updater.start_merge(segment_ids)
}
@@ -438,7 +451,8 @@ impl IndexWriter {
///
/// Returns the former segment_ready channel.
fn recreate_document_channel(&mut self) -> DocumentReceiver {
let (mut document_sender, mut document_receiver): (DocumentSender, DocumentReceiver) =
let (mut document_sender, mut document_receiver): (DocumentSender,
DocumentReceiver) =
chan::sync(PIPELINE_MAX_SIZE_IN_DOCS);
swap(&mut self.document_sender, &mut document_sender);
swap(&mut self.document_receiver, &mut document_receiver);
@@ -466,12 +480,9 @@ impl IndexWriter {
let heap_size_in_bytes_per_thread = self.heap_size_in_bytes_per_thread;
drop(self);
for _ in receiver_clone {}
let index_writer = open_index_writer(
&index,
num_threads,
heap_size_in_bytes_per_thread)?;
let index_writer = open_index_writer(&index, num_threads, heap_size_in_bytes_per_thread)?;
Ok(index_writer)
}
@@ -513,23 +524,25 @@ impl IndexWriter {
let mut former_workers_join_handle = Vec::new();
swap(&mut former_workers_join_handle,
&mut self.workers_join_handle);
for worker_handle in former_workers_join_handle {
let indexing_worker_result = try!(worker_handle.join()
.map_err(|e| Error::ErrorInThread(format!("{:?}", e))));
try!(indexing_worker_result);
let indexing_worker_result =
worker_handle
.join()
.map_err(|e| Error::from_kind(ErrorKind::ErrorInThread(format!("{:?}", e))))?;
indexing_worker_result?;
// add a new worker for the next generation.
try!(self.add_indexing_worker());
self.add_indexing_worker()?;
}
// wait for the segment update thread to have processed the info
self.segment_updater
.commit(self.committed_opstamp)?;
self.segment_updater.commit(self.committed_opstamp)?;
Ok(self.committed_opstamp)
}
}
/// Delete all documents containing a given term.
///
@@ -537,7 +550,7 @@ impl IndexWriter {
/// were added in previous commits, and documents
/// that were added previously in the same commit.
///
/// Like adds, the deletion itself will be visible
/// Like adds, the deletion itself will be visible
/// only after calling `commit()`.
pub fn delete_term(&mut self, term: Term) -> u64 {
let opstamp = self.stamper.stamp();
@@ -550,7 +563,7 @@ impl IndexWriter {
}
/// Returns the opstamp of the last successful commit.
///
///
/// This is, for instance, the opstamp the index will
/// rollback to if there is a failure like a power surge.
///
@@ -591,7 +604,7 @@ mod tests {
use schema::{self, Document};
use Index;
use Term;
use Error;
use error::*;
use env_logger;
#[test]
@@ -600,20 +613,23 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer(40_000_000).unwrap();
match index.writer(40_000_000) {
Err(Error(ErrorKind::FileAlreadyExists(_), _)) => {}
_ => panic!("Expected FileAlreadyExists error"),
}
}
#[test]
fn test_set_merge_policy() {
let schema_builder = schema::SchemaBuilder::default();
let index = Index::create_in_ram(schema_builder.build());
let index_writer = index.writer(40_000_000).unwrap();
assert_eq!(format!("{:?}", index_writer.get_merge_policy()),
"LogMergePolicy { min_merge_size: 8, min_layer_size: 10000, \
level_log_size: 0.75 }");
let merge_policy = box NoMergePolicy::default();
index_writer.set_merge_policy(merge_policy);
assert_eq!(format!("{:?}", index_writer.get_merge_policy()),
"NoMergePolicy");
}
#[test]
@@ -622,12 +638,12 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());
{
let _index_writer = index.writer(40_000_000).unwrap();
// the lock should be released when the
// index_writer leaves the scope.
}
let _index_writer_two = index.writer(40_000_000).unwrap();
}
#[test]
fn test_commit_and_rollback() {
let mut schema_builder = schema::SchemaBuilder::default();
@@ -650,7 +666,7 @@ mod tests {
}
index_writer = index_writer.rollback().unwrap();
assert_eq!(index_writer.commit_opstamp(), 0u64);
assert_eq!(num_docs_containing("a"), 0);
@@ -703,12 +719,14 @@ mod tests {
}
// this should create 8 segments and trigger a merge.
index_writer.commit().expect("commit failed");
index_writer
.wait_merging_threads()
.expect("waiting merging thread failed");
index.load_searchers().unwrap();
assert_eq!(num_docs_containing("a"), 200);
assert!(index.searchable_segments().unwrap().len() < 8);
}
}


@@ -9,7 +9,7 @@ const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
const DEFAULT_MIN_MERGE_SIZE: usize = 8;
/// `LogMergePolicy` tries to merge segments that have a similar number of
/// documents.
#[derive(Debug, Clone)]
pub struct LogMergePolicy {
@@ -24,7 +24,7 @@ impl LogMergePolicy {
}
/// Set the minimum number of segments that may be merged together.
pub fn set_min_merge_size(&mut self, min_merge_size: usize) {
self.min_merge_size = min_merge_size;
}
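To make the log-bucketing concrete, here is an illustrative walk-through (hypothetical sizes; not the exact implementation below): segment sizes are compared in log2 space, segments within `level_log_size` of the level head share a level, and only levels with at least `min_merge_size` members become merge candidates.

// six hypothetical segments, sized in documents
let sizes = [100_000u32, 90_000, 1_200, 1_000, 950, 10];
let logs: Vec<f64> = sizes.iter().map(|&n| (n as f64).log2()).collect();
// logs ≈ [16.61, 16.46, 10.23, 9.97, 9.89, 3.32]
// with level_log_size = 0.75, the levels are:
//   [100_000, 90_000]    (head 16.61)
//   [1_200, 1_000, 950]  (head 10.23)
//   [10]                 (head 3.32)
// with min_merge_size = 3, only the middle level yields a MergeCandidate.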
@@ -52,14 +52,16 @@ impl MergePolicy for LogMergePolicy {
return Vec::new();
}
let mut size_sorted_tuples = segments
.iter()
.map(|x| x.num_docs())
.enumerate()
.collect::<Vec<(usize, u32)>>();
size_sorted_tuples.sort_by(|x, y| y.cmp(x));
let size_sorted_log_tuples: Vec<_> = size_sorted_tuples
.into_iter()
.map(|(ind, num_docs)| (ind, (self.clip_min_size(num_docs) as f64).log2()))
.collect();
@@ -77,14 +79,10 @@ impl MergePolicy for LogMergePolicy {
levels
.iter()
.filter(|level| level.len() >= self.min_merge_size)
.map(|ind_vec| MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect()))
.collect()
}
fn box_clone(&self) -> Box<MergePolicy> {
box self.clone()
}
@@ -128,9 +126,7 @@ mod tests {
#[test]
fn test_log_merge_policy_pair() {
let test_input = vec![seg_meta(10), seg_meta(10), seg_meta(10)];
let result_list = test_merge_policy().compute_merge_candidates(&test_input);
assert_eq!(result_list.len(), 1);
}


@@ -4,26 +4,26 @@ use std::marker;
use std::fmt::Debug;
/// Set of segments suggested for a merge.
#[derive(Debug, Clone)]
pub struct MergeCandidate(pub Vec<SegmentId>);
/// The `MergePolicy` defines which segments should be merged.
///
/// Every time the list of segments changes, the segment updater
/// asks the merge policy if some segments should be merged.
pub trait MergePolicy: marker::Send + marker::Sync + Debug {
/// Given the list of segment metas, returns the list of merge candidates.
///
/// This call happens on the segment updater thread, and will block
/// other segment updates, so implementations should return quickly.
fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate>;
/// Returns a boxed clone of the MergePolicy.
fn box_clone(&self) -> Box<MergePolicy>;
}
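As a sketch of what implementing the trait takes, here is a hypothetical policy (the `MergeSmallSegments` type is invented for illustration; `num_docs()` and `id()` are the `SegmentMeta` accessors used elsewhere in this diff):

#[derive(Debug, Clone)]
struct MergeSmallSegments {
    max_docs: u32,
}

impl MergePolicy for MergeSmallSegments {
    fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
        // gather every segment below the size threshold...
        let small: Vec<SegmentId> = segments
            .iter()
            .filter(|meta| meta.num_docs() < self.max_docs)
            .map(|meta| meta.id())
            .collect();
        // ...and suggest merging them together if there are at least two.
        if small.len() > 1 {
            vec![MergeCandidate(small)]
        } else {
            vec![]
        }
    }

    fn box_clone(&self) -> Box<MergePolicy> {
        box self.clone()
    }
}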
/// Never merge segments.
#[derive(Debug)]
pub struct NoMergePolicy;
@@ -37,7 +37,7 @@ impl MergePolicy for NoMergePolicy {
fn compute_merge_candidates(&self, _segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
Vec::new()
}
fn box_clone(&self) -> Box<MergePolicy> {
box NoMergePolicy
}
@@ -52,7 +52,7 @@ pub mod tests {
use core::SegmentMeta;
/// `MergePolicy` useful for test purposes.
///
/// Every time there is more than one segment,
/// it will suggest to merge them.
@@ -66,15 +66,14 @@ pub mod tests {
.map(|segment_meta| segment_meta.id())
.collect::<Vec<SegmentId>>();
if segment_ids.len() > 1 {
vec![MergeCandidate(segment_ids)]
} else {
vec![]
}
}
fn box_clone(&self) -> Box<MergePolicy> {
box MergeWheneverPossible
}
}
}
}


@@ -1,4 +1,4 @@
use error::{ErrorKind, Result};
use core::SegmentReader;
use core::Segment;
use DocId;
@@ -6,17 +6,20 @@ use core::SerializableSegment;
use schema::FieldValue;
use indexer::SegmentSerializer;
use postings::PostingsSerializer;
use fastfield::U64FastFieldReader;
use itertools::Itertools;
use postings::Postings;
use postings::DocSet;
use core::TermIterator;
use fastfield::DeleteBitSet;
use schema::{Schema, Field};
use termdict::TermMerger;
use fastfield::FastFieldSerializer;
use fastfield::FastFieldReader;
use store::StoreWriter;
use std::cmp::{min, max};
use schema::Term;
use termdict::TermStreamer;
use postings::SegmentPostingsOption;
pub struct IndexMerger {
schema: Schema,
@@ -31,9 +34,7 @@ struct DeltaPositionComputer {
impl DeltaPositionComputer {
fn new() -> DeltaPositionComputer {
DeltaPositionComputer {
DeltaPositionComputer { buffer: vec![0u32; 512] }
}
fn compute_delta_positions(&mut self, positions: &[u32]) -> &[u32] {
@@ -50,37 +51,42 @@ impl DeltaPositionComputer {
}
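For reference, this is the transformation `compute_delta_positions` performs, written as a free-standing sketch (assuming positions arrive sorted in increasing order; the real method reuses its internal buffer instead of allocating):

fn delta_encode(positions: &[u32]) -> Vec<u32> {
    let mut previous = 0u32;
    positions
        .iter()
        .map(|&position| {
            let delta = position - previous;
            previous = position;
            delta
        })
        .collect()
}
// delta_encode(&[3, 7, 8, 15]) == vec![3, 4, 1, 7]
// small deltas bitpack much better than absolute positions.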
fn compute_min_max_val(u64_reader: &U64FastFieldReader,
max_doc: DocId,
delete_bitset: &DeleteBitSet)
-> Option<(u64, u64)> {
if max_doc == 0 {
None
} else if !delete_bitset.has_deletes() {
// no deleted documents,
// we can use the previous min_val, max_val.
Some((u64_reader.min_value(), u64_reader.max_value()))
} else {
// some deleted documents,
// we need to recompute the max / min
(0..max_doc)
.filter(|doc_id| !delete_bitset.is_deleted(*doc_id))
.map(|doc_id| u64_reader.get(doc_id))
.minmax()
.into_option()
}
}
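// Worked example for the function above (hypothetical values): a segment
// with max_doc = 4, fast field values [10, 2, 7, 99], and document 3
// deleted recomputes to Some((2, 10)), rather than trusting the stored
// (min_value(), max_value()) = (2, 99), because the deleted document no
// longer contributes to the bounds.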
fn extract_fieldnorm_reader(segment_reader: &SegmentReader,
field: Field)
-> Option<U64FastFieldReader> {
segment_reader.get_fieldnorms_reader(field)
}
fn extract_fast_field_reader(segment_reader: &SegmentReader,
field: Field)
-> Option<U64FastFieldReader> {
segment_reader.fast_fields_reader().open_reader(field)
}
impl IndexMerger {
pub fn open(schema: Schema, segments: &[Segment]) -> Result<IndexMerger> {
let mut readers = vec![];
let mut max_doc: u32 = 0u32;
for segment in segments {
if segment.meta().num_docs() > 0 {
@@ -90,117 +96,129 @@ impl IndexMerger {
}
}
Ok(IndexMerger {
schema: schema,
readers: readers,
max_doc: max_doc,
})
}
fn write_fieldnorms(&self, fast_field_serializer: &mut FastFieldSerializer) -> Result<()> {
let fieldnorm_fastfields: Vec<Field> = self.schema
.fields()
.iter()
.enumerate()
.filter(|&(_, field_entry)| field_entry.is_indexed())
.map(|(field_id, _)| Field(field_id as u32))
.collect();
self.generic_write_fast_field(fieldnorm_fastfields,
&extract_fieldnorm_reader,
fast_field_serializer)
}
fn write_fast_fields(&self, fast_field_serializer: &mut FastFieldSerializer) -> Result<()> {
let fast_fields: Vec<Field> = self.schema
.fields()
.iter()
.enumerate()
.filter(|&(_, field_entry)| field_entry.is_int_fast())
.map(|(field_id, _)| Field(field_id as u32))
.collect();
self.generic_write_fast_field(fast_fields,
&extract_fast_field_reader,
fast_field_serializer)
}
// used both to merge field norms and regular u64 fast fields.
fn generic_write_fast_field(&self,
fields: Vec<Field>,
field_reader_extractor: &Fn(&SegmentReader, Field)
-> Option<U64FastFieldReader>,
fast_field_serializer: &mut FastFieldSerializer)
-> Result<()> {
for field in fields {
let mut u64_readers = vec![];
let mut min_val = u64::max_value();
let mut max_val = u64::min_value();
for reader in &self.readers {
match field_reader_extractor(reader, field) {
Some(u64_reader) => {
if let Some((seg_min_val, seg_max_val)) =
compute_min_max_val(&u64_reader,
reader.max_doc(),
reader.delete_bitset()) {
// the segment has some non-deleted documents
min_val = min(min_val, seg_min_val);
max_val = max(max_val, seg_max_val);
u64_readers
.push((reader.max_doc(), u64_reader, reader.delete_bitset()));
}
}
None => {
let error_msg = format!("Failed to find a u64_reader for field {:?}",
field);
error!("{}", error_msg);
bail!(ErrorKind::SchemaError(error_msg));
}
}
}
if u64_readers.is_empty() {
// we have actually zero documents.
min_val = 0;
max_val = 0;
}
assert!(min_val <= max_val);
fast_field_serializer
.new_u64_fast_field(field, min_val, max_val)?;
for (max_doc, u64_reader, delete_bitset) in u64_readers {
for doc_id in 0..max_doc {
if !delete_bitset.is_deleted(doc_id) {
let val = u64_reader.get(doc_id);
fast_field_serializer.add_val(val)?;
}
}
}
fast_field_serializer.close_field()?;
}
Ok(())
}
fn write_postings(&self, serializer: &mut PostingsSerializer) -> Result<()> {
let mut merged_terms = TermMerger::from(&self.readers[..]);
let mut delta_position_computer = DeltaPositionComputer::new();
let mut max_doc = 0;
// map from segment doc ids to the resulting merged segment doc id.
let mut merged_doc_id_map: Vec<Vec<Option<DocId>>> = Vec::with_capacity(self.readers.len());
for reader in &self.readers {
let mut segment_local_map = Vec::with_capacity(reader.max_doc() as usize);
for doc_id in 0..reader.max_doc() {
if reader.is_deleted(doc_id) {
segment_local_map.push(None);
} else {
segment_local_map.push(Some(max_doc));
max_doc += 1u32;
}
}
merged_doc_id_map.push(segment_local_map);
}
let mut last_field: Option<Field> = None;
let mut segment_postings_option = SegmentPostingsOption::FreqAndPositions;
while merged_terms.advance() {
// Create the total list of doc ids
// by stacking the doc ids from the different segment.
//
@@ -208,53 +226,88 @@ impl IndexMerger {
// segment are stacked so that :
// - Segment 0's doc ids become doc id [0, seg.max_doc]
// - Segment 1's doc ids become [seg0.max_doc, seg0.max_doc + seg.max_doc]
// - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc,
// seg0.max_doc + seg1.max_doc + seg2.max_doc]
// ...
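// Worked example of the stacking (hypothetical deletes): with
// seg0.max_doc = 3 where doc 1 is deleted, and seg1.max_doc = 2 with no
// deletes, the tables built below are
//   merged_doc_id_map[0] = [Some(0), None, Some(1)]
//   merged_doc_id_map[1] = [Some(2), Some(3)]
// deleted documents map to None and the survivors are packed
// contiguously into the merged segment.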
let term = Term::wrap(merged_terms.key());
let current_field = term.field();
if last_field != Some(current_field) {
// we reached a new field.
let field_entry = self.schema.get_field_entry(current_field);
// ... set segment postings option the new field.
segment_postings_option = field_entry
.field_type()
.get_segment_postings_option()
.expect("Encountered a field that is not supposed to be
indexed. Have you modified the index?");
last_field = Some(current_field);
// it is perfectly safe to call `.new_field`
// even if there is no postings associated.
serializer.new_field(current_field);
}
// Let's compute the list of non-empty posting lists
let segment_postings: Vec<_> = merged_terms
.current_kvs()
.iter()
.flat_map(|heap_item| {
let segment_ord = heap_item.segment_ord;
let term_info = heap_item.streamer.value();
let segment_reader = &self.readers[heap_item.segment_ord];
let mut segment_postings =
segment_reader
.read_postings_from_terminfo(term_info, segment_postings_option);
if segment_postings.advance() {
Some((segment_ord, segment_postings))
} else {
None
}
})
.collect();
// At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term.
//
// These segments are non-empty and advance has already been called.
if segment_postings.is_empty() {
// by continuing here, the `term` will be entirely removed.
continue;
}
// We know that there is at least one document containing
// the term, so we add it.
serializer.new_term(term.as_ref())?;
// We can now serialize this postings, by pushing each document to the
// postings serializer.
for (segment_ord, mut segment_postings) in segment_postings {
let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
loop {
// `.advance()` has been called once before the loop.
// Hence we cannot use a `while segment_postings.advance()` loop.
if let Some(remapped_doc_id) =
old_to_new_doc_id[segment_postings.doc() as usize] {
// we make sure to only write the term iff
// there is at least one document.
let delta_positions: &[u32] =
delta_position_computer
.compute_delta_positions(segment_postings.positions());
let term_freq = segment_postings.term_freq();
serializer
.write_doc(remapped_doc_id, term_freq, delta_positions)?;
}
if !segment_postings.advance() {
break;
}
}
}
// closing the term.
serializer.close_term()?;
}
Ok(())
}
@@ -265,12 +318,10 @@ impl IndexMerger {
for doc_id in 0..reader.max_doc() {
if !reader.is_deleted(doc_id) {
let doc = try!(store_reader.get(doc_id));
let field_values: Vec<&FieldValue> = doc.field_values().iter().collect();
try!(store_writer.store(&field_values));
}
}
}
}
Ok(())
}
@@ -293,8 +344,9 @@ mod tests {
use schema::Document;
use schema::Term;
use query::TermQuery;
use schema::Field;
use core::Index;
use fastfield::U64FastFieldReader;
use Searcher;
use DocAddress;
use collector::tests::FastFieldTestCollector;
@@ -308,11 +360,11 @@ mod tests {
fn test_index_merger_no_deletes() {
let mut schema_builder = schema::SchemaBuilder::default();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(TextIndexingOptions::TokenizedWithFreq)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let index = Index::create_in_ram(schema_builder.build());
{
@@ -322,19 +374,19 @@ mod tests {
{
let mut doc = Document::default();
doc.add_text(text_field, "af b");
doc.add_u64(score_field, 3);
index_writer.add_document(doc);
}
{
let mut doc = Document::default();
doc.add_text(text_field, "a b c");
doc.add_u64(score_field, 5);
index_writer.add_document(doc);
}
{
let mut doc = Document::default();
doc.add_text(text_field, "a b c d");
doc.add_u64(score_field, 7);
index_writer.add_document(doc);
}
index_writer.commit().expect("committed");
@@ -345,24 +397,27 @@ mod tests {
{
let mut doc = Document::default();
doc.add_text(text_field, "af b");
doc.add_u64(score_field, 11);
index_writer.add_document(doc);
}
{
let mut doc = Document::default();
doc.add_text(text_field, "a b c g");
doc.add_u64(score_field, 13);
index_writer.add_document(doc);
}
index_writer.commit().expect("Commit failed");
}
}
{
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer
.merge(&segment_ids)
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
}
{
@@ -376,13 +431,13 @@ mod tests {
};
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![1, 2, 4]);
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![0, 3]);
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "g")]),
vec![4]);
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![0, 1, 2, 3, 4]);
}
{
let doc = searcher.doc(&DocAddress(0, 0)).unwrap();
@@ -412,12 +467,12 @@ mod tests {
collector.vals()
};
assert_eq!(get_fast_vals(vec![Term::from_field_text(text_field, "a")]),
vec![5, 7, 13]);
}
}
}
fn search_term(searcher: &Searcher, term: Term) -> Vec<u64> {
let mut collector = FastFieldTestCollector::for_field(Field(1));
let term_query = TermQuery::new(term, SegmentPostingsOption::NoFreq);
searcher.search(&term_query, &mut collector).unwrap();
@@ -427,32 +482,31 @@ mod tests {
#[test]
fn test_index_merger_with_deletes() {
let mut schema_builder = schema::SchemaBuilder::default();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(TextIndexingOptions::TokenizedWithFreq)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let empty_vec = Vec::<u64>::new();
{
// a first commit
index_writer.add_document(doc!(
text_field => "a b d",
score_field => 1u64
));
index_writer.add_document(doc!(
text_field => "b c",
score_field => 2u64
));
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.add_document(doc!(
text_field => "c d",
score_field => 3u64
));
index_writer.commit().expect("committed");
index.load_searchers().unwrap();
@@ -460,33 +514,34 @@ mod tests {
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "a")),
vec![1]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "b")),
vec![1]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![1, 3]);
}
{
// a second commit
index_writer.add_document(doc!(
text_field => "a d e",
score_field => 4_000u64
));
index_writer.add_document(doc!(
text_field => "e f",
score_field => 5_000u64
));
index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.delete_term(Term::from_field_text(text_field, "f"));
index_writer.add_document(doc!(
text_field => "f g",
score_field => 6_000u64
));
index_writer.add_document(doc!(
text_field => "g h",
score_field => 7_000u64
));
index_writer.commit().expect("committed");
index.load_searchers().unwrap();
@@ -498,71 +553,112 @@ mod tests {
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(searcher.segment_readers()[1].num_docs(), 2);
assert_eq!(searcher.segment_readers()[1].max_doc(), 4);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![3]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000]);
let score_field_reader: U64FastFieldReader = searcher
.segment_reader(0)
.get_fast_field_reader(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
let score_field_reader: U64FastFieldReader = searcher
.segment_reader(1)
.get_fast_field_reader(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// merging the segments
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
index_writer
.merge(&segment_ids)
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
let ref searcher = *index.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![3]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000]);
let score_field_reader: U64FastFieldReader = searcher
.segment_reader(0)
.get_fast_field_reader(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// test a commit with only deletes
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let ref searcher = *index.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "c")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "d")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000]);
let score_field_reader: U64FastFieldReader = searcher
.segment_reader(0)
.get_fast_field_reader(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// Test merging a single segment in order to remove deletes.
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
index_writer
.merge(&segment_ids)
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
let ref searcher = *index.searcher();
@@ -570,31 +666,45 @@ mod tests {
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 2);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "c")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "d")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000]);
assert_eq!(search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000]);
let score_field_reader: U64FastFieldReader = searcher
.segment_reader(0)
.get_fast_field_reader(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 6000);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// Test removing all docs
index_writer.delete_term(Term::from_field_text(text_field, "g"));
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
index_writer
.merge(&segment_ids)
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
let ref searcher = *index.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 0);
}
}
}


@@ -22,5 +22,5 @@ pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_policy::{NoMergePolicy, MergeCandidate, MergePolicy};
pub use self::segment_manager::SegmentManager;
/// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy;


@@ -8,11 +8,11 @@ use std::fmt;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum SegmentState {
Ready,
InMerge,
}
impl SegmentState {
pub fn letter_code(&self) -> char {
match *self {
SegmentState::InMerge => 'M',
SegmentState::Ready => 'R',
@@ -21,35 +21,33 @@ impl SegmentState {
}
/// A segment entry describes the state of
/// a given segment, at a given instant.
///
/// In addition to segment `meta`,
/// it contains a few transient states
/// - `state` expresses whether the segment is already in the
/// middle of a merge
/// - `delete_bitset` is a bitset describing
/// documents that were deleted during the commit
/// itself.
/// - `delete_cursor` is the position in the delete queue.
/// Deletes happening before the cursor are reflected either
/// in the .del file or in the `delete_bitset`.
#[derive(Clone)]
pub struct SegmentEntry {
meta: SegmentMeta,
state: SegmentState,
delete_bitset: Option<BitSet>,
delete_cursor: DeleteCursor,
}
impl SegmentEntry {
/// Create a new `SegmentEntry`
pub fn new(segment_meta: SegmentMeta,
delete_cursor: DeleteCursor,
delete_bitset: Option<BitSet>)
-> SegmentEntry {
SegmentEntry {
meta: segment_meta,
state: SegmentState::Ready,
@@ -62,7 +60,7 @@ impl SegmentEntry {
/// Return a reference to the segment entry deleted bitset.
///
/// `DocId` in this bitset are flagged as deleted.
pub fn delete_bitset(&self) -> Option<&BitSet> {
self.delete_bitset.as_ref()
}
@@ -77,7 +75,7 @@ impl SegmentEntry {
&mut self.delete_cursor
}
/// Return the state of the `SegmentEntry`.
///
/// The state describes whether the segment is available for
/// a merge or not.
@@ -89,7 +87,7 @@ impl SegmentEntry {
pub fn segment_id(&self) -> SegmentId {
self.meta.id()
}
/// Accessor to the `SegmentMeta`
pub fn meta(&self) -> &SegmentMeta {
@@ -99,9 +97,9 @@ impl SegmentEntry {
/// Mark the `SegmentEntry` as in merge.
///
/// Only segments that are not already
/// in a merge are eligible for future merges.
pub fn start_merge(&mut self) {
self.state = SegmentState::InMerge;
}
@@ -110,14 +108,14 @@ impl SegmentEntry {
/// If a merge fails, it is important to switch
/// the segment back to an idle state, so that it
/// may be eligible for future merges.
pub fn cancel_merge(&mut self) {
self.state = SegmentState::Ready;
}
/// Returns true iff a segment should
/// be considered for a merge.
pub fn is_ready(&self) -> bool {
self.state == SegmentState::Ready
}
}


@@ -14,7 +14,7 @@ use indexer::delete_queue::DeleteCursor;
struct SegmentRegisters {
uncommitted: SegmentRegister,
committed: SegmentRegister,
writing: HashSet<SegmentId>,
}
@@ -22,7 +22,7 @@ struct SegmentRegisters {
/// The segment manager stores the list of segments
/// as well as their state.
///
/// It guarantees the atomicity of the
/// changes (merges especially)
#[derive(Default)]
pub struct SegmentManager {
@@ -32,43 +32,43 @@ pub struct SegmentManager {
impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
let lock = self.read();
write!(f,
"{{ uncommitted: {:?}, committed: {:?} }}",
lock.uncommitted,
lock.committed)
}
}
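The atomicity guarantee above boils down to holding a single write lock across a multi-step register update. A simplified, module-internal sketch of the shape (derived from the `end_merge` code further down, not a verbatim excerpt):

fn end_merge_sketch(manager: &SegmentManager,
                    before_merge_segment_ids: &[SegmentId],
                    merged_entry: SegmentEntry) {
    // a single RwLock write guard spans the whole swap...
    let mut registers_lock = manager.write();
    for segment_id in before_merge_segment_ids {
        registers_lock.committed.remove_segment(segment_id);
    }
    registers_lock.committed.add_segment_entry(merged_entry);
    // ...so a concurrent reader observes either the old segment set
    // or the new one, never a half-applied merge.
}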
pub fn get_mergeable_segments(segment_manager: &SegmentManager)
-> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
let registers_lock = segment_manager.read();
(registers_lock.committed.get_mergeable_segments(),
registers_lock.uncommitted.get_mergeable_segments())
}
impl SegmentManager {
pub fn from_segments(segment_metas: Vec<SegmentMeta>,
delete_cursor: DeleteCursor)
-> SegmentManager {
SegmentManager {
registers: RwLock::new(SegmentRegisters {
uncommitted: SegmentRegister::default(),
committed: SegmentRegister::new(segment_metas,
delete_cursor),
writing: HashSet::new(),
}),
}
}
/// Returns all of the segment entries (committed or uncommitted)
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
let mut segment_entries = self.read().uncommitted.segment_entries();
segment_entries.extend(self.read().committed.segment_entries());
segment_entries
}
/// Returns the overall number of segments in the `SegmentManager`
pub fn num_segments(&self) -> usize {
let registers_lock = self.read();
registers_lock.committed.len() + registers_lock.uncommitted.len()
}
@@ -78,19 +78,14 @@ impl SegmentManager {
let mut files = HashSet::new();
files.insert(META_FILEPATH.clone());
files.insert(LOCKFILE_FILEPATH.clone());
let segment_metas: Vec<SegmentMeta> = registers_lock
.committed
.get_all_segments()
.into_iter()
.chain(registers_lock.uncommitted.get_all_segments().into_iter())
.chain(registers_lock.writing.iter().cloned().map(SegmentMeta::new))
.collect();
for segment_meta in segment_metas {
files.extend(segment_meta.list_files());
}
@@ -102,18 +97,22 @@ impl SegmentManager {
registers
.committed
.segment_entry(segment_id)
.or_else(|| registers.uncommitted.segment_entry(segment_id))
}
// Lock poisoning should never happen:
// The lock is acquired and released within this class,
// and the operations cannot panic.
fn read(&self) -> RwLockReadGuard<SegmentRegisters> {
self.registers
.read()
.expect("Failed to acquire read lock on SegmentManager.")
}
fn write(&self) -> RwLockWriteGuard<SegmentRegisters> {
self.registers
.write()
.expect("Failed to acquire write lock on SegmentManager.")
}
pub fn commit(&self, segment_entries: Vec<SegmentEntry>) {
@@ -124,42 +123,42 @@ impl SegmentManager {
registers_lock.committed.add_segment_entry(segment_entry);
}
}
pub fn start_merge(&self, segment_ids: &[SegmentId]) {
let mut registers_lock = self.write();
if registers_lock.uncommitted.contains_all(segment_ids) {
for segment_id in segment_ids {
registers_lock.uncommitted.start_merge(segment_id);
}
} else if registers_lock.committed.contains_all(segment_ids) {
for segment_id in segment_ids {
registers_lock.committed.start_merge(segment_id);
}
} else {
error!("Merge operation sent for segments that are not all uncommitted or committed.");
}
}
pub fn cancel_merge(&self,
before_merge_segment_ids: &[SegmentId],
after_merge_segment_id: SegmentId) {
let mut registers_lock = self.write();
// we mark all segments are ready for merge.
{
let target_segment_register: &mut SegmentRegister;
target_segment_register = {
if registers_lock
.uncommitted
.contains_all(before_merge_segment_ids) {
&mut registers_lock.uncommitted
} else if registers_lock
.committed
.contains_all(before_merge_segment_ids) {
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager");
return;
}
@@ -172,7 +171,6 @@ impl SegmentManager {
// ... and we make sure the target segment entry
// can be garbage collected.
registers_lock.writing.remove(&after_merge_segment_id);
}
@@ -186,19 +184,24 @@ impl SegmentManager {
registers_lock.writing.remove(&segment_entry.segment_id());
registers_lock.uncommitted.add_segment_entry(segment_entry);
}
pub fn end_merge(&self,
before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry) {
let mut registers_lock = self.write();
registers_lock
.writing
.remove(&after_merge_segment_entry.segment_id());
let mut target_register: &mut SegmentRegister = {
if registers_lock
.uncommitted
.contains_all(before_merge_segment_ids) {
&mut registers_lock.uncommitted
} else if registers_lock
.committed
.contains_all(before_merge_segment_ids) {
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager");
@@ -210,12 +213,12 @@ impl SegmentManager {
}
target_register.add_segment_entry(after_merge_segment_entry);
}
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {
let registers_lock = self.read();
registers_lock.committed.segment_metas()
}


@@ -9,14 +9,14 @@ use indexer::delete_queue::DeleteCursor;
/// The segment register keeps track
/// of the list of segments, their size as well
/// as the state they are in.
///
/// It is consumed by indexes to get the list of
/// segments that are currently searchable,
/// and by the index merger to identify
/// merge candidates.
#[derive(Default)]
pub struct SegmentRegister {
segment_states: HashMap<SegmentId, SegmentEntry>,
}
@@ -33,8 +33,7 @@ impl Debug for SegmentRegister {
impl SegmentRegister {
pub fn clear(&mut self) {
self.segment_states.clear();
}
@@ -42,29 +41,26 @@ impl SegmentRegister {
self.segment_states.len()
}
pub fn get_all_segments(&self) -> Vec<SegmentMeta> {
self.segment_states
.values()
.map(|segment_entry| segment_entry.meta().clone())
.collect()
}
pub fn get_mergeable_segments(&self) -> Vec<SegmentMeta> {
self.segment_states
.values()
.filter(|segment_entry| segment_entry.is_ready())
.map(|segment_entry| segment_entry.meta().clone())
.collect()
}
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
self.segment_states.values().cloned().collect()
}
pub fn segment_metas(&self) -> Vec<SegmentMeta> {
let mut segment_ids: Vec<SegmentMeta> = self.segment_states
.values()
.map(|segment_entry| segment_entry.meta().clone())
@@ -72,28 +68,26 @@ impl SegmentRegister {
segment_ids.sort_by_key(|meta| meta.id());
segment_ids
}
pub fn segment_entry(&self, segment_id: &SegmentId) -> Option<SegmentEntry> {
self.segment_states.get(segment_id).cloned()
}
pub fn contains_all(&mut self, segment_ids: &[SegmentId]) -> bool {
segment_ids
.iter()
.all(|segment_id| self.segment_states.contains_key(segment_id))
}
pub fn add_segment_entry(&mut self, segment_entry: SegmentEntry) {
let segment_id = segment_entry.segment_id();
self.segment_states.insert(segment_id, segment_entry);
}
pub fn remove_segment(&mut self, segment_id: &SegmentId) {
self.segment_states.remove(segment_id);
}
}
pub fn cancel_merge(&mut self, segment_id: &SegmentId) {
self.segment_states
.get_mut(segment_id)
@@ -106,21 +100,16 @@ impl SegmentRegister {
.get_mut(segment_id)
.expect("Received a merge notification for a segment that is not registered")
.start_merge();
}
}
pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: DeleteCursor) -> SegmentRegister {
let mut segment_states = HashMap::new();
for segment_meta in segment_metas {
let segment_id = segment_meta.id();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None);
segment_states.insert(segment_id, segment_entry);
}
SegmentRegister { segment_states: segment_states }
}
}
@@ -140,7 +129,7 @@ mod tests {
.map(|segment_meta| segment_meta.id())
.collect()
}
#[test]
fn test_segment_register() {
let delete_queue = DeleteQueue::new();
@@ -149,32 +138,48 @@ mod tests {
let segment_id_a = SegmentId::generate_random();
let segment_id_b = SegmentId::generate_random();
let segment_id_merged = SegmentId::generate_random();
{
let segment_meta = SegmentMeta::new(segment_id_a);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry);
}
assert_eq!(segment_register
.segment_entry(&segment_id_a)
.unwrap()
.state(),
SegmentState::Ready);
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
{
let segment_meta = SegmentMeta::new(segment_id_b);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry);
}
assert_eq!(segment_register
.segment_entry(&segment_id_b)
.unwrap()
.state(),
SegmentState::Ready);
segment_register.start_merge(&segment_id_a);
segment_register.start_merge(&segment_id_b);
assert_eq!(segment_register
.segment_entry(&segment_id_a)
.unwrap()
.state(),
SegmentState::InMerge);
assert_eq!(segment_register
.segment_entry(&segment_id_b)
.unwrap()
.state(),
SegmentState::InMerge);
segment_register.remove_segment(&segment_id_a);
segment_register.remove_segment(&segment_id_b);
{
let segment_meta_merged = SegmentMeta::new(segment_id_merged);
let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry);
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
}
}
}


@@ -29,11 +29,11 @@ impl SegmentSerializer {
let postings_serializer = try!(PostingsSerializer::open(segment));
Ok(SegmentSerializer {
postings_serializer: postings_serializer,
store_writer: StoreWriter::new(store_write),
fast_field_serializer: fast_field_serializer,
fieldnorms_serializer: fieldnorms_serializer,
})
}
/// Accessor to the `PostingsSerializer`.
@@ -58,10 +58,10 @@ impl SegmentSerializer {
/// Finalize the segment serialization.
pub fn close(self) -> Result<()> {
self.fast_field_serializer.close()?;
self.postings_serializer.close()?;
self.store_writer.close()?;
self.fieldnorms_serializer.close()?;
Ok(())
}
}


@@ -1,5 +1,3 @@
use core::Index;
use core::IndexMeta;
use core::META_FILEPATH;
@@ -9,7 +7,7 @@ use core::SegmentMeta;
use core::SerializableSegment;
use directory::Directory;
use indexer::stamper::Stamper;
use error::{Error, ErrorKind, Result};
use futures_cpupool::CpuPool;
use futures::Future;
use futures::Canceled;
@@ -21,9 +19,8 @@ use indexer::MergeCandidate;
use indexer::merger::IndexMerger;
use indexer::SegmentEntry;
use indexer::SegmentSerializer;
use futures_cpupool::CpuFuture;
use serde_json;
use indexer::delete_queue::DeleteCursor;
use schema::Schema;
use std::borrow::BorrowMut;
@@ -49,17 +46,14 @@ use super::segment_manager::{SegmentManager, get_mergeable_segments};
/// and flushed.
///
/// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, opstamp: u64, directory: &mut Directory) -> Result<()> {
save_metas(vec![], schema, opstamp, directory)
}
/// Save the index meta file.
/// This operation is atomic:
/// Either
/// - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched,
@@ -77,12 +71,11 @@ pub fn save_metas(segment_metas: Vec<SegmentMeta>,
schema: schema,
opstamp: opstamp,
};
let mut w = try!(serde_json::to_vec_pretty(&metas));
try!(write!(&mut w, "\n"));
directory.atomic_write(&META_FILEPATH, &w[..])?;
debug!("Saved metas {:?}", serde_json::to_string_pretty(&metas));
Ok(())
}
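The "all or nothing" behavior described above ultimately relies on `Directory::atomic_write`, whose implementation is not part of this diff. A common sketch of such an atomic write (assumed here, POSIX-style) is to write a temporary file, flush it, and rename it over the target:

use std::fs::{self, File};
use std::io::Write;

fn atomic_write_sketch(path: &str, payload: &[u8]) -> std::io::Result<()> {
    let tmp_path = format!("{}.tmp", path);
    let mut tmp = File::create(&tmp_path)?;
    tmp.write_all(payload)?;
    tmp.sync_all()?; // make sure the bytes reach the disk
    fs::rename(&tmp_path, path)?; // rename is atomic on POSIX filesystems
    Ok(())
}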
@@ -90,7 +83,7 @@ pub fn save_metas(segment_metas: Vec<SegmentMeta>,
// of the `SegmentUpdate`s.
//
// All this processing happens on a single thread
// consuming a common queue.
#[derive(Clone)]
pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
@@ -99,56 +92,57 @@ pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
fn perform_merge(segment_ids: &[SegmentId],
segment_updater: &SegmentUpdater,
mut merged_segment: Segment,
target_opstamp: u64)
-> Result<SegmentEntry> {
// first we need to apply deletes to our segment.
info!("Start merge: {:?}", segment_ids);
let index = &segment_updater.0.index;
let schema = index.schema();
let mut segment_entries = vec![];
let mut file_protections: Vec<FileProtection> = vec![];
for segment_id in segment_ids {
if let Some(mut segment_entry) =
segment_updater.0.segment_manager.segment_entry(segment_id) {
let segment = index.segment(segment_entry.meta().clone());
if let Some(file_protection) =
advance_deletes(segment, &mut segment_entry, target_opstamp)? {
file_protections.push(file_protection);
}
segment_entries.push(segment_entry);
} else {
error!("Error, had to abort merge as some of the segments are not managed anymore.");
let msg = format!("Segment {:?} requested for merge is not managed.",
segment_id);
bail!(ErrorKind::InvalidArgument(msg));
}
}
let delete_cursor = segment_entries[0].delete_cursor().clone();
let segments: Vec<Segment> = segment_entries
.iter()
.map(|segment_entry| index.segment(segment_entry.meta().clone()))
.collect();
// An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?;
// ... we just serialize this index merger in our new segment
// to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)
.expect("Creating index serializer failed");
let num_docs = merger
.write(segment_serializer)
.expect("Serializing merged index failed");
let mut segment_meta = SegmentMeta::new(merged_segment.id());
segment_meta.set_max_doc(num_docs);
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
Ok(after_merge_segment_entry)
}
@@ -162,30 +156,28 @@ struct InnerSegmentUpdater {
merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
generation: AtomicUsize,
killed: AtomicBool,
stamper: Stamper,
}
impl SegmentUpdater {
pub fn new(index: Index,
stamper: Stamper,
delete_cursor: DeleteCursor)
-> Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
pool: CpuPool::new(1),
index: index,
segment_manager: segment_manager,
merge_policy: RwLock::new(box DefaultMergePolicy::default()),
merging_thread_id: AtomicUsize::default(),
merging_threads: RwLock::new(HashMap::new()),
generation: AtomicUsize::default(),
killed: AtomicBool::new(false),
stamper: stamper,
})))
}
pub fn new_segment(&self) -> Segment {
@@ -200,40 +192,41 @@ impl SegmentUpdater {
}
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) {
*self.0.merge_policy.write().unwrap() = merge_policy;
}
fn get_merging_thread_id(&self) -> usize {
self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
}
fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>
(&self,
f: F)
-> CpuFuture<T, Error> {
let me_clone = self.clone();
self.0.pool.spawn_fn(move || Ok(f(me_clone)))
}
pub fn add_segment(&self, generation: usize, segment_entry: SegmentEntry) -> bool {
if generation >= self.0.generation.load(Ordering::Acquire) {
self.run_async(|segment_updater| {
segment_updater.0.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options();
true
})
.forget();
true
} else {
false
}
}
pub fn kill(&mut self) {
self.0.killed.store(true, Ordering::Release);
}
fn is_alive(&self) -> bool {
!self.0.killed.load(Ordering::Acquire)
}
@@ -243,77 +236,81 @@ impl SegmentUpdater {
/// The method returns copies of the segment entries,
/// updated with the delete information.
fn purge_deletes(&self, target_opstamp: u64) -> Result<Vec<SegmentEntry>> {
let mut segment_entries = self.0.segment_manager.segment_entries();
for segment_entry in &mut segment_entries {
let segment = self.0.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
}
Ok(segment_entries)
}
pub fn save_metas(&self, opstamp: u64) {
if self.is_alive() {
let index = &self.0.index;
let directory = index.directory();
save_metas(self.0.segment_manager.committed_segment_metas(),
index.schema(),
opstamp,
directory.box_clone().borrow_mut())
.expect("Could not save metas.");
}
}
pub fn garbage_collect_files(&self) -> Result<()> {
self.run_async(move |segment_updater| {
segment_updater.garbage_collect_files_exec();
})
.wait()
}
fn garbage_collect_files_exec(&self) {
info!("Running garbage collection");
let living_files = self.0.segment_manager.list_files();
let mut index = self.0.index.clone();
index.directory_mut().garbage_collect(living_files);
}
pub fn commit(&self, opstamp: u64) -> Result<()> {
self.run_async(move |segment_updater| if segment_updater.is_alive() {
let segment_entries = segment_updater
.purge_deletes(opstamp)
.expect("Failed purge deletes");
segment_updater.0.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp);
segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options();
})
.wait()
}
pub fn start_merge(&self,
segment_ids: &[SegmentId])
-> impl Future<Item = SegmentMeta, Error = Canceled> {
self.0.segment_manager.start_merge(segment_ids);
let segment_updater_clone = self.clone();
let segment_ids_vec = segment_ids.to_vec();
let merging_thread_id = self.get_merging_thread_id();
let (merging_future_send, merging_future_recv) = oneshot();
if segment_ids.is_empty() {
return merging_future_recv;
}
let target_opstamp = self.0.stamper.stamp();
let merging_join_handle = thread::spawn(move || {
// first we need to apply deletes to our segment.
info!("Start merge: {:?}", segment_ids_vec);
let merged_segment = segment_updater_clone.new_segment();
let merged_segment_id = merged_segment.id();
let merge_result = perform_merge(&segment_ids_vec,
&segment_updater_clone,
merged_segment,
target_opstamp);
match merge_result {
Ok(after_merge_segment_entry) => {
@@ -321,11 +318,11 @@ impl SegmentUpdater {
segment_updater_clone
.end_merge(segment_ids_vec, after_merge_segment_entry)
.expect("Segment updater thread is corrupted.");
// the future may fail if the listener of the oneshot future
// has been destroyed.
//
// This is not a problem here, so we just ignore any
// possible error.
let _merging_future_res = merging_future_send.send(merged_segment_meta);
}
@@ -339,16 +336,26 @@ impl SegmentUpdater {
// merging_future_send will be dropped, sending an error to the future.
}
}
segment_updater_clone
.0
.merging_threads
.write()
.unwrap()
.remove(&merging_thread_id);
Ok(())
});
self.0
.merging_threads
.write()
.unwrap()
.insert(merging_thread_id, merging_join_handle);
merging_future_recv
}
fn consider_merge_options(&self) {
let (committed_segments, uncommitted_segments) =
get_mergeable_segments(&self.0.segment_manager);
// Committed segments cannot be merged with uncommitted_segments.
// We therefore consider merges using these two sets of segments independently.
let merge_policy = self.get_merge_policy();
@@ -360,31 +367,55 @@ impl SegmentUpdater {
}
}
fn cancel_merge(&self,
before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentId) {
self.0
.segment_manager
.cancel_merge(before_merge_segment_ids, after_merge_segment_entry);
}
fn end_merge(&self,
before_merge_segment_ids: Vec<SegmentId>,
mut after_merge_segment_entry: SegmentEntry)
-> Result<()> {
self.run_async(move |segment_updater| {
debug!("End merge {:?}", after_merge_segment_entry.meta());
info!("End merge {:?}", after_merge_segment_entry.meta());
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
let mut _file_protection_opt = None;
if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.0.index.opstamp();
if delete_operation.opstamp < committed_opstamp {
let index = &segment_updater.0.index;
let segment = index.segment(after_merge_segment_entry.meta().clone());
match advance_deletes(segment,
&mut after_merge_segment_entry,
committed_opstamp) {
Ok(file_protection_opt_res) => {
_file_protection_opt = file_protection_opt_res;
}
Err(e) => {
error!("Merge of {:?} was cancelled (advancing deletes failed): {:?}",
before_merge_segment_ids, e);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
}
segment_updater.cancel_merge(&before_merge_segment_ids,
after_merge_segment_entry.segment_id());
return;
}
}
}
}
segment_updater.0.segment_manager.end_merge(&before_merge_segment_ids,
after_merge_segment_entry);
segment_updater.consider_merge_options();
info!("save metas");
segment_updater.save_metas(segment_updater.0.index.opstamp());
segment_updater.garbage_collect_files_exec();
}).wait()
}
@@ -393,22 +424,22 @@ impl SegmentUpdater {
///
/// Upon termination of the current merging threads,
/// merge opportunity may appear.
///
/// We keep waiting until the merge policy judges that
/// no opportunity is available.
///
/// Note that it is not required to call this
/// method in your application.
/// Terminating your application without letting
/// merge terminate is perfectly safe.
///
/// Obsolete files will eventually be cleaned up
/// by the directory garbage collector.
pub fn wait_merging_thread(&self) -> Result<()> {
let mut num_segments: usize;
loop {
num_segments = self.0.segment_manager.num_segments();
let mut new_merging_threads = HashMap::new();
@@ -421,9 +452,7 @@ impl SegmentUpdater {
merging_thread_handle
.join()
.map(|_| ())
.map_err(|_| ErrorKind::ErrorInThread("Merging thread failed.".into()))?;
}
// Our merging threads may have queued their completed merges.
self.run_async(move |_| {}).wait()?;
@@ -433,10 +462,9 @@ impl SegmentUpdater {
if new_num_segments >= num_segments {
break;
}
}
Ok(())
}
}
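// Usage sketch for the methods above, assuming the `MergeWheneverPossible`
// policy used in the tests below: `set_merge_policy` swaps the strategy, and
// `wait_merging_threads` blocks until no merge opportunity remains.
#[cfg(test)]
fn eager_merge_sketch() {
    let mut schema_builder = SchemaBuilder::default();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
    index_writer.set_merge_policy(box MergeWheneverPossible);
    for _ in 0..100 {
        index_writer.add_document(doc!(text_field => "a"));
    }
    index_writer.commit().unwrap();
    for _ in 0..100 {
        index_writer.add_document(doc!(text_field => "b"));
    }
    index_writer.commit().unwrap();
    // Terminating without this call is safe too; obsolete files are
    // eventually removed by the directory garbage collector.
    index_writer.wait_merging_threads().expect("waiting for merging threads");
}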
@@ -456,7 +484,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer.set_merge_policy(box MergeWheneverPossible);
@@ -468,7 +496,7 @@ mod tests {
}
assert!(index_writer.commit().is_ok());
}
{
for _ in 0..100 {
index_writer.add_document(doc!(text_field=>"c"));
@@ -476,7 +504,7 @@ mod tests {
}
assert!(index_writer.commit().is_ok());
}
{
index_writer.add_document(doc!(text_field=>"e"));
index_writer.add_document(doc!(text_field=>"f"));
@@ -493,8 +521,9 @@ mod tests {
assert_eq!(index.searcher().num_docs(), 302);
{
index_writer
.wait_merging_threads()
.expect("waiting for merging threads");
}
index.load_searchers().unwrap();

View File

@@ -1,231 +1,214 @@
use Result;
use DocId;
use std::io;
use schema::Schema;
use schema::Term;
use core::Segment;
use core::SerializableSegment;
use fastfield::FastFieldsWriter;
use schema::Field;
use schema::FieldEntry;
use schema::FieldValue;
use schema::FieldType;
use indexer::segment_serializer::SegmentSerializer;
use datastruct::stacker::Heap;
use indexer::index_writer::MARGIN_IN_BYTES;
use super::operation::AddOperation;
use postings::MultiFieldPostingsWriter;
/// A `SegmentWriter` is in charge of creating a segment index from
/// documents.
///
/// It creates the posting lists in anonymous memory.
/// The segment is laid on disk when the segment gets `finalized`.
pub struct SegmentWriter<'a> {
heap: &'a Heap,
max_doc: DocId,
multifield_postings: MultiFieldPostingsWriter<'a>,
segment_serializer: SegmentSerializer,
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FastFieldsWriter,
doc_opstamps: Vec<u64>,
}
fn create_fieldnorms_writer(schema: &Schema) -> FastFieldsWriter {
let u64_fields: Vec<Field> = schema
.fields()
.iter()
.enumerate()
.filter(|&(_, field_entry)| field_entry.is_indexed())
.map(|(field_id, _)| Field(field_id as u32))
.collect();
FastFieldsWriter::new(u64_fields)
}
impl<'a> SegmentWriter<'a> {
/// Creates a new `SegmentWriter`
///
/// The arguments are defined as follows
///
/// - heap: most of the segment writer data (terms, and postings lists recorders)
/// is stored in a user-defined heap object. This makes it possible for the user to define
/// the flushing behavior as a buffer limit
/// - segment: The segment being written
/// - schema
pub fn for_segment(heap: &'a Heap,
table_bits: usize,
mut segment: Segment,
schema: &Schema)
-> Result<SegmentWriter<'a>> {
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_bits, heap);
Ok(SegmentWriter {
heap: heap,
max_doc: 0,
multifield_postings: multifield_postings,
fieldnorms_writer: create_fieldnorms_writer(schema),
segment_serializer: segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema(schema),
doc_opstamps: Vec::with_capacity(1_000),
})
}
/// Lay on disk the current content of the `SegmentWriter`
///
/// Finalize consumes the `SegmentWriter`, so that it cannot
/// be used afterwards.
pub fn finalize(self) -> Result<Vec<u64>> {
write(&self.multifield_postings,
&self.fast_field_writers,
&self.fieldnorms_writer,
self.segment_serializer)?;
Ok(self.doc_opstamps)
}
/// Returns true iff the segment writer's buffer has reached capacity.
///
/// The limit is defined as `the user defined heap size - an arbitrary margin of 10MB`
/// The `Segment` is `finalize`d when the buffer gets full.
///
/// Because we cannot cut through a document, the margin ensures that we
/// rarely exceed the heap size.
pub fn is_buffer_full(&self) -> bool {
self.heap.num_free_bytes() <= MARGIN_IN_BYTES
}
/// Return true if the term dictionary hashmap is reaching capacity.
/// It is one of the conditions that trigger a `SegmentWriter` to
/// be finalized.
pub(crate) fn is_term_saturated(&self) -> bool {
self.multifield_postings.is_term_saturated()
}
/// Indexes a new document
///
/// As a user, you should rather use `IndexWriter`'s add_document.
pub fn add_document(&mut self,
add_operation: &AddOperation,
schema: &Schema)
-> io::Result<()> {
let doc_id = self.max_doc;
let doc = &add_operation.document;
self.doc_opstamps.push(add_operation.opstamp);
for (field, field_values) in doc.get_sorted_field_values() {
let field_options = schema.get_field_entry(field);
match *field_options.field_type() {
FieldType::Str(ref text_options) => {
let num_tokens: u32 = if text_options.get_indexing_options().is_tokenized() {
self.multifield_postings
.index_text(doc_id, field, &field_values)
} else {
let num_field_values = field_values.len() as u32;
for field_value in field_values {
let term = Term::from_field_text(field, field_value.value().text());
self.multifield_postings.suscribe(doc_id, &term);
}
num_field_values
};
self.fieldnorms_writer
.get_field_writer(field)
.map(|field_norms_writer| field_norms_writer.add_val(num_tokens as u64));
}
FieldType::U64(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
let term = Term::from_field_u64(field_value.field(),
field_value.value().u64_value());
self.multifield_postings.suscribe(doc_id, &term);
}
}
}
FieldType::I64(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
let term = Term::from_field_i64(field_value.field(),
field_value.value().i64_value());
self.multifield_postings.suscribe(doc_id, &term);
}
}
}
}
}
self.fieldnorms_writer.fill_val_up_to(doc_id);
self.fast_field_writers.add_document(doc);
let stored_fieldvalues: Vec<&FieldValue> = doc.field_values()
.iter()
.filter(|field_value| schema.get_field_entry(field_value.field()).is_stored())
.collect();
let doc_writer = self.segment_serializer.get_store_writer();
try!(doc_writer.store(&stored_fieldvalues));
self.max_doc += 1;
Ok(())
}
/// Max doc is
/// - the number of documents in the segment assuming there are no deletes
/// - the maximum document id (including deleted documents) + 1
///
/// Currently, **tantivy** does not handle deletes anyway,
/// so `max_doc == num_docs`
pub fn max_doc(&self) -> u32 {
self.max_doc
}
/// Number of documents in the index.
/// Deleted documents are not counted.
///
/// Currently, **tantivy** does not handle deletes anyway,
/// so `max_doc == num_docs`
#[allow(dead_code)]
pub fn num_docs(&self) -> u32 {
self.max_doc
}
}
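// The flush condition of `is_buffer_full` above, in plain numbers. Values
// here are hypothetical; only the 10MB margin comes from the doc comment.
fn buffer_margin_sketch() {
    let margin_in_bytes: usize = 10_000_000; // the "arbitrary margin of 10MB"
    let num_free_bytes: usize = 9_500_000;   // hypothetical heap state
    // Flush as soon as free space dips below the margin, so that the next
    // document cannot overflow the user-defined heap.
    assert!(num_free_bytes <= margin_in_bytes);
}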
// This method is used as a trick to workaround the borrow checker
fn write(multifield_postings: &MultiFieldPostingsWriter,
fast_field_writers: &FastFieldsWriter,
fieldnorms_writer: &FastFieldsWriter,
mut serializer: SegmentSerializer)
-> Result<()> {
try!(multifield_postings.serialize(serializer.get_postings_serializer()));
try!(fast_field_writers.serialize(serializer.get_fast_field_serializer()));
try!(fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer()));
try!(serializer.close());
Ok(())
}
impl<'a> SerializableSegment for SegmentWriter<'a> {
fn write(&self, serializer: SegmentSerializer) -> Result<u32> {
let max_doc = self.max_doc;
write(&self.multifield_postings,
&self.fast_field_writers,
&self.fieldnorms_writer,
serializer)?;
Ok(max_doc)
}
}
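// Wiring sketch for the new `SegmentWriter` API, mirroring the test setup
// later in this changeset: callers now pass the term-hashtable size in bits
// (18 here) alongside the shared heap. `index` and `schema` are assumed given.
#[cfg(test)]
fn segment_writer_sketch(index: &Index, schema: &Schema) {
    let heap = Heap::with_capacity(10_000_000);
    let segment = index.new_segment();
    let mut segment_writer =
        SegmentWriter::for_segment(&heap, 18, segment, schema).unwrap();
    let op = AddOperation {
        opstamp: 0u64,
        document: doc!(),
    };
    segment_writer.add_document(&op, schema).unwrap();
    // `finalize` consumes the writer, lays the segment on disk, and returns
    // the opstamps of the documents it indexed.
    let _doc_opstamps = segment_writer.finalize().unwrap();
}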

View File

@@ -6,12 +6,11 @@ use std::sync::Arc;
pub struct Stamper(Arc<AtomicU64>);
impl Stamper {
pub fn new(first_opstamp: u64) -> Stamper {
Stamper(Arc::new(AtomicU64::new(first_opstamp)))
}
pub fn stamp(&self) -> u64 {
self.0.fetch_add(1u64, Ordering::SeqCst)
}
}
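// Standalone sketch (std only) of the pattern above: `fetch_add` returns the
// previous value, so each call hands out a unique, strictly increasing
// opstamp, and the counter can be shared across threads through the `Arc`.
fn stamper_sketch() {
    use std::sync::Arc;
    use std::sync::atomic::{AtomicU64, Ordering};
    let stamper = Arc::new(AtomicU64::new(0u64));
    let s0 = stamper.fetch_add(1u64, Ordering::SeqCst);
    let s1 = stamper.fetch_add(1u64, Ordering::SeqCst);
    assert_eq!((s0, s1), (0u64, 1u64));
}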

View File

@@ -1,6 +1,6 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![allow(unknown_lints)] // for the clippy lint options
#![cfg_attr(feature = "cargo-clippy", allow(module_inception))]
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#![feature(box_syntax)]
#![feature(optin_builtin_traits)]
@@ -8,25 +8,37 @@
#![feature(integer_atomics)]
#![cfg_attr(test, feature(test))]
#![cfg_attr(test, feature(iterator_step_by))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)]
//! # `tantivy`
//!
//! Tantivy is a search engine library.
//! Think `Lucene`, but in Rust.
//!
//! A good place for you to get started is to check out
//! the example code (
//! [literate programming](http://fulmicoton.com/tantivy-examples/simple_search.html) /
//! [source code](https://github.com/fulmicoton/tantivy/blob/master/examples/simple_search.rs))
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate log;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate version;
extern crate fst;
@@ -34,10 +46,11 @@ extern crate byteorder;
extern crate memmap;
extern crate regex;
extern crate tempfile;
extern crate atomicwrites;
extern crate tempdir;
extern crate serde;
extern crate bincode;
extern crate serde_json;
extern crate time;
extern crate lz4;
extern crate uuid;
@@ -49,6 +62,8 @@ extern crate crossbeam;
extern crate bit_set;
extern crate futures;
extern crate futures_cpupool;
extern crate owning_ref;
extern crate stable_deref_trait;
#[cfg(test)]
extern crate env_logger;
@@ -59,59 +74,55 @@ extern crate libc;
#[cfg(windows)]
extern crate winapi;
#[cfg(test)]
extern crate test;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
mod functional_test;
#[macro_use]
mod macros;
pub use error::{Error, ErrorKind, ResultExt};
/// Tantivy result.
pub type Result<T> = std::result::Result<T, Error>;
mod core;
mod compression;
mod indexer;
mod common;
mod error;
mod analyzer;
mod datastruct;
pub mod termdict;
/// Row-oriented, slow, compressed storage of documents
pub mod store;
/// Query module
pub mod query;
/// Directory module
pub mod directory;
/// Collector module
pub mod collector;
/// Postings module (also called inverted index)
pub mod postings;
/// Schema
pub mod schema;
pub mod fastfield;
pub use directory::Directory;
pub use core::{Index, Segment, SegmentId, SegmentMeta, Searcher};
@@ -120,27 +131,24 @@ pub use schema::{Term, Document};
pub use core::SegmentReader;
pub use self::common::TimerTree;
pub use postings::DocSet;
pub use postings::Postings;
pub use core::SegmentComponent;
pub use postings::SegmentPostingsOption;
pub use core::TermIterator;
pub use common::{i64_to_u64, u64_to_i64};
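// Sketch of an order-preserving i64 -> u64 mapping like the one re-exported
// above. Whether tantivy uses exactly this sign-bit flip is an assumption
// here; the property that matters is that ordering is preserved.
fn i64_mapping_sketch() {
    fn to_u64(val: i64) -> u64 {
        (val as u64) ^ (1u64 << 63) // flip the sign bit: i64::MIN maps to 0
    }
    assert_eq!(to_u64(i64::min_value()), 0u64);
    assert!(to_u64(-1i64) < to_u64(0i64));
    assert!(to_u64(0i64) < to_u64(1i64));
}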
/// Expose the current version of tantivy, as well
/// whether it was compiled with the simd compression.
pub fn version() -> &'static str {
if cfg!(feature="simdcompression") {
concat!(version!(), "-simd")
}
else {
concat!(version!(), "-nosimd")
if cfg!(feature = "simdcompression") {
concat!(version!(), "-simd")
} else {
concat!(version!(), "-nosimd")
}
}
/// Defines tantivy's merging strategy
pub mod merge_policy {
pub use indexer::MergePolicy;
pub use indexer::LogMergePolicy;
@@ -161,28 +169,27 @@ pub type Score = f32;
pub type SegmentLocalId = u32;
impl DocAddress {
/// Return the segment ordinal.
/// The segment ordinal is an id identifying the segment
/// hosting the document. It is only meaningful, in the context
/// of a searcher.
pub fn segment_ord(&self) -> SegmentLocalId {
self.0
}
/// Return the segment local `DocId`
pub fn doc(&self) -> DocId {
self.1
}
}
/// `DocAddress` contains all the necessary information
/// to identify a document given a `Searcher` object.
///
/// It consists of an id identifying its segment, and
/// its segment-local `DocId`.
///
/// The id used for the segment is actually an ordinal
/// in the list of segment hold by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
@@ -200,7 +207,33 @@ mod tests {
use schema::*;
use DocSet;
use IndexWriter;
use postings::SegmentPostingsOption::FreqAndPositions;
use fastfield::{FastFieldReader, U64FastFieldReader, I64FastFieldReader};
use Postings;
use rand::{XorShiftRng, Rng, SeedableRng};
fn generate_array_with_seed(n: usize, ratio: f32, seed_val: u32) -> Vec<u32> {
let seed: &[u32; 4] = &[1, 2, 3, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0..u32::max_value())
.filter(|_| rng.next_f32() < ratio)
.take(n)
.collect()
}
pub fn generate_array(n: usize, ratio: f32) -> Vec<u32> {
generate_array_with_seed(n, ratio, 4)
}
fn sample_with_seed(n: u32, ratio: f32, seed_val: u32) -> Vec<u32> {
let seed: &[u32; 4] = &[1, 2, 3, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0..n).filter(|_| rng.next_f32() < ratio).collect()
}
pub fn sample(n: u32, ratio: f32) -> Vec<u32> {
sample_with_seed(n, ratio, 4)
}
#[test]
fn test_indexing() {
@@ -267,8 +300,8 @@ mod tests {
assert_eq!(searcher.doc_freq(&term_d), 0);
}
}
#[test]
fn test_fieldnorm() {
let mut schema_builder = SchemaBuilder::default();
@@ -306,24 +339,32 @@ mod tests {
fn test_delete_postings1() {
let mut schema_builder = SchemaBuilder::default();
let text_field = schema_builder.add_text_field("text", TEXT);
let term_abcd = Term::from_field_text(text_field, "abcd");
let term_a = Term::from_field_text(text_field, "a");
let term_b = Term::from_field_text(text_field, "b");
let term_c = Term::from_field_text(text_field, "c");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
// 0
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
// 1
let doc = doc!(text_field=>" a c");
index_writer.add_document(doc);
}
{
// 2
let doc = doc!(text_field=>" b c");
index_writer.add_document(doc);
}
{
// 3
let doc = doc!(text_field=>" b d");
index_writer.add_document(doc);
}
@@ -333,11 +374,13 @@ mod tests {
{
index_writer.delete_term(Term::from_field_text(text_field, "a"));
}
{
// 4
let doc = doc!(text_field=>" b c");
index_writer.add_document(doc);
}
{
// 5
let doc = doc!(text_field=>" a");
index_writer.add_document(doc);
}
@@ -347,15 +390,15 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = searcher.segment_reader(0);
assert!(reader.read_postings(&term_abcd, FreqAndPositions).is_none());
{
let mut postings = reader.read_postings(&term_a, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 5);
assert!(!postings.advance());
}
{
let mut postings = reader.read_postings(&term_b, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 3);
assert!(postings.advance());
@@ -366,11 +409,13 @@ mod tests {
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
// 0
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
// 1
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
index_writer.rollback().unwrap();
@@ -379,15 +424,16 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = searcher.segment_reader(0);
assert!(reader.read_postings(&term_abcd, FreqAndPositions).is_none());
{
let mut postings = reader.read_postings(&term_a, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 5);
assert!(!postings.advance());
}
{
let mut postings = reader.read_postings(&term_b, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 3);
assert!(postings.advance());
@@ -398,14 +444,14 @@ mod tests {
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
index_writer = index_writer.rollback().unwrap();
index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.commit().unwrap();
}
@@ -413,13 +459,13 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = searcher.segment_reader(0);
assert!(reader.read_postings(&term_abcd, FreqAndPositions).is_none());
{
let mut postings = reader.read_postings(&term_a, FreqAndPositions).unwrap();
assert!(!postings.advance());
}
{
let mut postings = reader.read_postings(&term_b, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 3);
assert!(postings.advance());
@@ -427,7 +473,7 @@ mod tests {
assert!(!postings.advance());
}
{
let mut postings = reader.read_postings(&term_c, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 4);
assert!(!postings.advance());
@@ -437,21 +483,45 @@ mod tests {
#[test]
fn test_indexed_u64() {
let mut schema_builder = SchemaBuilder::default();
let field = schema_builder.add_u64_field("value", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let term = Term::from_field_u64(field, 1u64);
let mut postings = searcher
.segment_reader(0)
.read_postings(&term, SegmentPostingsOption::NoFreq)
.unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 0);
assert!(!postings.advance());
}
#[test]
fn test_indexed_i64() {
let mut schema_builder = SchemaBuilder::default();
let value_field = schema_builder.add_i64_field("value", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let negative_val = -1i64;
index_writer.add_document(doc!(value_field => negative_val));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let term = Term::from_field_i64(value_field, negative_val);
let mut postings = searcher
.segment_reader(0)
.read_postings(&term, SegmentPostingsOption::NoFreq)
.unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 0);
assert!(!postings.advance());
@@ -463,15 +533,15 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_with_num_threads(2, 40_000_000).unwrap();
let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
let doc = doc!(text_field=>val);
index_writer.add_document(doc);
};
let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
let delterm = Term::from_field_text(text_field, val);
index_writer.delete_term(delterm);
@@ -512,8 +582,10 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = searcher.segment_reader(0);
let term_abcd = Term::from_field_text(text_field, "abcd");
assert!(reader.read_postings(&term_abcd, FreqAndPositions).is_none());
let term_af = Term::from_field_text(text_field, "af");
let mut postings = reader.read_postings(&term_af, FreqAndPositions).unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 0);
assert_eq!(postings.term_freq(), 3);
@@ -555,35 +627,29 @@ mod tests {
collector.docs()
};
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![1, 2]);
}
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![0]);
}
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![0, 1, 2]);
}
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
vec![1, 2]);
}
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
vec![2]);
}
{
assert_eq!(get_doc_ids(vec![Term::from_field_text(text_field, "b"),
Term::from_field_text(text_field, "a")]),
vec![0, 1, 2]);
}
}
}
@@ -620,7 +686,9 @@ mod tests {
let mut schema_builder = SchemaBuilder::default();
let text_field = schema_builder.add_text_field("text", TEXT);
let other_text_field = schema_builder.add_text_field("text2", TEXT);
let document = doc!(text_field => "tantivy", text_field => "some other value", other_text_field => "short");
let document = doc!(text_field => "tantivy",
text_field => "some other value",
other_text_field => "short");
assert_eq!(document.len(), 3);
let values = document.get_all(text_field);
assert_eq!(values.len(), 2);
@@ -630,4 +698,57 @@ mod tests {
assert_eq!(values.len(), 1);
assert_eq!(values[0].text(), "short");
}
#[test]
fn test_wrong_fast_field_type() {
let mut schema_builder = SchemaBuilder::default();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
let text_field = schema_builder.add_text_field("text", TEXT);
let stored_int_field = schema_builder.add_u64_field("text", INT_STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
{
let document = doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64);
index_writer.add_document(document);
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
{
let fast_field_reader_res =
segment_reader.get_fast_field_reader::<U64FastFieldReader>(text_field);
assert!(fast_field_reader_res.is_err());
}
{
let fast_field_reader_res =
segment_reader.get_fast_field_reader::<U64FastFieldReader>(stored_int_field);
assert!(fast_field_reader_res.is_err());
}
{
let fast_field_reader_res =
segment_reader.get_fast_field_reader::<U64FastFieldReader>(fast_field_signed);
assert!(fast_field_reader_res.is_err());
}
{
let fast_field_reader_res =
segment_reader.get_fast_field_reader::<I64FastFieldReader>(fast_field_signed);
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
assert_eq!(fast_field_reader.get(0), 4i64)
}
}
}

src/macros.rs Normal file
View File

@@ -0,0 +1,68 @@
macro_rules! get(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
/// `doc!` is a shortcut that helps building `Document`
/// objects.
///
/// Assuming that `field1` and `field2` are `Field` instances.
/// You can create a document with a value of `value1` for `field1`
/// `value2` for `field2`, as follows:
///
/// ```c
/// doc!(
/// field1 => value1,
/// field2 => value2,
/// )
/// ```
///
/// The value can be a `u64`, a `&str`, an `i64`, or a `String`.
///
/// # Warning
///
/// The document hence created is not yet validated against a schema.
/// Nothing prevents its user from creating an invalid document missing a
/// field, or associating a `String` to a `u64` field for instance.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate tantivy;
///
/// use tantivy::schema::{SchemaBuilder, TEXT, FAST};
///
/// //...
///
/// # fn main() {
/// let mut schema_builder = SchemaBuilder::new();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let author = schema_builder.add_text_field("text", TEXT);
/// let likes = schema_builder.add_u64_field("num_u64", FAST);
/// let schema = schema_builder.build();
/// let doc = doc!(
/// title => "Life Aquatic",
/// author => "Wes Anderson",
/// likes => 4u64
/// );
/// # }
/// ```
#[macro_export]
macro_rules! doc(
() => {
{
($crate::Document::default())
}
}; // avoids a warning due to the useless `mut`.
($($field:ident => $value:expr),*) => {
{
let mut document = $crate::Document::default();
$(
document.add($crate::schema::FieldValue::new($field, $value.into()));
)*
document
}
};
);
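// A field may also be bound several times in one `doc!` invocation, producing
// a multi-valued document (exercised by the `get_all` assertions elsewhere in
// this changeset). Hypothetical fields:
//
// let document = doc!(text_field => "tantivy",
//                     text_field => "some other value",
//                     other_text_field => "short");
// assert_eq!(document.len(), 3);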

View File

@@ -65,6 +65,10 @@ pub trait DocSet {
None
}
}
/// Returns a best-effort hint of the
/// length of the docset.
fn size_hint(&self) -> usize;
}
@@ -83,6 +87,11 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
let unboxed: &TDocSet = self.borrow();
unboxed.doc()
}
fn size_hint(&self) -> usize {
let unboxed: &TDocSet = self.borrow();
unboxed.size_hint()
}
}
impl<'a, TDocSet: DocSet> DocSet for &'a mut TDocSet {
@@ -100,4 +109,9 @@ impl<'a, TDocSet: DocSet> DocSet for &'a mut TDocSet {
let unref: &TDocSet = *self;
unref.doc()
}
fn size_hint(&self) -> usize {
let unref: &TDocSet = *self;
unref.size_hint()
}
}
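// Standalone sketch of why `size_hint` exists: an intersection drives from the
// smallest docset first, so ordering by the hint (as `IntersectionDocSet::from`
// now does) minimizes skip work. Plain sorted vectors stand in for docsets.
fn size_hint_ordering_sketch() {
    let mut docsets: Vec<Vec<u32>> = vec![
        vec![3, 4, 9, 18],       // hint 4
        vec![1, 3, 9],           // hint 3: should drive the scan
        vec![1, 5, 9, 111, 200], // hint 5
    ];
    docsets.sort_by_key(|docset| docset.len()); // stand-in for size_hint()
    assert_eq!(docsets[0], vec![1, 3, 9]);
}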

View File

@@ -47,7 +47,6 @@ impl FreqHandler {
}
}
/// Returns a `FreqHandler` that decodes `DocId`s, term frequencies, and term positions.
pub fn new_with_freq_and_position(position_data: &[u8]) -> FreqHandler {
let positions = read_positions(position_data);
@@ -123,4 +122,4 @@ impl FreqHandler {
}
}
}
}

View File

@@ -10,12 +10,13 @@ pub struct IntersectionDocSet<TDocSet: DocSet> {
}
impl<TDocSet: DocSet> From<Vec<TDocSet>> for IntersectionDocSet<TDocSet> {
fn from(mut docsets: Vec<TDocSet>) -> IntersectionDocSet<TDocSet> {
assert!(docsets.len() >= 2);
docsets.sort_by_key(|docset| docset.size_hint());
IntersectionDocSet {
docsets: docsets,
finished: false,
doc: 0u32,
}
}
}
@@ -31,37 +32,51 @@ impl<TDocSet: DocSet> IntersectionDocSet<TDocSet> {
impl<TDocSet: DocSet> DocSet for IntersectionDocSet<TDocSet> {
fn size_hint(&self) -> usize {
self.docsets
.iter()
.map(|docset| docset.size_hint())
.min()
.unwrap() // safe as docsets cannot be empty.
}
#[allow(never_loop)]
fn advance(&mut self) -> bool {
if self.finished {
return false;
}
let mut candidate_doc = self.doc;
let mut candidate_ord = self.docsets.len();
'outer: loop {
for (ord, docset) in self.docsets.iter_mut().enumerate() {
if ord != candidate_ord {
// `candidate_ord` is already at the
// right position.
//
// Calling `skip_next` would advance this docset
// and miss it.
match docset.skip_next(candidate_doc) {
SkipResult::Reached => {}
SkipResult::OverStep => {
// this is not in the intersection,
// let's update our candidate.
candidate_doc = docset.doc();
candidate_ord = ord;
continue 'outer;
}
SkipResult::End => {
self.finished = true;
return false;
}
}
}
}
self.doc = candidate_doc;
return true;
}
}
@@ -69,3 +84,51 @@ impl<TDocSet: DocSet> DocSet for IntersectionDocSet<TDocSet> {
self.doc
}
}
#[cfg(test)]
mod tests {
use postings::{DocSet, VecPostings, IntersectionDocSet};
#[test]
fn test_intersection() {
{
let left = VecPostings::from(vec![1, 3, 9]);
let right = VecPostings::from(vec![3, 4, 9, 18]);
let mut intersection = IntersectionDocSet::from(vec![left, right]);
assert!(intersection.advance());
assert_eq!(intersection.doc(), 3);
assert!(intersection.advance());
assert_eq!(intersection.doc(), 9);
assert!(!intersection.advance());
}
{
let a = VecPostings::from(vec![1, 3, 9]);
let b = VecPostings::from(vec![3, 4, 9, 18]);
let c = VecPostings::from(vec![1, 5, 9, 111]);
let mut intersection = IntersectionDocSet::from(vec![a, b, c]);
assert!(intersection.advance());
assert_eq!(intersection.doc(), 9);
assert!(!intersection.advance());
}
}
#[test]
fn test_intersection_zero() {
let left = VecPostings::from(vec![0]);
let right = VecPostings::from(vec![0]);
let mut intersection = IntersectionDocSet::from(vec![left, right]);
assert!(intersection.advance());
assert_eq!(intersection.doc(), 0);
}
#[test]
fn test_intersection_empty() {
let a = VecPostings::from(vec![1, 3]);
let b = VecPostings::from(vec![1, 4]);
let c = VecPostings::from(vec![3, 9]);
let mut intersection = IntersectionDocSet::from(vec![a, b, c]);
assert!(!intersection.advance());
}
}
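// The `advance` loop above, replayed on plain slices: every docset leapfrogs
// to the current candidate; whoever oversteps supplies the next candidate, and
// a full pass with no overstep is a match. Simplified sketch: unlike the real
// implementation, it re-probes the docset that produced the candidate.
fn leapfrog_sketch() {
    let a = [1u32, 3, 9];
    let b = [3u32, 4, 9, 18];
    // Stand-in for `skip_next(candidate)`: smallest element >= candidate.
    let skip = |set: &[u32], candidate: u32| set.iter().cloned().find(|&d| d >= candidate);
    let mut hits = Vec::new();
    let mut candidate = 0u32;
    'outer: loop {
        for set in &[&a[..], &b[..]] {
            match skip(set, candidate) {
                None => break 'outer,           // SkipResult::End
                Some(d) if d == candidate => {} // SkipResult::Reached
                Some(d) => {
                    candidate = d;              // SkipResult::OverStep
                    continue 'outer;            // restart the pass on the new candidate
                }
            }
        }
        hits.push(candidate); // every docset reached the candidate
        candidate += 1;       // the next advance() starts past the match
    }
    assert_eq!(hits, vec![3, 9]);
}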

View File

@@ -17,39 +17,42 @@ mod docset;
mod segment_postings_option;
pub use self::docset::{SkipResult, DocSet};
use self::recorder::{Recorder, NothingRecorder, TermFrequencyRecorder, TFAndPositionRecorder};
pub use self::serializer::PostingsSerializer;
pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
pub use self::term_info::TermInfo;
pub use self::postings::Postings;
#[cfg(test)]
pub use self::vec_postings::VecPostings;
pub use self::segment_postings::{SegmentPostings, BlockSegmentPostings};
pub use self::intersection::IntersectionDocSet;
pub use self::freq_handler::FreqHandler;
pub use self::segment_postings_option::SegmentPostingsOption;
pub use common::HasLen;
#[cfg(test)]
mod tests {
use super::*;
use schema::{Document, INT_INDEXED, TEXT, STRING, SchemaBuilder, Term};
use core::SegmentComponent;
use indexer::SegmentWriter;
use core::SegmentReader;
use core::Index;
use postings::SegmentPostingsOption::FreqAndPositions;
use std::iter;
use datastruct::stacker::Heap;
use fastfield::FastFieldReader;
use query::TermQuery;
use schema::Field;
use test::{self, Bencher};
use indexer::operation::AddOperation;
use tests;
use rand::{XorShiftRng, Rng, SeedableRng};
#[test]
pub fn test_position_write() {
let mut schema_builder = SchemaBuilder::default();
@@ -58,18 +61,18 @@ mod tests {
let index = Index::create_in_ram(schema);
let mut segment = index.new_segment();
let mut posting_serializer = PostingsSerializer::open(&mut segment).unwrap();
let term = Term::from_field_text(text_field, "abc");
posting_serializer.new_term(&term).unwrap();
posting_serializer.new_field(text_field);
posting_serializer.new_term("abc".as_bytes()).unwrap();
for doc_id in 0u32..3u32 {
let positions = vec![1, 2, 3, 2];
posting_serializer.write_doc(doc_id, 2, &positions).unwrap();
}
posting_serializer.close_term().unwrap();
posting_serializer.close().unwrap();
let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
assert!(read.len() <= 16);
}
#[test]
pub fn test_position_and_fieldnorm() {
let mut schema_builder = SchemaBuilder::default();
@@ -77,16 +80,19 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let segment = index.new_segment();
let heap = Heap::with_capacity(10_000_000);
{
let mut segment_writer = SegmentWriter::for_segment(&heap, 18, segment.clone(), &schema)
.unwrap();
{
let mut doc = Document::default();
// checking that position works if the field has two values
doc.add_text(text_field, "a b a c a d a a.");
doc.add_text(text_field, "d d d d a"); // checking that position works if the field has two values.
doc.add_text(text_field, "d d d d a");
let op = AddOperation {
opstamp: 0u64,
document: doc,
};
segment_writer.add_document(&op, &schema).unwrap();
}
@@ -95,7 +101,7 @@ mod tests {
doc.add_text(text_field, "b a");
let op = AddOperation {
opstamp: 1u64,
document: doc,
};
segment_writer.add_document(&op, &schema).unwrap();
}
@@ -106,7 +112,7 @@ mod tests {
doc.add_text(text_field, &text);
let op = AddOperation {
opstamp: 2u64,
document: doc,
};
segment_writer.add_document(&op, &schema).unwrap();
}
@@ -118,17 +124,21 @@ mod tests {
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field).unwrap();
assert_eq!(fieldnorm_reader.get(0), 8 + 5);
assert_eq!(fieldnorm_reader.get(1), 2);
for i in 2..1000 {
assert_eq!(fieldnorm_reader.get(i), (i + 1) as u64);
}
}
{
let term_a = Term::from_field_text(text_field, "abcdef");
assert!(segment_reader
.read_postings(&term_a, FreqAndPositions)
.is_none());
}
{
let term_a = Term::from_field_text(text_field, "a");
let mut postings_a = segment_reader
.read_postings(&term_a, FreqAndPositions)
.unwrap();
assert_eq!(postings_a.len(), 1000);
assert!(postings_a.advance());
assert_eq!(postings_a.doc(), 0);
@@ -137,7 +147,7 @@ mod tests {
assert!(postings_a.advance());
assert_eq!(postings_a.doc(), 1u32);
assert_eq!(postings_a.term_freq(), 1);
for i in 2u32..1000u32 {
assert!(postings_a.advance());
assert_eq!(postings_a.term_freq(), 1);
assert_eq!(postings_a.positions(), [i]);
@@ -147,9 +157,11 @@ mod tests {
}
{
let term_e = Term::from_field_text(text_field, "e");
let mut postings_e = segment_reader
.read_postings(&term_e, FreqAndPositions)
.unwrap();
assert_eq!(postings_e.len(), 1000 - 2);
for i in 2u32..1000u32 {
assert!(postings_e.advance());
assert_eq!(postings_e.term_freq(), i);
let positions = postings_e.positions();
@@ -163,7 +175,7 @@ mod tests {
}
}
}
#[test]
pub fn test_position_and_fieldnorm2() {
let mut schema_builder = SchemaBuilder::default();
@@ -185,7 +197,8 @@ mod tests {
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let term_query = TermQuery::new(Term::from_field_text(text_field, "a"), SegmentPostingsOption::NoFreq);
let term_query = TermQuery::new(Term::from_field_text(text_field, "a"),
SegmentPostingsOption::NoFreq);
let searcher = index.searcher();
let mut term_weight = term_query.specialized_weight(&*searcher);
term_weight.segment_postings_options = SegmentPostingsOption::FreqAndPositions;
@@ -195,31 +208,173 @@ mod tests {
assert_eq!(term_scorer.doc(), 1u32);
assert_eq!(term_scorer.postings().positions(), &[1u32, 4]);
}
#[test]
fn test_skip_next() {
let term_0 = Term::from_field_u64(Field(0), 0);
let term_1 = Term::from_field_u64(Field(0), 1);
let term_2 = Term::from_field_u64(Field(0), 2);
let num_docs = 300u32;
let index = {
let mut schema_builder = SchemaBuilder::default();
let value_field = schema_builder.add_u64_field("value", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
for i in 0..num_docs {
let mut doc = Document::default();
doc.add_u64(value_field, 2);
doc.add_u64(value_field, (i % 2) as u64);
index_writer.add_document(doc);
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
};
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0);
// check that the basic usage works
for i in 0..num_docs - 1 {
for j in i + 1..num_docs {
let mut segment_postings = segment_reader
.read_postings(&term_2, SegmentPostingsOption::NoFreq)
.unwrap();
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
assert_eq!(segment_postings.doc(), i);
assert_eq!(segment_postings.skip_next(j), SkipResult::Reached);
assert_eq!(segment_postings.doc(), j);
}
}
{
let mut segment_postings = segment_reader
.read_postings(&term_2, SegmentPostingsOption::NoFreq)
.unwrap();
// check that `skip_next` advances the iterator
assert!(segment_postings.advance());
assert_eq!(segment_postings.doc(), 0);
assert_eq!(segment_postings.skip_next(1), SkipResult::Reached);
assert_eq!(segment_postings.doc(), 1);
assert_eq!(segment_postings.skip_next(1), SkipResult::OverStep);
assert_eq!(segment_postings.doc(), 2);
// check that going beyond the end is handled
assert_eq!(segment_postings.skip_next(num_docs), SkipResult::End);
}
// check that filtering works
{
let mut segment_postings = segment_reader
.read_postings(&term_0, SegmentPostingsOption::NoFreq)
.unwrap();
for i in 0..num_docs / 2 {
assert_eq!(segment_postings.skip_next(i * 2), SkipResult::Reached);
assert_eq!(segment_postings.doc(), i * 2);
}
let mut segment_postings = segment_reader
.read_postings(&term_0, SegmentPostingsOption::NoFreq)
.unwrap();
for i in 0..num_docs / 2 - 1 {
assert_eq!(segment_postings.skip_next(i * 2 + 1), SkipResult::OverStep);
assert_eq!(segment_postings.doc(), (i + 1) * 2);
}
}
// delete some of the documents
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer.delete_term(term_0);
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0);
// make sure seeking still works
for i in 0..num_docs {
let mut segment_postings = segment_reader
.read_postings(&term_2, SegmentPostingsOption::NoFreq)
.unwrap();
if i % 2 == 0 {
assert_eq!(segment_postings.skip_next(i), SkipResult::OverStep);
assert_eq!(segment_postings.doc(), i + 1);
} else {
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
assert_eq!(segment_postings.doc(), i);
}
}
// now try with a longer sequence
{
let mut segment_postings = segment_reader
.read_postings(&term_2, SegmentPostingsOption::NoFreq)
.unwrap();
let mut last = 2; // Fibonacci-like seek targets: 3, 5, 8, ..., 377 (chosen so no target is seeked twice)
let mut cur = 3;
loop {
match segment_postings.skip_next(cur) {
SkipResult::End => break,
SkipResult::Reached => assert_eq!(segment_postings.doc(), cur),
SkipResult::OverStep => assert_eq!(segment_postings.doc(), cur + 1),
}
let next = cur + last;
last = cur;
cur = next;
}
assert_eq!(cur, 377);
}
// delete everything else
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer.delete_term(term_1);
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0);
// finally, check that it's empty
{
let mut segment_postings = segment_reader
.read_postings(&term_2, SegmentPostingsOption::NoFreq)
.unwrap();
assert_eq!(segment_postings.skip_next(0), SkipResult::End);
let mut segment_postings = segment_reader
.read_postings(&term_2, SegmentPostingsOption::NoFreq)
.unwrap();
assert_eq!(segment_postings.skip_next(num_docs), SkipResult::End);
}
}
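The assertions above all revolve around the three-way `SkipResult` contract: `skip_next(target)` always advances by at least one document, then reports whether it landed exactly on the target (`Reached`), on the first document beyond it (`OverStep`), or ran off the end of the posting list (`End`). A minimal, self-contained toy version of that contract over a sorted vector (a hypothetical stand-in; the real implementation operates on compressed blocks, as shown later in this diff):

#[derive(Debug, PartialEq)]
enum SkipResult {
    Reached,
    OverStep,
    End,
}

/// Toy doc set over a sorted id list; `cursor` points one past the current doc.
struct ToyDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl ToyDocSet {
    /// Only valid after a successful call to `skip_next`.
    fn doc(&self) -> u32 {
        self.docs[self.cursor - 1]
    }
    fn skip_next(&mut self, target: u32) -> SkipResult {
        // like the segment postings version, always advance at least once
        while self.cursor < self.docs.len() {
            let doc = self.docs[self.cursor];
            self.cursor += 1;
            if doc == target {
                return SkipResult::Reached;
            }
            if doc > target {
                return SkipResult::OverStep;
            }
        }
        SkipResult::End
    }
}

fn main() {
    let mut doc_set = ToyDocSet { docs: vec![0, 1, 2, 5], cursor: 0 };
    assert_eq!(doc_set.skip_next(1), SkipResult::Reached);
    assert_eq!(doc_set.doc(), 1);
    assert_eq!(doc_set.skip_next(1), SkipResult::OverStep); // already past 1, lands on 2
    assert_eq!(doc_set.doc(), 2);
    assert_eq!(doc_set.skip_next(9), SkipResult::End);
}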
lazy_static! {
static ref TERM_A: Term = {
let field = Field(0);
@@ -229,34 +384,40 @@ mod tests {
let field = Field(0);
Term::from_field_text(field, "b")
};
static ref TERM_C: Term = {
let field = Field(0);
Term::from_field_text(field, "c")
};
static ref TERM_D: Term = {
let field = Field(0);
Term::from_field_text(field, "d")
};
static ref INDEX: Index = {
let mut schema_builder = SchemaBuilder::default();
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let seed: &[u32; 4] = &[1, 2, 3, 4];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
let index = Index::create_in_ram(schema);
let mut count_a = 0;
let mut count_b = 0;
let posting_list_size = 100_000;
let posting_list_size = 1_000_000;
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
for _ in 0 .. {
if count_a >= posting_list_size &&
count_b >= posting_list_size {
break;
}
for _ in 0 .. posting_list_size {
let mut doc = Document::default();
if count_a < posting_list_size && rng.gen_weighted_bool(15) {
count_a += 1;
if rng.gen_weighted_bool(15) {
doc.add_text(text_field, "a");
}
if count_b < posting_list_size && rng.gen_weighted_bool(10) {
count_b += 1;
if rng.gen_weighted_bool(10) {
doc.add_text(text_field, "b");
}
if rng.gen_weighted_bool(5) {
doc.add_text(text_field, "c");
}
if rng.gen_weighted_bool(1) {
doc.add_text(text_field, "d");
}
index_writer.add_document(doc);
}
assert!(index_writer.commit().is_ok());
@@ -265,27 +426,112 @@ mod tests {
index
};
}
#[bench]
fn bench_segment_postings(b: &mut Bencher) {
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let mut segment_postings = segment_reader.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq).unwrap();
while segment_postings.advance() {}
});
}
let mut segment_postings = segment_reader
.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq)
.unwrap();
while segment_postings.advance() {}
});
}
#[bench]
fn bench_segment_intersection(b: &mut Bencher) {
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let segment_postings_a = segment_reader.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq).unwrap();
let segment_postings_b = segment_reader.read_postings(&*TERM_B, SegmentPostingsOption::NoFreq).unwrap();
let mut intersection = IntersectionDocSet::from(vec!(segment_postings_a, segment_postings_b));
let segment_postings_a = segment_reader
.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq)
.unwrap();
let segment_postings_b = segment_reader
.read_postings(&*TERM_B, SegmentPostingsOption::NoFreq)
.unwrap();
let segment_postings_c = segment_reader
.read_postings(&*TERM_C, SegmentPostingsOption::NoFreq)
.unwrap();
let segment_postings_d = segment_reader
.read_postings(&*TERM_D, SegmentPostingsOption::NoFreq)
.unwrap();
let mut intersection = IntersectionDocSet::from(vec![segment_postings_a,
segment_postings_b,
segment_postings_c,
segment_postings_d]);
while intersection.advance() {}
});
}
}
fn bench_skip_next(p: f32, b: &mut Bencher) {
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p);
let mut segment_postings = segment_reader
.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq)
.unwrap();
let mut existing_docs = Vec::new();
segment_postings.advance();
for doc in &docs {
if *doc >= segment_postings.doc() {
existing_docs.push(*doc);
if segment_postings.skip_next(*doc) == SkipResult::End {
break;
}
}
}
b.iter(|| {
let mut segment_postings = segment_reader
.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq)
.unwrap();
for doc in &existing_docs {
if segment_postings.skip_next(*doc) == SkipResult::End {
break;
}
}
});
}
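`bench_skip_next` seeks through TERM_A's posting list at sampling rate `p`. The `tests::sample` helper itself is not shown in this diff; a plausible sketch (the name, signature, and use of the `rand` crate are assumptions here) keeps each doc id with probability `p`:

use rand::{thread_rng, Rng};

/// Hypothetical stand-in for `tests::sample`: keep each id in `0..n`
/// with probability `p`, returning an ascending list of sampled doc ids.
fn sample(n: u32, p: f32) -> Vec<u32> {
    let mut rng = thread_rng();
    (0..n).filter(|_| rng.gen::<f32>() < p).collect()
}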
#[bench]
fn bench_skip_next_p01(b: &mut Bencher) {
bench_skip_next(0.001, b);
}
#[bench]
fn bench_skip_next_p1(b: &mut Bencher) {
bench_skip_next(0.01, b);
}
#[bench]
fn bench_skip_next_p10(b: &mut Bencher) {
bench_skip_next(0.1, b);
}
#[bench]
fn bench_skip_next_p90(b: &mut Bencher) {
bench_skip_next(0.9, b);
}
#[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) {
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let n: u32 = test::black_box(17);
let mut segment_postings = segment_reader
.read_postings(&*TERM_A, SegmentPostingsOption::NoFreq)
.unwrap();
let mut s = 0u32;
while segment_postings.advance() {
s += (segment_postings.doc() & n) % 1024;
}
s
});
}
}


@@ -5,9 +5,114 @@ use postings::PostingsSerializer;
use std::io;
use postings::Recorder;
use analyzer::SimpleTokenizer;
use schema::Field;
use Result;
use schema::{Schema, Field};
use analyzer::StreamingIterator;
use std::marker::PhantomData;
use std::ops::DerefMut;
use datastruct::stacker::{HashMap, Heap};
use postings::{NothingRecorder, TermFrequencyRecorder, TFAndPositionRecorder};
use schema::FieldEntry;
use schema::FieldType;
use schema::TextIndexingOptions;
fn posting_from_field_entry<'a>(field_entry: &FieldEntry,
heap: &'a Heap)
-> Box<PostingsWriter + 'a> {
match *field_entry.field_type() {
FieldType::Str(ref text_options) => {
match text_options.get_indexing_options() {
TextIndexingOptions::TokenizedWithFreq => {
SpecializedPostingsWriter::<TermFrequencyRecorder>::new_boxed(heap)
}
TextIndexingOptions::TokenizedWithFreqAndPosition => {
SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed(heap)
}
_ => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap),
}
}
FieldType::U64(_) |
FieldType::I64(_) => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(heap),
}
}
pub struct MultiFieldPostingsWriter<'a> {
heap: &'a Heap,
term_index: HashMap<'a>,
per_field_postings_writers: Vec<Box<PostingsWriter + 'a>>,
}
impl<'a> MultiFieldPostingsWriter<'a> {
/// Create a new `MultiFieldPostingsWriter` given
/// a schema and a heap.
pub fn new(schema: &Schema, table_bits: usize, heap: &'a Heap) -> MultiFieldPostingsWriter<'a> {
let term_index = HashMap::new(table_bits, heap);
let per_field_postings_writers: Vec<_> = schema
.fields()
.iter()
.map(|field_entry| {
posting_from_field_entry(field_entry, heap)
})
.collect();
MultiFieldPostingsWriter {
heap: heap,
term_index: term_index,
per_field_postings_writers: per_field_postings_writers,
}
}
pub fn index_text(&mut self, doc: DocId, field: Field, field_values: &[&FieldValue]) -> u32 {
let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
postings_writer.index_text(&mut self.term_index, doc, field, field_values, self.heap)
}
pub fn suscribe(&mut self, doc: DocId, term: &Term) {
let postings_writer = self.per_field_postings_writers[term.field().0 as usize].deref_mut();
postings_writer.suscribe(&mut self.term_index, doc, 0u32, term, self.heap)
}
/// Serialize the inverted index.
/// It pushes all terms, one field at a time, to the
/// postings serializer.
#[allow(needless_range_loop)]
pub fn serialize(&self, serializer: &mut PostingsSerializer) -> Result<()> {
let mut term_offsets: Vec<(&[u8], u32)> = self.term_index.iter().collect();
term_offsets.sort_by_key(|&(k, _v)| k);
let mut offsets: Vec<(Field, usize)> = vec![];
let term_offsets_it = term_offsets
.iter()
.cloned()
.map(|(key, _)| Term::wrap(key).field())
.enumerate();
let mut prev_field = Field(u32::max_value());
for (offset, field) in term_offsets_it {
if field != prev_field {
offsets.push((field, offset));
prev_field = field;
}
}
offsets.push((Field(0), term_offsets.len()));
for i in 0..(offsets.len() - 1) {
let (field, start) = offsets[i];
let (_, stop) = offsets[i + 1];
let postings_writer = &self.per_field_postings_writers[field.0 as usize];
postings_writer
.serialize(field, &term_offsets[start..stop], serializer, self.heap)?;
}
Ok(())
}
/// Return true iff the term dictionary is saturated.
pub fn is_term_saturated(&self) -> bool {
self.term_index.is_saturated()
}
}
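Because a `Term` serializes its field id before its text, sorting the `(key, addr)` pairs groups each field's terms contiguously, so `serialize` only has to record the offset at which a new field starts and then hand each `[start, stop)` slice to the matching per-field writer. A standalone sketch of that grouping, using plain `u32` field ids for brevity:

/// Given term keys sorted so that all terms of a field are contiguous,
/// compute the `(field, start, stop)` range of each field.
fn field_ranges(fields: &[u32]) -> Vec<(u32, usize, usize)> {
    let mut ranges = Vec::new();
    let mut start = 0;
    for i in 1..=fields.len() {
        if i == fields.len() || fields[i] != fields[start] {
            ranges.push((fields[start], start, i));
            start = i;
        }
    }
    ranges
}

fn main() {
    // field ids of six sorted terms: three for field 0, one for field 1, two for field 3
    let fields = [0, 0, 0, 1, 3, 3];
    assert_eq!(field_ranges(&fields),
               vec![(0, 0, 3), (1, 3, 4), (3, 4, 6)]);
}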
/// The `PostingsWriter` is in charge of receiving documents
/// and building a `Segment` in anonymous memory.
@@ -21,17 +126,25 @@ pub trait PostingsWriter {
/// * term - the term
/// * heap - heap used to store the postings information as well as the terms
/// in the hashmap.
fn suscribe(&mut self, doc: DocId, pos: u32, term: &Term, heap: &Heap);
fn suscribe(&mut self,
term_index: &mut HashMap,
doc: DocId,
pos: u32,
term: &Term,
heap: &Heap);
/// Serializes the postings on disk.
/// The actual serialization format is handled by the `PostingsSerializer`.
fn serialize(&self, serializer: &mut PostingsSerializer, heap: &Heap) -> io::Result<()>;
/// Closes all of the currently open `Recorder`'s.
fn close(&mut self, heap: &Heap);
fn serialize(&self,
field: Field,
term_addrs: &[(&[u8], u32)],
serializer: &mut PostingsSerializer,
heap: &Heap)
-> io::Result<()>;
/// Tokenizes the text and calls `suscribe` on each of its tokens.
fn index_text<'a>(&mut self,
term_index: &mut HashMap,
doc_id: DocId,
field: Field,
field_values: &[&'a FieldValue],
@@ -39,14 +152,15 @@ pub trait PostingsWriter {
-> u32 {
let mut pos = 0u32;
let mut num_tokens: u32 = 0u32;
let mut term = Term::allocate(field, 100);
let mut term = unsafe { Term::with_capacity(100) };
term.set_field(field);
for field_value in field_values {
let mut tokens = SimpleTokenizer.tokenize(field_value.value().text());
// right now num_tokens and pos are redundant, but this should
// change when we get proper analyzers
while let Some(token) = tokens.next() {
term.set_text(token);
self.suscribe(doc_id, pos, &term, heap);
self.suscribe(term_index, doc_id, pos, &term, heap);
pos += 1u32;
num_tokens += 1u32;
}
@@ -61,29 +175,17 @@ pub trait PostingsWriter {
/// The `SpecializedPostingsWriter` is just here to remove dynamic
/// dispatch on the recorder type.
pub struct SpecializedPostingsWriter<'a, Rec: Recorder + 'static> {
term_index: HashMap<'a, Rec>,
}
/// Given a `Heap` size, computes a relevant size for the `HashMap`.
fn hashmap_size_in_bits(heap_capacity: u32) -> usize {
let num_buckets_usable = heap_capacity / 100;
let hash_table_size = num_buckets_usable * 2;
let mut pow = 512;
for num_bits in 10..32 {
pow <<= 1;
if pow > hash_table_size {
return num_bits;
}
}
32
heap: &'a Heap,
_recorder_type: PhantomData<Rec>,
}
impl<'a, Rec: Recorder + 'static> SpecializedPostingsWriter<'a, Rec> {
/// constructor
pub fn new(heap: &'a Heap) -> SpecializedPostingsWriter<'a, Rec> {
let capacity = heap.capacity();
let hashmap_size = hashmap_size_in_bits(capacity);
SpecializedPostingsWriter { term_index: HashMap::new(hashmap_size, heap) }
SpecializedPostingsWriter {
heap: heap,
_recorder_type: PhantomData,
}
}
/// Builds a `SpecializedPostingsWriter` storing its data in a heap.
@@ -93,16 +195,14 @@ impl<'a, Rec: Recorder + 'static> SpecializedPostingsWriter<'a, Rec> {
}
impl<'a, Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<'a, Rec> {
fn close(&mut self, heap: &Heap) {
for recorder in self.term_index.values_mut() {
recorder.close_doc(heap);
}
}
#[inline]
fn suscribe(&mut self, doc: DocId, position: u32, term: &Term, heap: &Heap) {
let mut recorder = self.term_index.get_or_create(term);
fn suscribe(&mut self,
term_index: &mut HashMap,
doc: DocId,
position: u32,
term: &Term,
heap: &Heap) {
debug_assert!(term.as_slice().len() >= 4);
let recorder: &mut Rec = term_index.get_or_create(term);
let current_doc = recorder.current_doc();
if current_doc != doc {
if current_doc != u32::max_value() {
@@ -113,16 +213,16 @@ impl<'a, Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<'
recorder.record_position(position, heap);
}
fn serialize(&self, serializer: &mut PostingsSerializer, heap: &Heap) -> io::Result<()> {
let mut term_offsets: Vec<(&[u8], (u32, &Rec))> = self.term_index
.iter()
.collect();
term_offsets.sort_by_key(|&(k, _v)| k);
let mut term = Term::allocate(Field(0), 100);
for (term_bytes, (addr, recorder)) in term_offsets {
// sadly we are required to copy the data
term.set_content(term_bytes);
try!(serializer.new_term(&term));
fn serialize(&self,
field: Field,
term_addrs: &[(&[u8], u32)],
serializer: &mut PostingsSerializer,
heap: &Heap)
-> io::Result<()> {
serializer.new_field(field);
for &(term_bytes, addr) in term_addrs {
let recorder: &mut Rec = self.heap.get_mut_ref(addr);
try!(serializer.new_term(term_bytes));
try!(recorder.serialize(addr, serializer, heap));
try!(serializer.close_term());
}
@@ -130,11 +230,3 @@ impl<'a, Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<'
}
}
#[test]
fn test_hashmap_size() {
assert_eq!(hashmap_size_in_bits(10), 10);
assert_eq!(hashmap_size_in_bits(0), 10);
assert_eq!(hashmap_size_in_bits(100_000), 11);
assert_eq!(hashmap_size_in_bits(300_000_000), 23);
}
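This commit removes the per-writer sizing heuristic (and its unit test) in favor of an explicit `table_bits` parameter on `MultiFieldPostingsWriter::new`. For reference, the removed heuristic reserved roughly one bucket per 100 bytes of heap, doubled that for slack, and rounded up to a power of two of at least 2^10 buckets. A behavior-equivalent sketch, checked against the removed test values:

fn hashmap_size_in_bits(heap_capacity: u32) -> usize {
    // one usable bucket per ~100 bytes of heap, doubled for slack
    let hash_table_size = (heap_capacity / 100) * 2;
    // smallest power of two strictly greater than that, with a 10-bit floor
    (10usize..32)
        .find(|&num_bits| (1u64 << num_bits) > u64::from(hash_table_size))
        .unwrap_or(32)
}

fn main() {
    assert_eq!(hashmap_size_in_bits(10), 10); // 0 buckets -> floor of 10 bits
    assert_eq!(hashmap_size_in_bits(100_000), 11); // 2_000 slots -> 2^11 = 2048
    assert_eq!(hashmap_size_in_bits(300_000_000), 23); // 6_000_000 -> 2^23
}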


@@ -76,6 +76,7 @@ impl Recorder for NothingRecorder {
}
}
/// Recorder encoding document ids and term frequencies.
#[repr(C, packed)]
pub struct TermFrequencyRecorder {
@@ -114,20 +115,23 @@ impl Recorder for TermFrequencyRecorder {
self.current_tf = 0;
}
fn serialize(&self,
self_addr: u32,
serializer: &mut PostingsSerializer,
heap: &Heap)
-> io::Result<()> {
let mut doc_iter = self.stack.iter(self_addr, heap);
loop {
if let Some(doc) = doc_iter.next() {
if let Some(term_freq) = doc_iter.next() {
try!(serializer.write_doc(doc, term_freq, &EMPTY_ARRAY));
continue;
}
}
break;
// the last document has not been closed yet:
// its term freq is `self.current_tf`.
let mut doc_iter = self.stack
.iter(self_addr, heap)
.chain(Some(self.current_tf).into_iter());
while let Some(doc) = doc_iter.next() {
let term_freq = doc_iter
.next()
.expect("The IndexWriter recorded a doc without a term freq.");
serializer.write_doc(doc, term_freq, &EMPTY_ARRAY)?;
}
Ok(())
}
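The bug fixed here is that the recorder's stack holds interleaved `(doc, term_freq)` pairs except for the final document, whose term frequency is still pending in `current_tf` when `serialize` runs. Chaining `current_tf` onto the iterator completes the last pair. The pattern in isolation, with assumed toy values:

fn main() {
    // stack as recorded for doc 3 (tf = 2) and doc 7 (tf still open):
    let stack = vec![3u32, 2, 7];
    let current_tf = 1u32;
    let mut doc_iter = stack.iter().cloned().chain(Some(current_tf));
    while let Some(doc) = doc_iter.next() {
        let term_freq = doc_iter.next().expect("doc without a term freq");
        println!("doc={} term_freq={}", doc, term_freq);
    }
    // prints: doc=3 term_freq=2, then doc=7 term_freq=1
}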
@@ -177,19 +181,12 @@ impl Recorder for TFAndPositionRecorder {
while let Some(doc) = positions_iter.next() {
let mut prev_position = 0;
doc_positions.clear();
loop {
match positions_iter.next() {
Some(position) => {
if position == POSITION_END {
break;
} else {
doc_positions.push(position - prev_position);
prev_position = position;
}
}
None => {
panic!("This should never happen. Pleasee report the bug.");
}
for position in &mut positions_iter {
if position == POSITION_END {
break;
} else {
doc_positions.push(position - prev_position);
prev_position = position;
}
}
try!(serializer.write_doc(doc, doc_positions.len() as u32, &doc_positions));
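Positions are recorded on the stack as absolute values terminated by a `POSITION_END` sentinel, and are only turned into gaps (`position - prev_position`) at serialization time. The transformation in isolation (the sentinel value used here is an assumption):

const POSITION_END: u32 = u32::MAX; // assumed sentinel value

/// Rewrite absolute positions as deltas, stopping at the end-of-doc sentinel.
fn delta_encode(positions: &[u32]) -> Vec<u32> {
    let mut deltas = Vec::new();
    let mut prev_position = 0;
    for &position in positions {
        if position == POSITION_END {
            break;
        }
        deltas.push(position - prev_position);
        prev_position = position;
    }
    deltas
}

fn main() {
    // absolute positions 1, 4, 9 followed by the end-of-doc sentinel
    assert_eq!(delta_encode(&[1, 4, 9, POSITION_END]), vec![1, 3, 5]);
}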


@@ -1,81 +1,51 @@
use compression::{NUM_DOCS_PER_BLOCK, BlockDecoder, VIntDecoder};
use DocId;
use postings::{Postings, FreqHandler, DocSet, HasLen};
use std::num::Wrapping;
use fastfield::delete::DeleteBitSet;
use postings::{Postings, FreqHandler, DocSet, HasLen, SkipResult};
use std::cmp;
use fastfield::DeleteBitSet;
use fst::Streamer;
const EMPTY_DATA: [u8; 0] = [0u8; 0];
/// `SegmentPostings` represents the inverted list or postings associated
/// with a term in a `Segment`.
///
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
/// Positions, on the other hand, are optionally decoded upfront in their entirety.
pub struct SegmentPostings<'a> {
len: usize,
doc_offset: u32,
block_decoder: BlockDecoder,
freq_handler: FreqHandler,
remaining_data: &'a [u8],
cur: Wrapping<usize>,
block_cursor: BlockSegmentPostings<'a>,
cur: usize,
delete_bitset: DeleteBitSet,
}
impl<'a> SegmentPostings<'a> {
fn load_next_block(&mut self) {
let num_remaining_docs = self.len - self.cur.0;
if num_remaining_docs >= NUM_DOCS_PER_BLOCK {
self.remaining_data = self.block_decoder
.uncompress_block_sorted(self.remaining_data, self.doc_offset);
self.remaining_data = self.freq_handler.read_freq_block(self.remaining_data);
self.doc_offset = self.block_decoder.output(NUM_DOCS_PER_BLOCK - 1);
} else {
self.remaining_data = self.block_decoder
.uncompress_vint_sorted(self.remaining_data, self.doc_offset, num_remaining_docs);
self.freq_handler.read_freq_vint(self.remaining_data, num_remaining_docs);
}
}
/// Reads a Segment postings from an &[u8]
///
/// * `len` - number of documents in the posting list.
/// * `data` - data array. The complete data is not necessarily used.
/// * `freq_handler` - the freq handler is in charge of decoding
/// frequencies and/or positions
pub fn from_data(len: u32,
data: &'a [u8],
delete_bitset: &'a DeleteBitSet,
freq_handler: FreqHandler) -> SegmentPostings<'a> {
pub fn from_block_postings(segment_block_postings: BlockSegmentPostings<'a>,
delete_bitset: DeleteBitSet)
-> SegmentPostings<'a> {
SegmentPostings {
len: len as usize,
doc_offset: 0,
block_decoder: BlockDecoder::new(),
freq_handler: freq_handler,
remaining_data: data,
cur: Wrapping(usize::max_value()),
delete_bitset: delete_bitset.clone(),
block_cursor: segment_block_postings,
cur: NUM_DOCS_PER_BLOCK, // cursor within the block
delete_bitset: delete_bitset,
}
}
/// Returns an empty segment postings object
pub fn empty() -> SegmentPostings<'static> {
let empty_block_cursor = BlockSegmentPostings::empty();
SegmentPostings {
len: 0,
doc_offset: 0,
block_decoder: BlockDecoder::new(),
freq_handler: FreqHandler::new_without_freq(),
remaining_data: &EMPTY_DATA,
block_cursor: empty_block_cursor,
delete_bitset: DeleteBitSet::empty(),
cur: Wrapping(usize::max_value()),
cur: NUM_DOCS_PER_BLOCK,
}
}
/// Index within a block is used as an address when
/// interacting with the `FreqHandler`
fn index_within_block(&self) -> usize {
self.cur.0 % NUM_DOCS_PER_BLOCK
}
}
@@ -85,12 +55,13 @@ impl<'a> DocSet for SegmentPostings<'a> {
#[inline]
fn advance(&mut self) -> bool {
loop {
self.cur += Wrapping(1);
if self.cur.0 >= self.len {
return false;
}
if self.index_within_block() == 0 {
self.load_next_block();
self.cur += 1;
if self.cur >= self.block_cursor.block_len() {
self.cur = 0;
if !self.block_cursor.advance() {
self.cur = NUM_DOCS_PER_BLOCK;
return false;
}
}
if !self.delete_bitset.is_deleted(self.doc()) {
return true;
@@ -98,24 +69,356 @@ impl<'a> DocSet for SegmentPostings<'a> {
}
}
fn skip_next(&mut self, target: DocId) -> SkipResult {
if !self.advance() {
return SkipResult::End;
}
// skip blocks until one that might contain the target
loop {
// check if we need to go to the next block
let (current_doc, last_doc_in_block) = {
let block_docs = self.block_cursor.docs();
(block_docs[self.cur], block_docs[block_docs.len() - 1])
};
if target > last_doc_in_block {
if !self.block_cursor.advance() {
return SkipResult::End;
}
self.cur = 0;
} else {
if target < current_doc {
// We've overshot the target after the first `advance` call,
// or we're at the beginning of a block.
// Either way, we're on the first `DocId` greater than `target`.
return SkipResult::OverStep;
}
break;
}
}
{
// we're in the right block now, start with an exponential search
let block_docs = self.block_cursor.docs();
let block_len = block_docs.len();
debug_assert!(target >= block_docs[self.cur]);
debug_assert!(target <= block_docs[block_len - 1]);
let mut start = self.cur;
let mut end = block_len;
let mut count = 1;
loop {
let new = start + count;
if new < end && block_docs[new] < target {
start = new;
count *= 2;
} else {
break;
}
}
end = cmp::min(start + count, end);
// now do a binary search
let mut count = end - start;
while count > 0 {
let step = count / 2;
let mid = start + step;
let doc = block_docs[mid];
if doc < target {
start = mid + 1;
count -= step + 1;
} else {
count = step;
}
}
// `doc` is now >= `target`
let doc = block_docs[start];
self.cur = start;
if !self.delete_bitset.is_deleted(doc) {
if doc == target {
return SkipResult::Reached;
} else {
return SkipResult::OverStep;
}
}
}
if self.advance() {
SkipResult::OverStep
} else {
SkipResult::End
}
}
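Within a block, the seek is a galloping (exponential) search followed by a binary search over the narrowed range, which stays cheap both for short hops and for long jumps. The same two-phase search as a standalone function, assuming a sorted slice that contains at least one element `>= target` at or after `start`:

/// Find the first index `>= start` such that `docs[idx] >= target`.
fn gallop(docs: &[u32], mut start: usize, target: u32) -> usize {
    // exponential phase: double the step until we overshoot the target
    let mut count = 1;
    while start + count < docs.len() && docs[start + count] < target {
        start += count;
        count *= 2;
    }
    let mut end = std::cmp::min(start + count, docs.len());
    // binary phase: narrow [start, end) down to a single index
    while start < end {
        let mid = start + (end - start) / 2;
        if docs[mid] < target {
            start = mid + 1;
        } else {
            end = mid;
        }
    }
    start
}

fn main() {
    let docs = [2u32, 3, 5, 7, 11, 13, 17, 19];
    assert_eq!(gallop(&docs, 0, 11), 4); // exact hit
    assert_eq!(gallop(&docs, 2, 6), 3); // first doc >= 6 is 7
}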
fn size_hint(&self) -> usize {
self.len()
}
#[inline]
fn doc(&self) -> DocId {
self.block_decoder.output(self.index_within_block())
let docs = self.block_cursor.docs();
assert!(self.cur < docs.len(),
"Have you forgotten to call `.advance()` at least once before calling `.doc()`?");
docs[self.cur]
}
}
impl<'a> HasLen for SegmentPostings<'a> {
fn len(&self) -> usize {
self.len
self.block_cursor.doc_freq()
}
}
impl<'a> Postings for SegmentPostings<'a> {
fn term_freq(&self) -> u32 {
self.freq_handler.freq(self.index_within_block())
self.block_cursor.freq_handler().freq(self.cur)
}
fn positions(&self) -> &[u32] {
self.freq_handler.positions(self.index_within_block())
self.block_cursor.freq_handler().positions(self.cur)
}
}
/// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents.
///
/// # Warning
///
/// While it is useful for some very specific high-performance
/// use cases, you should prefer using `SegmentPostings` for most usage.
pub struct BlockSegmentPostings<'a> {
block_decoder: BlockDecoder,
doc_freq: usize,
doc_offset: DocId,
num_binpacked_blocks: usize,
num_vint_docs: usize,
remaining_data: &'a [u8],
freq_handler: FreqHandler,
}
impl<'a> BlockSegmentPostings<'a> {
pub(crate) fn from_data(doc_freq: usize,
data: &'a [u8],
freq_handler: FreqHandler)
-> BlockSegmentPostings<'a> {
let num_binpacked_blocks: usize = (doc_freq as usize) / NUM_DOCS_PER_BLOCK;
let num_vint_docs = (doc_freq as usize) - NUM_DOCS_PER_BLOCK * num_binpacked_blocks;
BlockSegmentPostings {
num_binpacked_blocks: num_binpacked_blocks,
num_vint_docs: num_vint_docs,
block_decoder: BlockDecoder::new(),
freq_handler: freq_handler,
remaining_data: data,
doc_offset: 0,
doc_freq: doc_freq,
}
}
// Resets the block segment postings to another position
// in the postings file.
//
// This is useful for enumerating through a list of terms,
// and consuming the associated posting lists while avoiding
// reallocating a `BlockSegmentPostings`.
//
// # Warning
//
// This does not reset the positions list.
pub(crate) fn reset(&mut self, doc_freq: usize, postings_data: &'a [u8]) {
let num_binpacked_blocks: usize = doc_freq / NUM_DOCS_PER_BLOCK;
let num_vint_docs = doc_freq & (NUM_DOCS_PER_BLOCK - 1);
self.num_binpacked_blocks = num_binpacked_blocks;
self.num_vint_docs = num_vint_docs;
self.remaining_data = postings_data;
self.doc_offset = 0;
self.doc_freq = doc_freq;
}
/// Returns the document frequency associated with this block postings.
///
/// This `doc_freq` is simply the sum of the lengths of all of the blocks,
/// and it does not take deleted documents into account.
pub fn doc_freq(&self) -> usize {
self.doc_freq
}
/// Returns the array of docs in the current block.
///
/// Before the first call to `.advance()`, the block
/// returned by `.docs()` is empty.
#[inline]
pub fn docs(&self) -> &[DocId] {
self.block_decoder.output_array()
}
/// Returns the length of the current block.
///
/// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
/// except the last block, whose length may be
/// anywhere between 1 and `NUM_DOCS_PER_BLOCK - 1`.
#[inline]
fn block_len(&self) -> usize {
self.block_decoder.output_len
}
/// Returns a reference to the frequency handler.
pub fn freq_handler(&self) -> &FreqHandler {
&self.freq_handler
}
/// Advance to the next block.
///
/// Returns false iff there are no remaining blocks.
pub fn advance(&mut self) -> bool {
if self.num_binpacked_blocks > 0 {
self.remaining_data =
self.block_decoder
.uncompress_block_sorted(self.remaining_data, self.doc_offset);
self.remaining_data = self.freq_handler.read_freq_block(self.remaining_data);
self.doc_offset = self.block_decoder.output(NUM_DOCS_PER_BLOCK - 1);
self.num_binpacked_blocks -= 1;
true
} else if self.num_vint_docs > 0 {
self.remaining_data =
self.block_decoder
.uncompress_vint_sorted(self.remaining_data,
self.doc_offset,
self.num_vint_docs);
self.freq_handler
.read_freq_vint(self.remaining_data, self.num_vint_docs);
self.num_vint_docs = 0;
true
} else {
false
}
}
/// Returns an empty `BlockSegmentPostings` object.
pub fn empty() -> BlockSegmentPostings<'static> {
BlockSegmentPostings {
num_binpacked_blocks: 0,
num_vint_docs: 0,
block_decoder: BlockDecoder::new(),
freq_handler: FreqHandler::new_without_freq(),
remaining_data: &EMPTY_DATA,
doc_offset: 0,
doc_freq: 0,
}
}
}
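A posting list is laid out as full bitpacked blocks followed by a variable-int tail, so `from_data` only needs a division and a remainder, and `reset` can use the `& (NUM_DOCS_PER_BLOCK - 1)` form because the block size is a power of two. A worked example of the arithmetic, assuming `NUM_DOCS_PER_BLOCK` is 128:

fn main() {
    let num_docs_per_block = 128usize; // assumed block size
    let doc_freq = 300usize;
    let num_binpacked_blocks = doc_freq / num_docs_per_block;
    let num_vint_docs = doc_freq - num_docs_per_block * num_binpacked_blocks;
    assert_eq!(num_binpacked_blocks, 2); // docs 0..256 in two bitpacked blocks
    assert_eq!(num_vint_docs, 44); // 44 trailing docs, vint-encoded
    // the bitmask form used in `reset` agrees because 128 is a power of two
    assert_eq!(doc_freq & (num_docs_per_block - 1), num_vint_docs);
}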
impl<'a, 'b> Streamer<'b> for BlockSegmentPostings<'a> {
type Item = &'b [DocId];
fn next(&'b mut self) -> Option<&'b [DocId]> {
if self.advance() {
Some(self.docs())
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use DocSet;
use super::SegmentPostings;
use schema::SchemaBuilder;
use core::Index;
use schema::INT_INDEXED;
use schema::Term;
use fst::Streamer;
use postings::SegmentPostingsOption;
use common::HasLen;
use super::BlockSegmentPostings;
#[test]
fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty();
assert!(!postings.advance());
assert!(!postings.advance());
assert_eq!(postings.len(), 0);
}
#[test]
fn test_empty_block_segment_postings() {
let mut postings = BlockSegmentPostings::empty();
assert!(!postings.advance());
assert_eq!(postings.doc_freq(), 0);
}
#[test]
fn test_block_segment_postings() {
let mut schema_builder = SchemaBuilder::default();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
for _ in 0..100_000 {
let doc = doc!(int_field=>0u64);
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0);
let term = Term::from_field_u64(int_field, 0u64);
let term_info = segment_reader.get_term_info(&term).unwrap();
let mut block_segments =
segment_reader
.read_block_postings_from_terminfo(&term_info, SegmentPostingsOption::NoFreq);
let mut offset: u32 = 0u32;
// checking that the block is empty before the first call to `advance`
assert!(block_segments.docs().is_empty());
// checking that the `doc_freq` is correct
assert_eq!(block_segments.doc_freq(), 100_000);
while let Some(block) = block_segments.next() {
for (i, doc) in block.iter().cloned().enumerate() {
assert_eq!(offset + (i as u32), doc);
}
offset += block.len() as u32;
}
}
#[test]
fn test_reset_block_segment_postings() {
let mut schema_builder = SchemaBuilder::default();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
// create two posting lists: one containing the even numbers,
// the other containing the odd numbers.
for i in 0..6 {
let doc = doc!(int_field=> (i % 2) as u64);
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0);
let mut block_segments;
{
let term = Term::from_field_u64(int_field, 0u64);
let term_info = segment_reader.get_term_info(&term).unwrap();
block_segments =
segment_reader
.read_block_postings_from_terminfo(&term_info, SegmentPostingsOption::NoFreq);
}
assert!(block_segments.advance());
assert!(block_segments.docs() == &[0, 2, 4]);
{
let term = Term::from_field_u64(int_field, 1u64);
let term_info = segment_reader.get_term_info(&term).unwrap();
segment_reader.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
}
assert!(block_segments.advance());
assert!(block_segments.docs() == &[1, 3, 5]);
}
}


@@ -6,7 +6,7 @@
/// avoid this extra cost when the information is not required.
/// For instance, positions are useful when running phrase queries
/// but useless in other queries.
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)]
pub enum SegmentPostingsOption {
/// Only the doc ids are decoded
NoFreq,
@@ -15,3 +15,15 @@ pub enum SegmentPostingsOption {
/// DocIds, term frequencies and positions will be decoded.
FreqAndPositions,
}
#[cfg(test)]
mod tests {
use super::SegmentPostingsOption;
#[test]
fn test_cmp_segment_postings_option() {
assert!(SegmentPostingsOption::FreqAndPositions > SegmentPostingsOption::Freq);
assert!(SegmentPostingsOption::Freq > SegmentPostingsOption::NoFreq);
}
}
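Deriving `Ord` here presumably lets callers combine the decoding requirements of several query clauses by taking the largest option, since the variants are declared from weakest to strictest. A self-contained sketch of that use (the enum is redefined locally so the snippet runs on its own):

use std::cmp;

#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)]
enum SegmentPostingsOption {
    NoFreq,
    Freq,
    FreqAndPositions,
}

fn main() {
    // combine two clauses' requirements by taking the strictest one
    let needed = cmp::max(SegmentPostingsOption::NoFreq,
                          SegmentPostingsOption::FreqAndPositions);
    assert_eq!(needed, SegmentPostingsOption::FreqAndPositions);
}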


@@ -1,7 +1,6 @@
use Result;
use datastruct::FstMapBuilder;
use termdict::TermDictionaryBuilderImpl;
use super::TermInfo;
use schema::Term;
use schema::Field;
use schema::FieldEntry;
use schema::FieldType;
@@ -11,12 +10,12 @@ use directory::WritePtr;
use compression::{NUM_DOCS_PER_BLOCK, BlockEncoder, CompositeEncoder};
use DocId;
use core::Segment;
use std::io;
use core::SegmentComponent;
use std::io::Write;
use std::io::{self, Write};
use compression::VIntEncoder;
use common::VInt;
use common::BinarySerializable;
use common::CountingWriter;
use termdict::TermDictionaryBuilder;
/// `PostingsSerializer` is in charge of serializing
@@ -30,7 +29,7 @@ use common::BinarySerializable;
///
/// The serializer expects to receive the following calls
/// in this order:
///
/// * `new_field(...)`
/// * `new_term(...)`
/// * `write_doc(...)`
/// * `write_doc(...)`
@@ -41,6 +40,8 @@ use common::BinarySerializable;
/// * `write_doc(...)`
/// * ...
/// * `close_term()`
/// * `new_field(...)`
/// * ...
/// * `close()`
///
/// Terms have to be pushed in a lexicographically-sorted order.
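An illustrative driver for this contract (not part of the diff; the signatures follow the calls visible elsewhere in this changeset, error handling is elided, and the helper name is hypothetical):

use std::io;

// Hypothetical helper: serialize one term of one field in the expected order.
fn write_one_term(serializer: &mut PostingsSerializer, field: Field) -> io::Result<()> {
    serializer.new_field(field); // load the field's indexing options
    serializer.new_term(b"hello")?; // terms must arrive in sorted order
    serializer.write_doc(3, 2, &[0, 7])?; // doc 3, term freq 2, positions 0 and 7
    serializer.write_doc(5, 1, &[4])?;
    serializer.close_term()
}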
@@ -49,11 +50,9 @@ use common::BinarySerializable;
/// A description of the serialization format is
/// [available here](https://fulmicoton.gitbooks.io/tantivy-doc/content/inverted-index.html).
pub struct PostingsSerializer {
terms_fst_builder: FstMapBuilder<WritePtr, TermInfo>,
postings_write: WritePtr,
positions_write: WritePtr,
written_bytes_postings: usize,
written_bytes_positions: usize,
terms_fst_builder: TermDictionaryBuilderImpl<WritePtr, TermInfo>,
postings_write: CountingWriter<WritePtr>,
positions_write: CountingWriter<WritePtr>,
last_doc_id_encoded: u32,
positions_encoder: CompositeEncoder,
block_encoder: BlockEncoder,
@@ -73,44 +72,45 @@ impl PostingsSerializer {
positions_write: WritePtr,
schema: Schema)
-> Result<PostingsSerializer> {
let terms_fst_builder = try!(FstMapBuilder::new(terms_write));
let terms_fst_builder = try!(TermDictionaryBuilderImpl::new(terms_write));
Ok(PostingsSerializer {
terms_fst_builder: terms_fst_builder,
postings_write: postings_write,
positions_write: positions_write,
written_bytes_postings: 0,
written_bytes_positions: 0,
last_doc_id_encoded: 0u32,
positions_encoder: CompositeEncoder::new(),
block_encoder: BlockEncoder::new(),
doc_ids: Vec::new(),
term_freqs: Vec::new(),
position_deltas: Vec::new(),
schema: schema,
text_indexing_options: TextIndexingOptions::Unindexed,
term_open: false,
current_term_info: TermInfo::default(),
})
terms_fst_builder: terms_fst_builder,
postings_write: CountingWriter::wrap(postings_write),
positions_write: CountingWriter::wrap(positions_write),
last_doc_id_encoded: 0u32,
positions_encoder: CompositeEncoder::new(),
block_encoder: BlockEncoder::new(),
doc_ids: Vec::new(),
term_freqs: Vec::new(),
position_deltas: Vec::new(),
schema: schema,
text_indexing_options: TextIndexingOptions::Unindexed,
term_open: false,
current_term_info: TermInfo::default(),
})
}
/// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<PostingsSerializer> {
let terms_write = try!(segment.open_write(SegmentComponent::TERMS));
let postings_write = try!(segment.open_write(SegmentComponent::POSTINGS));
let positions_write = try!(segment.open_write(SegmentComponent::POSITIONS));
PostingsSerializer::new(terms_write,
postings_write,
positions_write,
use SegmentComponent::{TERMS, POSTINGS, POSITIONS};
PostingsSerializer::new(segment.open_write(TERMS)?,
segment.open_write(POSTINGS)?,
segment.open_write(POSITIONS)?,
segment.schema())
}
fn load_indexing_options(&mut self, field: Field) {
/// Must be called before pushing the terms of
/// a given field.
///
/// Loads the indexing options for the given field.
pub fn new_field(&mut self, field: Field) {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
self.text_indexing_options = match *field_entry.field_type() {
FieldType::Str(ref text_options) => text_options.get_indexing_options(),
FieldType::U32(ref u32_options) => {
if u32_options.is_indexed() {
FieldType::U64(ref int_options) |
FieldType::I64(ref int_options) => {
if int_options.is_indexed() {
TextIndexingOptions::Unindexed
} else {
TextIndexingOptions::Untokenized
@@ -123,22 +123,21 @@ impl PostingsSerializer {
/// * term - the term. It needs to come after the previous term in
/// lexicographical order.
/// * doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &Term) -> io::Result<()> {
pub fn new_term(&mut self, term: &[u8]) -> io::Result<()> {
if self.term_open {
panic!("Called new_term, while the previous term was not closed.");
}
self.term_open = true;
self.load_indexing_options(term.field());
self.doc_ids.clear();
self.last_doc_id_encoded = 0;
self.term_freqs.clear();
self.position_deltas.clear();
self.current_term_info = TermInfo {
doc_freq: 0,
postings_offset: self.written_bytes_postings as u32,
positions_offset: self.written_bytes_positions as u32,
postings_offset: self.postings_write.written_bytes() as u32,
positions_offset: self.positions_write.written_bytes() as u32,
};
self.terms_fst_builder.insert_key(term.as_slice())
self.terms_fst_builder.insert_key(term)
}
/// Finish the serialization for this term postings.
@@ -148,7 +147,8 @@ impl PostingsSerializer {
pub fn close_term(&mut self) -> io::Result<()> {
if self.term_open {
self.terms_fst_builder.insert_value(&self.current_term_info)?;
self.terms_fst_builder
.insert_value(&self.current_term_info)?;
if !self.doc_ids.is_empty() {
// we have doc ids waiting to be written
@@ -158,10 +158,10 @@ impl PostingsSerializer {
// In that case, the remaining part is encoded
// using variable int encoding.
{
let block_encoded = self.block_encoder
.compress_vint_sorted(&self.doc_ids, self.last_doc_id_encoded);
self.written_bytes_postings += block_encoded.len();
try!(self.postings_write.write_all(block_encoded));
let block_encoded =
self.block_encoder
.compress_vint_sorted(&self.doc_ids, self.last_doc_id_encoded);
self.postings_write.write_all(block_encoded)?;
self.doc_ids.clear();
}
// ... Idem for term frequencies
@@ -169,8 +169,7 @@ impl PostingsSerializer {
let block_encoded = self.block_encoder
.compress_vint_unsorted(&self.term_freqs[..]);
for num in block_encoded {
self.written_bytes_postings +=
try!(num.serialize(&mut self.postings_write));
num.serialize(&mut self.postings_write)?;
}
self.term_freqs.clear();
}
@@ -178,12 +177,11 @@ impl PostingsSerializer {
// On the other hand, positions are entirely buffered until the
// end of the term, at which point they are compressed and written.
if self.text_indexing_options.is_position_enabled() {
self.written_bytes_positions += try!(VInt(self.position_deltas.len() as u64)
.serialize(&mut self.positions_write));
let posdelta_len = VInt(self.position_deltas.len() as u64);
posdelta_len.serialize(&mut self.positions_write)?;
let positions_encoded: &[u8] = self.positions_encoder
.compress_unsorted(&self.position_deltas[..]);
try!(self.positions_write.write_all(positions_encoded));
self.written_bytes_positions += positions_encoded.len();
self.positions_write.write_all(positions_encoded)?;
self.position_deltas.clear();
}
self.term_open = false;
@@ -217,18 +215,17 @@ impl PostingsSerializer {
if self.doc_ids.len() == NUM_DOCS_PER_BLOCK {
{
// encode the doc ids
let block_encoded: &[u8] = self.block_encoder
.compress_block_sorted(&self.doc_ids, self.last_doc_id_encoded);
let block_encoded: &[u8] =
self.block_encoder
.compress_block_sorted(&self.doc_ids, self.last_doc_id_encoded);
self.last_doc_id_encoded = self.doc_ids[self.doc_ids.len() - 1];
try!(self.postings_write.write_all(block_encoded));
self.written_bytes_postings += block_encoded.len();
self.postings_write.write_all(block_encoded)?;
}
if self.text_indexing_options.is_termfreq_enabled() {
// encode the term_freqs
let block_encoded: &[u8] = self.block_encoder
.compress_block_unsorted(&self.term_freqs);
try!(self.postings_write.write_all(block_encoded));
self.written_bytes_postings += block_encoded.len();
self.postings_write.write_all(block_encoded)?;
self.term_freqs.clear();
}
self.doc_ids.clear();

Some files were not shown because too many files have changed in this diff.