Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-04 08:12:54 +00:00.

Compare commits: nodeffeatf...0.14 (180 commits)
The commit table for this compare lists 180 commit SHAs, from 784717749f through 1cc5bd706c; the author, date, and message columns were not captured in this mirror.
.gitignore (vendored) | 2

@@ -1,4 +1,5 @@
 tantivy.iml
+proptest-regressions
 *.swp
 target
 target/debug
@@ -11,3 +12,4 @@ cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
+cargo-timing*
CHANGELOG.md | 104

@@ -1,9 +1,43 @@
+Tantivy 0.14.0
+=========================
+- Removed the dependency on atomicwrites #833. (Implemented by @fulmicoton upon a suggestion and research from @asafigan.)
+- Migrated the tantivy error type from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
+- API Change. Accessing the typed value of a `Schema::Value` now returns an Option instead of panicking if the type does not match.
+- Large API change in the Directory API. Tantivy used to assume that all files could somehow be memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they no longer span the entire file.
+- Added support for Brotli compression in the DocStore. (@ppodolsky)
+- Added helpers for building intersections and unions in BooleanQuery. (@guilload)
+- Bugfix in `Query::explain`.
+- Removed the dependency on `notify` #924. Replaced with a `FileWatcher` struct that polls the meta file every 500ms in a background thread. (@halvorboe @guilload)
+- Added `FilterCollector`, which wraps another collector and filters docs using a predicate over a fast field. (@barrotsteindev)
+- Simplified the encoding of the skip reader struct. The BlockWAND max tf is now encoded over a single byte. (@fulmicoton)
+- `FilterCollector` now supports all fast field value types. (@barrotsteindev)
+- Fast fields are no longer all loaded when opening the segment reader. (@fulmicoton)
+
+This version breaks compatibility and requires users to reindex everything.
+
+Tantivy 0.13.2
+===================
+Bugfix. Acquiring a facet reader on a segment that does not contain any
+doc with this facet returns `None`. (#896)
+
+Tantivy 0.13.1
+===================
+Made `Query` and `Collector` `Send + Sync`.
+Updated misc dependency versions.
+
 Tantivy 0.13.0
 ======================
+Tantivy 0.13 introduces a change in the index format that will require
+you to reindex your index (BlockWAND information is added in the skiplist).
+The index size increase is minor, as this information is only added for
+full blocks.
+If you have a massive index for which reindexing is not an option, please contact me
+so that we can discuss possible solutions.
+
 - Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should. (@Peachball)
 - Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
 - `MMapDirectory::open` does not return a `Result` anymore.
 - Change in the DocSet and Scorer API. (@fulmicoton)
 A freshly created DocSet points directly at its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
 `.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
 As a result, iterating through a DocSet now looks as follows:
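Reconstructed from the description above and from the `while doc != TERMINATED` context line of the next hunk, the new iteration pattern is roughly as follows (a sketch only; `docset` stands for any `DocSet`, e.g. a scorer):

```rust
// A freshly created DocSet already points at its first document.
let mut doc = docset.doc();
while doc != TERMINATED {
    // ... process `doc` ...
    doc = docset.advance(); // advance() now returns the next DocId (or TERMINATED).
}
```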
@@ -17,20 +51,22 @@ while doc != TERMINATED {
 The change made it possible to greatly simplify a lot of the docset's code.
 - Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
 - Added an offset option to the Top(.*)Collectors. (@robyoung)
+- Added Block WAND. Performance of top-K on term unions should be greatly increased. (@fulmicoton, and special thanks
+to the PISA team for answering all my questions!)
+
 Tantivy 0.12.0
 ======================
 - Removing static dispatch in tokenizers for simplicity. (#762)
 - Added backward iteration for the `TermDictionary` stream. (@halvorboe)
 - Fixed a performance issue when searching for the posting lists of a missing term. (@audunhalland)
 - Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge. (@hntd187, landed by @halvorboe #713)
 - Important bugfix #777, causing tantivy to retain memory mappings. (diagnosed by @poljar)
 - Added support for field boosting. (#547, @fulmicoton)

 ## How to update?

 Crates relying on a custom tokenizer, or registering a tokenizer in the manager, will require some
-minor changes. Check https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
+minor changes. Check https://github.com/tantivy-search/tantivy/blob/main/examples/custom_tokenizer.rs
 for a code sample.

 Tantivy 0.11.3
@@ -66,7 +102,7 @@ Tantivy 0.11.0

 ## How to update?

 - The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regexes are now compiled when the `RegexQuery` instance is built. As a result, it can now return
 an error, and handling the `Result` is required.
@@ -90,26 +126,26 @@ Tantivy 0.10.0

 *Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*

 - Added an API to easily tweak or entirely replace the
-default score. See `TopDocs::tweak_score` and `TopScore::custom_score`. (@pmasurel)
+default score. See `TopDocs::tweak_score` and `TopScore::custom_score`. (@fulmicoton)
 - Added an ASCII folding filter. (@drusellers)
-- Bugfix in `query.count` in presence of deletes. (@pmasurel)
+- Bugfix in `query.count` in presence of deletes. (@fulmicoton)
-- Added `.explain(...)` in `Query` and `Weight`. (@pmasurel)
+- Added `.explain(...)` in `Query` and `Weight`. (@fulmicoton)
 - Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
 All segments are simply removed.

 Minor
 ---------
 - Switched to Rust 2018. (@uvd)
 - Small simplification of the code.
 Calling .freq() or .doc() when .advance() has never been called
 on segment postings should panic from now on.
 - Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
 - Fast fields are now preloaded when the `SegmentReader` is created.
 - `IndexMeta` is now public. (@hntd187)
 - `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with an `
 Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
-only require a read lock. (@pmasurel)
+only require a read lock. (@fulmicoton)
 - Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
 - The Stamper now relies on `AtomicU64` on all platforms. (@petr-tik)
 - Bugfix - files get deleted slightly earlier.
@@ -123,7 +159,7 @@ Your program should be usable as is.

 Fast fields used to be accessed directly from the `SegmentReader`.
 The API changed; you are now required to acquire your fast field reader via
 `segment_reader.fast_fields()`, and use one of the typed methods:
 - `.u64()`, `.i64()` if your field is single-valued;
 - `.u64s()`, `.i64s()` if your field is multi-valued;
 - `.bytes()` if your field is a bytes fast field.
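For illustration, here is a minimal sketch of that typed access path (an assumption-laden example: `popularity` stands for the `Field` handle of a single-valued u64 fast field; note that in 0.14 the typed accessors return a `Result`, as the collector changes later in this diff show):

```rust
// `segment_reader` is a &SegmentReader, `popularity` a schema Field (both assumed).
let fast_fields = segment_reader.fast_fields();
let popularity_reader = fast_fields.u64(popularity)?; // typed accessor for a u64 fast field
let value: u64 = popularity_reader.get(doc_id);       // per-document lookup
```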
@@ -132,16 +168,16 @@ The API changed, you are now required to acquire your fast field reader via the

 Tantivy 0.9.0
 =====================
 *0.9.0 index format is not compatible with the
 previous index format.*
 - MAJOR BUGFIX:
 Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
 - Removed most unsafe. (@fulmicoton)
 - Indexer memory footprint improved (VInt comp, inlining the first block). (@fulmicoton)
 - Stemming in other languages possible. (@pentlander)
 - Segments with no docs are deleted earlier. (@barrotsteindev)
 - Added grouped add and delete operations.
 They are guaranteed to happen together (i.e. they cannot be split by a commit).
 In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
 - Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
 for int fields. (@fulmicoton)
@@ -155,26 +191,26 @@ tantivy 0.9 brought some API breaking change.
 To update from tantivy 0.8, you will need to go through the following steps.

 - `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::INT_STORED`.
 - The index does not hold the pool of searchers anymore. You are required to create an intermediary object called
 `IndexReader` for this.

 ```rust
 // Create the reader. You typically need to create 1 reader for the entire
 // lifetime of your program.
 let reader = index.reader()?;

 // Acquiring a searcher (previously `index.searcher()`) is now written:
 let searcher = reader.searcher();

 // With the default setting of the reader, you are not required to
 // call `index.load_searchers()` anymore.
 //
 // The IndexReader will pick up that change automatically, regardless
 // of whether the update was done in a different process or not.
 // If this behavior is not wanted, you can create your reader with
 // `ReloadPolicy::Manual`, and manually decide when to reload the index
 // by calling `reader.reload()?`.
 ```

@@ -189,7 +225,7 @@ Tantivy 0.8.1
 =====================
 Hotfix of #476.

 Merge was reflecting deletes before the commit was passed.
 Thanks @barrotsteindev for reporting the bug.

@@ -197,7 +233,7 @@ Tantivy 0.8.0
 =====================
 *No change in the index format*
 - API breaking change in the collector API. (@jwolfe, @fulmicoton)
 - Multithreaded search. (@jwolfe, @fulmicoton)

 Tantivy 0.7.1
@@ -225,7 +261,7 @@ Tantivy 0.6.1
 - Exclusive `field:{startExcl to endExcl}`
 - Mixed `field:[startIncl to endExcl}` and vice versa
 - Unbounded `field:[start to *]`, `field:[* to end]`

 Tantivy 0.6
 ==========================
@@ -233,10 +269,10 @@ Tantivy 0.6
 Special thanks to @drusellers and @jason-wolfe for their contributions
 to this release!

-- Removed C code. Tantivy is now pure Rust. (@pmasurel)
+- Removed C code. Tantivy is now pure Rust. (@fulmicoton)
-- BM25 (@pmasurel)
+- BM25 (@fulmicoton)
-- Approximate field norms encoded over 1 byte. (@pmasurel)
+- Approximate field norms encoded over 1 byte. (@fulmicoton)
-- Compiles on stable Rust (@pmasurel)
+- Compiles on stable Rust (@fulmicoton)
 - Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
 - Completely uncompressed
 - Internally: one u64 fast field for indexes, one fast field for the bytes themselves.
@@ -244,7 +280,7 @@ to this release!
 - Add Stopword Filter support (@drusellers)
 - Add a FuzzyTermQuery (@drusellers)
 - Add a RegexQuery (@drusellers)
-- Various performance improvements (@pmasurel)
+- Various performance improvements (@fulmicoton)

 Tantivy 0.5.2
Cargo.toml | 57

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.12.0"
+version = "0.14.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,51 +13,51 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"

 [dependencies]
-base64 = "0.12.0"
+base64 = "0.13"
-byteorder = "1.0"
+byteorder = "1"
-crc32fast = "1.2.0"
+crc32fast = "1"
-once_cell = "1.0"
+once_cell = "1"
-regex ={version = "1.3.0", default-features = false, features = ["std"]}
+regex ={version = "1", default-features = false, features = ["std"]}
 tantivy-fst = "0.3"
 memmap = {version = "0.7", optional=true}
-lz4 = {version="1.20", optional=true}
+lz4 = {version="1", optional=true}
+brotli = {version="3.3.0", optional=true}
 snap = "1"
-atomicwrites = {version="0.2.2", optional=true}
+tempfile = {version="3", optional=true}
-tempfile = "3.0"
 log = "0.4"
-serde = {version="1.0", features=["derive"]}
+serde = {version="1", features=["derive"]}
-serde_json = "1.0"
+serde_json = "1"
-num_cpus = "1.2"
+num_cpus = "1"
 fs2={version="0.4", optional=true}
 levenshtein_automata = "0.2"
-notify = {version="4", optional=true}
 uuid = { version = "0.8", features = ["v4", "serde"] }
-crossbeam = "0.7"
+crossbeam = "0.8"
 futures = {version = "0.3", features=["thread-pool"] }
-owning_ref = "0.4"
+tantivy-query-grammar = { version="0.14.0", path="./query-grammar" }
-stable_deref_trait = "1.0.0"
+stable_deref_trait = "1"
-rust-stemmers = "1.2"
+rust-stemmers = "1"
-downcast-rs = { version="1.0" }
+downcast-rs = "1"
-tantivy-query-grammar = { version="0.13", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
 census = "0.4"
-fnv = "1.0.6"
+fnv = "1"
-owned-read = "0.4"
+thiserror = "1.0"
-failure = "0.1"
+htmlescape = "0.3"
-htmlescape = "0.3.1"
 fail = "0.4"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1.0"
+smallvec = "1"
 rayon = "1"
+lru = "0.6"

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"

 [dev-dependencies]
-rand = "0.7"
+rand = "0.8"
 maplit = "1"
 matches = "0.1.8"
+proptest = "0.10"
+criterion = "0.3"

 [dev-dependencies.fail]
 version = "0.4"
@@ -74,7 +74,8 @@ overflow-checks = true

 [features]
 default = ["mmap"]
-mmap = ["atomicwrites", "fs2", "memmap", "notify"]
+mmap = ["fs2", "tempfile", "memmap"]
+brotli-compression = ["brotli"]
 lz4-compression = ["lz4"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
@@ -97,3 +98,7 @@ travis-ci = { repository = "tantivy-search/tantivy" }
 name = "failpoints"
 path = "tests/failpoints/mod.rs"
 required-features = ["fail/failpoints"]

+[[bench]]
+name = "analyzer"
+harness = false
README.md | 14

@@ -1,11 +1,10 @@
 [](https://travis-ci.org/tantivy-search/tantivy)
 [](https://codecov.io/gh/tantivy-search/tantivy)
 [](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [](https://opensource.org/licenses/MIT)
-[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
+[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/main)
 [](https://crates.io/crates/tantivy)
-[](https://saythanks.io/to/fulmicoton)


@@ -34,11 +33,6 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
 performance for different types of queries / collections.

-In general, Tantivy tends to be
-- slower than Lucene on union with a Top-K due to Block-WAND optimization.
-- faster than Lucene on intersection and phrase queries.
-
 Your mileage WILL vary depending on the nature of queries and their load.

 # Features
@@ -90,7 +84,7 @@ There are many ways to support this project.
 - Help with documentation by asking questions or submitting PRs
 - Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
 - Talk about Tantivy around you
-- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)
+- [](https://www.patreon.com/fulmicoton)

 # Contributing code
benches/alice.txt | 3774 (new file)
File diff suppressed because it is too large.
benches/analyzer.rs | 22 (new file)

@@ -0,0 +1,22 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+use tantivy::tokenizer::TokenizerManager;
+
+const ALICE_TXT: &'static str = include_str!("alice.txt");
+
+pub fn criterion_benchmark(c: &mut Criterion) {
+    let tokenizer_manager = TokenizerManager::default();
+    let tokenizer = tokenizer_manager.get("default").unwrap();
+    c.bench_function("default-tokenize-alice", |b| {
+        b.iter(|| {
+            let mut word_count = 0;
+            let mut token_stream = tokenizer.token_stream(ALICE_TXT);
+            while token_stream.advance() {
+                word_count += 1;
+            }
+            assert_eq!(word_count, 30_731);
+        })
+    });
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
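Usage note: with the `[[bench]]` target added to Cargo.toml above (`harness = false`, since criterion brings its own harness), this benchmark should be runnable with `cargo bench --bench analyzer`.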
@@ -112,18 +112,6 @@ fn main() -> tantivy::Result<()> {
     limbs and branches that arch over the pool"
     ));

-    index_writer.add_document(doc!(
-    title => "Of Mice and Men",
-    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
-    bank and runs deep and green. The water is warm too, for it has slipped twinkling \
-    over the yellow sands in the sunlight before reaching the narrow pool. On one \
-    side of the river the golden foothill slopes curve up to the strong and rocky \
-    Gabilan Mountains, but on the valley side the water is lined with trees—willows \
-    fresh and green with every spring, carrying in their lower leaf junctures the \
-    debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
-    limbs and branches that arch over the pool"
-    ));

     // Multivalued field just need to be repeated.
     index_writer.add_document(doc!(
     title => "Frankenstein",
@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::Field;
 use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::{doc, Index, SegmentReader, TantivyError};
+use tantivy::{doc, Index, Score, SegmentReader};

 #[derive(Default)]
 struct Stats {
@@ -72,16 +72,7 @@ impl Collector for StatsCollector {
         _segment_local_id: u32,
         segment_reader: &SegmentReader,
     ) -> tantivy::Result<StatsSegmentCollector> {
-        let fast_field_reader = segment_reader
-            .fast_fields()
-            .u64(self.field)
-            .ok_or_else(|| {
-                let field_name = segment_reader.schema().get_field_name(self.field);
-                TantivyError::SchemaError(format!(
-                    "Field {:?} is not a u64 fast field.",
-                    field_name
-                ))
-            })?;
+        let fast_field_reader = segment_reader.fast_fields().u64(self.field)?;
         Ok(StatsSegmentCollector {
             fast_field_reader,
             stats: Stats::default(),
@@ -114,7 +105,7 @@ struct StatsSegmentCollector {
 impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;

-    fn collect(&mut self, doc: u32, _score: f32) {
+    fn collect(&mut self, doc: u32, _score: Score) {
         let value = self.fast_field_reader.get(doc) as f64;
         self.stats.count += 1;
         self.stats.sum += value;
@@ -56,12 +56,12 @@ fn main() -> tantivy::Result<()> {
     );
     let top_docs_by_custom_score =
         TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-            let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
+            let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
             let facet_dict = ingredient_reader.facet_dict();

             let query_ords: HashSet<u64> = facets
                 .iter()
-                .filter_map(|key| facet_dict.term_ord(key.encoded_str()))
+                .filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
                 .collect();

             let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);
@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
     // Inverted index stands for the combination of
     // - the term dictionary
     // - the inverted lists associated to each terms and their positions
-    let inverted_index = segment_reader.inverted_index(title);
+    let inverted_index = segment_reader.inverted_index(title)?;

     // A `Term` is a text token associated with a field.
     // Let's go through all docs containing the term `title:the` and access their position
@@ -58,7 +58,7 @@ fn main() -> tantivy::Result<()> {
     // If you don't need all this information, you may get better performance by decompressing less
     // information.
     if let Some(mut segment_postings) =
-        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
+        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
     {
         // this buffer will be used to request for positions
         let mut positions: Vec<u32> = Vec::with_capacity(100);
@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
     // Inverted index stands for the combination of
     // - the term dictionary
     // - the inverted lists associated to each terms and their positions
-    let inverted_index = segment_reader.inverted_index(title);
+    let inverted_index = segment_reader.inverted_index(title)?;

     // This segment posting object is like a cursor over the documents matching the term.
     // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
@@ -115,13 +115,18 @@ fn main() -> tantivy::Result<()> {
     // If you don't need all this information, you may get better performance by decompressing less
     // information.
     if let Some(mut block_segment_postings) =
-        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
+        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
     {
-        while block_segment_postings.advance() {
+        loop {
+            let docs = block_segment_postings.docs();
+            if docs.is_empty() {
+                break;
+            }
             // Once again these docs MAY contains deleted documents as well.
             let docs = block_segment_postings.docs();
             // Prints `Docs [0, 2].`
             println!("Docs {:?}", docs);
+            block_segment_postings.advance();
         }
     }
 }
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.13.0"
+version = "0.14.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -31,22 +31,12 @@ impl Occur {

     /// Compose two occur values.
     pub fn compose(left: Occur, right: Occur) -> Occur {
-        match left {
-            Occur::Should => right,
-            Occur::Must => {
-                if right == Occur::MustNot {
-                    Occur::MustNot
-                } else {
-                    Occur::Must
-                }
-            }
-            Occur::MustNot => {
-                if right == Occur::MustNot {
-                    Occur::Must
-                } else {
-                    Occur::MustNot
-                }
-            }
+        match (left, right) {
+            (Occur::Should, _) => right,
+            (Occur::Must, Occur::MustNot) => Occur::MustNot,
+            (Occur::Must, _) => Occur::Must,
+            (Occur::MustNot, Occur::MustNot) => Occur::Must,
+            (Occur::MustNot, _) => Occur::MustNot,
         }
     }
 }
@@ -56,3 +46,27 @@ impl fmt::Display for Occur {
         f.write_char(self.to_char())
     }
 }
+
+#[cfg(test)]
+mod test {
+    use crate::Occur;
+
+    #[test]
+    fn test_occur_compose() {
+        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
+        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
+        assert_eq!(
+            Occur::compose(Occur::Should, Occur::MustNot),
+            Occur::MustNot
+        );
+        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
+        assert_eq!(
+            Occur::compose(Occur::MustNot, Occur::Should),
+            Occur::MustNot
+        );
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
+    }
+}
@@ -9,8 +9,10 @@ use combine::{

 fn field<'a>() -> impl Parser<&'a str, Output = String> {
     (
-        letter(),
-        many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
+        (letter().or(char('_'))),
+        many(satisfy(|c: char| {
+            c.is_alphanumeric() || c == '_' || c == '-'
+        })),
     )
         .skip(char(':'))
         .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
@@ -180,7 +182,7 @@ fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAS
     (optional(occur_symbol()), boosted_leaf())
 }

-fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
+fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
     (many1(digit()), optional((char('.'), many1(digit())))).map(
         |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
             let mut float_str = int_part;
@@ -188,18 +190,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
                 float_str.push(chr);
                 float_str.push_str(&decimal_str);
             }
-            float_str.parse::<f32>().unwrap()
+            float_str.parse::<f64>().unwrap()
         },
     )
 }

-fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
+fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
     (char('^'), positive_float_number()).map(|(_, boost)| boost)
 }

 fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
-        Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
+        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
             UserInputAST::Boost(Box::new(leaf), boost)
         }
         _ => leaf,
@@ -279,14 +281,16 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
 #[cfg(test)]
 mod test {

+    type TestParseResult = Result<(), StringStreamError>;
+
     use super::*;
     use combine::parser::Parser;

-    pub fn nearly_equals(a: f32, b: f32) -> bool {
+    pub fn nearly_equals(a: f64, b: f64) -> bool {
         (a - b).abs() < 0.0005 * (a + b).abs()
     }

-    fn assert_nearly_equals(expected: f32, val: f32) {
+    fn assert_nearly_equals(expected: f64, val: f64) {
         assert!(
             nearly_equals(val, expected),
             "Got {}, expected {}.",
@@ -296,14 +300,15 @@ mod test {
     }

     #[test]
-    fn test_occur_symbol() {
-        assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
-        assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
+    fn test_occur_symbol() -> TestParseResult {
+        assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
+        assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
+        Ok(())
     }

     #[test]
     fn test_positive_float_number() {
-        fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
+        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
             let (val, remaining) = positive_float_number().parse(float_str).unwrap();
             assert_eq!(remaining, expected_remaining);
             assert_nearly_equals(val, expected_val);
@@ -311,9 +316,9 @@ mod test {
         fn error_parse(float_str: &str) {
             assert!(positive_float_number().parse(float_str).is_err());
         }
-        valid_parse("1.0", 1.0f32, "");
+        valid_parse("1.0", 1.0, "");
-        valid_parse("1", 1.0f32, "");
+        valid_parse("1", 1.0, "");
-        valid_parse("0.234234 aaa", 0.234234f32, " aaa");
+        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
         error_parse(".3332");
         error_parse("1.");
         error_parse("-1.");
@@ -410,6 +415,25 @@ mod test {
         assert_eq!(format!("{:?}", ast), "\"abc\"");
     }

+    #[test]
+    fn test_field_name() -> TestParseResult {
+        assert_eq!(
+            super::field().parse("my-field-name:a")?,
+            ("my-field-name".to_string(), "a")
+        );
+        assert_eq!(
+            super::field().parse("my_field_name:a")?,
+            ("my_field_name".to_string(), "a")
+        );
+        assert!(super::field().parse(":a").is_err());
+        assert!(super::field().parse("-my_field:a").is_err());
+        assert_eq!(
+            super::field().parse("_my_field:a")?,
+            ("_my_field".to_string(), "a")
+        );
+        Ok(())
+    }
+
     #[test]
     fn test_range_parser() {
         // testing the range() parser separately
@@ -87,7 +87,7 @@ impl UserInputBound {
 pub enum UserInputAST {
     Clause(Vec<(Option<Occur>, UserInputAST)>),
     Leaf(Box<UserInputLeaf>),
-    Boost(Box<UserInputAST>, f32),
+    Boost(Box<UserInputAST>, f64),
 }

 impl UserInputAST {
@@ -96,18 +96,18 @@ mod tests {
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1f32);
+        count_collector.collect(0u32, 1.0);
         assert_eq!(count_collector.harvest(), 1);
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1f32);
+        count_collector.collect(0u32, 1.0);
         assert_eq!(count_collector.harvest(), 1);
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1f32);
+        count_collector.collect(0u32, 1.0);
-        count_collector.collect(1u32, 1f32);
+        count_collector.collect(1u32, 1.0);
         assert_eq!(count_collector.harvest(), 2);
     }
 }
@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {

 impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
 where
-    TCustomScorer: CustomScorer<TScore>,
+    TCustomScorer: CustomScorer<TScore> + Send + Sync,
     TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
     type Fruit = Vec<(TScore, DocAddress)>;
@@ -58,10 +58,10 @@ where
         segment_local_id: u32,
         segment_reader: &SegmentReader,
     ) -> crate::Result<Self::Child> {
-        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
         let segment_collector = self
             .collector
             .for_segment(segment_local_id, segment_reader)?;
+        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
         Ok(CustomScoreTopSegmentCollector {
             segment_collector,
             segment_scorer,
src/collector/docset_collector.rs | 61 (new file)

@@ -0,0 +1,61 @@
+use std::collections::HashSet;
+
+use crate::{DocAddress, DocId, Score};
+
+use super::{Collector, SegmentCollector};
+
+/// Collectors that returns the set of DocAddress that matches the query.
+///
+/// This collector is mostly useful for tests.
+pub struct DocSetCollector;
+
+impl Collector for DocSetCollector {
+    type Fruit = HashSet<DocAddress>;
+    type Child = DocSetChildCollector;
+
+    fn for_segment(
+        &self,
+        segment_local_id: crate::SegmentLocalId,
+        _segment: &crate::SegmentReader,
+    ) -> crate::Result<Self::Child> {
+        Ok(DocSetChildCollector {
+            segment_local_id,
+            docs: HashSet::new(),
+        })
+    }
+
+    fn requires_scoring(&self) -> bool {
+        false
+    }
+
+    fn merge_fruits(
+        &self,
+        segment_fruits: Vec<(u32, HashSet<DocId>)>,
+    ) -> crate::Result<Self::Fruit> {
+        let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
+        let mut result = HashSet::with_capacity(len);
+        for (segment_local_id, docs) in segment_fruits {
+            for doc in docs {
+                result.insert(DocAddress(segment_local_id, doc));
+            }
+        }
+        Ok(result)
+    }
+}
+
+pub struct DocSetChildCollector {
+    segment_local_id: u32,
+    docs: HashSet<DocId>,
+}
+
+impl SegmentCollector for DocSetChildCollector {
+    type Fruit = (u32, HashSet<DocId>);
+
+    fn collect(&mut self, doc: crate::DocId, _score: Score) {
+        self.docs.insert(doc);
+    }
+
+    fn harvest(self) -> (u32, HashSet<DocId>) {
+        (self.segment_local_id, self.docs)
+    }
+}
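A hypothetical usage sketch (not part of the diff; the import path is assumed): because the collector's fruit is a `HashSet<DocAddress>`, it plugs into `Searcher::search` like any other collector:

```rust
use std::collections::HashSet;
use tantivy::collector::DocSetCollector; // assumed re-export of the new collector
use tantivy::DocAddress;

// `searcher` and `query` are assumed to already exist.
let matching_docs: HashSet<DocAddress> = searcher.search(&query, &DocSetCollector)?;
```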
@@ -7,7 +7,6 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use crate::TantivyError;
 use std::cmp::Ordering;
 use std::collections::btree_map;
 use std::collections::BTreeMap;
@@ -266,10 +265,7 @@ impl Collector for FacetCollector {
         _: SegmentLocalId,
         reader: &SegmentReader,
     ) -> crate::Result<FacetSegmentCollector> {
-        let field_name = reader.schema().get_field_name(self.field);
-        let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
-            TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
-        })?;
+        let facet_reader = reader.facet_reader(self.field)?;

         let mut collapse_mapping = Vec::new();
         let mut counts = Vec::new();
@@ -278,7 +274,7 @@ impl Collector for FacetCollector {
         let mut collapse_facet_it = self.facets.iter().peekable();
         collapse_facet_ords.push(0);
         {
-            let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
+            let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
             if facet_streamer.advance() {
                 'outer: loop {
                     // at the begining of this loop, facet_streamer
@@ -372,9 +368,12 @@ impl SegmentCollector for FacetSegmentCollector {
         }
         let mut facet = vec![];
         let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
-        facet_dict.ord_to_term(facet_ord as u64, &mut facet);
-        // TODO
-        facet_counts.insert(Facet::from_encoded(facet).unwrap(), count);
+        // TODO handle errors.
+        if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
+            if let Ok(facet) = Facet::from_encoded(facet) {
+                facet_counts.insert(facet, count);
+            }
+        }
     }
     FacetCounts { facet_counts }
 }
@@ -399,6 +398,8 @@ impl<'a> Iterator for FacetChildIterator<'a> {
 }

 impl FacetCounts {
+    /// Returns an iterator over all of the facet count pairs inside this result.
+    /// See the documentation for `FacetCollector` for a usage example.
     pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
     where
         Facet: From<T>,
@@ -418,6 +419,8 @@ impl FacetCounts {
         FacetChildIterator { underlying }
     }

+    /// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts.
+    /// See the documentation for `FacetCollector` for a usage example.
     pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
     where
         Facet: From<T>,
@@ -472,7 +475,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);

-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let num_facets: usize = 3 * 4 * 5;
         let facets: Vec<Facet> = (0..num_facets)
             .map(|mut n| {
@@ -531,7 +534,7 @@ mod tests {
         let facet_field = schema_builder.add_facet_field("facets");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/subjects/A/a"),
             facet_field => Facet::from_text(&"/subjects/B/a"),
@@ -550,12 +553,12 @@ mod tests {
     }

     #[test]
-    fn test_doc_search_by_facet() {
+    fn test_doc_search_by_facet() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let facet_field = schema_builder.add_facet_field("facet");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/A/A"),
         ));
@@ -568,8 +571,8 @@ mod tests {
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/D/C/A"),
         ));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
         let searcher = reader.searcher();
         assert_eq!(searcher.num_docs(), 4);

@@ -586,17 +589,17 @@ mod tests {
         assert_eq!(count_facet("/A/C"), 1);
         assert_eq!(count_facet("/A/C/A"), 1);
         assert_eq!(count_facet("/C/A"), 0);

+        let query_parser = QueryParser::for_index(&index, vec![]);
         {
-            let query_parser = QueryParser::for_index(&index, vec![]);
-            {
-                let query = query_parser.parse_query("facet:/A/B").unwrap();
-                assert_eq!(1, searcher.search(&query, &Count).unwrap());
-            }
-            {
-                let query = query_parser.parse_query("facet:/A").unwrap();
-                assert_eq!(3, searcher.search(&query, &Count).unwrap());
-            }
+            let query = query_parser.parse_query("facet:/A/B")?;
+            assert_eq!(1, searcher.search(&query, &Count).unwrap());
         }
+        {
+            let query = query_parser.parse_query("facet:/A")?;
+            assert_eq!(3, searcher.search(&query, &Count)?);
+        }
+        Ok(())
     }

     #[test]
@@ -631,7 +634,7 @@ mod tests {
     .collect();
|
.collect();
|
||||||
docs[..].shuffle(&mut thread_rng());
|
docs[..].shuffle(&mut thread_rng());
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
for doc in docs {
|
for doc in docs {
|
||||||
index_writer.add_document(doc);
|
index_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
@@ -684,7 +687,7 @@ mod bench {
|
|||||||
// 40425 docs
|
// 40425 docs
|
||||||
docs[..].shuffle(&mut thread_rng());
|
docs[..].shuffle(&mut thread_rng());
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
for doc in docs {
|
for doc in docs {
|
||||||
index_writer.add_document(doc);
|
index_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
183 src/collector/filter_collector_wrapper.rs Normal file
@@ -0,0 +1,183 @@
+// # Custom collector example
+//
+// This example shows how you can implement your own
+// collector. As an example, we will compute a collector
+// that computes the standard deviation of a given fast field.
+//
+// Of course, you can have a look at the tantivy's built-in collectors
+// such as the `CountCollector` for more examples.
+
+// ---
+// Importing tantivy...
+use std::marker::PhantomData;
+
+use crate::collector::{Collector, SegmentCollector};
+use crate::fastfield::{FastFieldReader, FastValue};
+use crate::schema::Field;
+use crate::{Score, SegmentReader, TantivyError};
+
+/// The `FilterCollector` collector filters docs using a u64 fast field value and a predicate.
+/// Only the documents for which the predicate returned "true" will be passed on to the next collector.
+///
+/// ```rust
+/// use tantivy::collector::{TopDocs, FilterCollector};
+/// use tantivy::query::QueryParser;
+/// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
+/// use tantivy::{doc, DocAddress, Index};
+///
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
+///
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64));
+/// index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64));
+/// assert!(index_writer.commit().is_ok());
+///
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
+///
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
+/// let top_docs = searcher.search(&query, &no_filter_collector).unwrap();
+///
+/// assert_eq!(top_docs.len(), 1);
+/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
+///
+/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
+/// let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
+///
+/// assert_eq!(filtered_top_docs.len(), 0);
+/// ```
+pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue>
+where
+TPredicate: 'static,
+{
+field: Field,
+collector: TCollector,
+predicate: &'static TPredicate,
+t_predicate_value: PhantomData<TPredicateValue>,
+}
+
+impl<TCollector, TPredicate, TPredicateValue: FastValue>
+FilterCollector<TCollector, TPredicate, TPredicateValue>
+where
+TCollector: Collector + Send + Sync,
+TPredicate: Fn(TPredicateValue) -> bool + Send + Sync,
+{
+/// Create a new FilterCollector.
+pub fn new(
+field: Field,
+predicate: &'static TPredicate,
+collector: TCollector,
+) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
+FilterCollector {
+field,
+predicate,
+collector,
+t_predicate_value: PhantomData,
+}
+}
+}
+
+impl<TCollector, TPredicate, TPredicateValue: FastValue> Collector
+for FilterCollector<TCollector, TPredicate, TPredicateValue>
+where
+TCollector: Collector + Send + Sync,
+TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
+TPredicateValue: 'static + FastValue,
+{
+// That's the type of our result.
+// Our standard deviation will be a float.
+type Fruit = TCollector::Fruit;
+
+type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
+
+fn for_segment(
+&self,
+segment_local_id: u32,
+segment_reader: &SegmentReader,
+) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
+let schema = segment_reader.schema();
+let field_entry = schema.get_field_entry(self.field);
+if !field_entry.is_fast() {
+return Err(TantivyError::SchemaError(format!(
+"Field {:?} is not a fast field.",
+field_entry.name()
+)));
+}
+let requested_type = TPredicateValue::to_type();
+let field_schema_type = field_entry.field_type().value_type();
+if requested_type != field_schema_type {
+return Err(TantivyError::SchemaError(format!(
+"Field {:?} is of type {:?}!={:?}",
+field_entry.name(),
+requested_type,
+field_schema_type
+)));
+}
+
+let fast_field_reader = segment_reader
+.fast_fields()
+.typed_fast_field_reader(self.field)?;
+
+let segment_collector = self
+.collector
+.for_segment(segment_local_id, segment_reader)?;
+
+Ok(FilterSegmentCollector {
+fast_field_reader,
+segment_collector,
+predicate: self.predicate,
+t_predicate_value: PhantomData,
+})
+}
+
+fn requires_scoring(&self) -> bool {
+self.collector.requires_scoring()
+}
+
+fn merge_fruits(
+&self,
+segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
+) -> crate::Result<TCollector::Fruit> {
+self.collector.merge_fruits(segment_fruits)
+}
+}
+
+pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
+where
+TPredicate: 'static,
+TPredicateValue: 'static + FastValue,
+{
+fast_field_reader: FastFieldReader<TPredicateValue>,
+segment_collector: TSegmentCollector,
+predicate: &'static TPredicate,
+t_predicate_value: PhantomData<TPredicateValue>,
+}
+
+impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
+for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
+where
+TSegmentCollector: SegmentCollector,
+TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
+TPredicateValue: 'static + FastValue,
+{
+type Fruit = TSegmentCollector::Fruit;
+
+fn collect(&mut self, doc: u32, score: Score) {
+let value = self.fast_field_reader.get(doc);
+if (self.predicate)(value) {
+self.segment_collector.collect(doc, score)
+}
+}
+
+fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
+self.segment_collector.harvest()
+}
+}
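Editorial note on the new collector's API, since it is easy to trip on: the predicate is stored as a `&'static TPredicate`, so the reference handed to `FilterCollector::new` must live for the whole program — a non-capturing closure (as in the doc-test above, which Rust promotes to a 'static borrow) or a plain `fn` both work. A minimal sketch under that assumption; the `price` field and the helper names below are illustrative, not part of this patch:

// Sketch: filtering on a u64 FAST field with a plain `fn` predicate.
use tantivy::collector::{FilterCollector, TopDocs};
use tantivy::query::Query;
use tantivy::schema::Field;
use tantivy::{DocAddress, Score, Searcher};

// A plain `fn` trivially satisfies the `&'static TPredicate` requirement.
fn affordable(price: u64) -> bool {
    price < 25_000
}

// Run `query`, keep only documents whose `price` fast field passes the
// predicate, and return the top 10 of what remains.
fn top_affordable_docs(
    searcher: &Searcher,
    query: &dyn Query,
    price: Field,
) -> tantivy::Result<Vec<(Score, DocAddress)>> {
    let collector = FilterCollector::new(price, &affordable, TopDocs::with_limit(10));
    searcher.search(query, &collector)
}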
@@ -1,127 +0,0 @@
-use std::cmp::Eq;
-use std::collections::HashMap;
-use std::hash::Hash;
-
-use collector::Collector;
-use fastfield::FastFieldReader;
-use schema::Field;
-
-use DocId;
-use Result;
-use Score;
-use SegmentReader;
-use SegmentLocalId;
-
-
-/// Facet collector for i64/u64 fast field
-pub struct IntFacetCollector<T>
-where
-T: FastFieldReader,
-T::ValueType: Eq + Hash,
-{
-counters: HashMap<T::ValueType, u64>,
-field: Field,
-ff_reader: Option<T>,
-}
-
-
-impl<T> IntFacetCollector<T>
-where
-T: FastFieldReader,
-T::ValueType: Eq + Hash,
-{
-/// Creates a new facet collector for aggregating a given field.
-pub fn new(field: Field) -> IntFacetCollector<T> {
-IntFacetCollector {
-counters: HashMap::new(),
-field: field,
-ff_reader: None,
-}
-}
-}
-
-
-impl<T> Collector for IntFacetCollector<T>
-where
-T: FastFieldReader,
-T::ValueType: Eq + Hash,
-{
-fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
-self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
-Ok(())
-}
-
-fn collect(&mut self, doc: DocId, _: Score) {
-let val = self.ff_reader
-.as_ref()
-.expect(
-"collect() was called before set_segment. \
-This should never happen.",
-)
-.get(doc);
-*(self.counters.entry(val).or_insert(0)) += 1;
-}
-}
-
-
-
-#[cfg(test)]
-mod tests {
-
-use collector::{chain, IntFacetCollector};
-use query::QueryParser;
-use fastfield::{I64FastFieldReader, U64FastFieldReader};
-use schema::{self, FAST, STRING};
-use Index;
-
-#[test]
-// create 10 documents, set num field value to 0 or 1 for even/odd ones
-// make sure we have facet counters correctly filled
-fn test_facet_collector_results() {
-
-let mut schema_builder = schema::Schema::builder();
-let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
-let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
-let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
-let text_field = schema_builder.add_text_field("text", STRING);
-let schema = schema_builder.build();
-
-let index = Index::create_in_ram(schema.clone());
-
-{
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-{
-for i in 0u64..10u64 {
-index_writer.add_document(doc!(
-num_field_i64 => ((i as i64) % 3i64) as i64,
-num_field_u64 => (i % 2u64) as u64,
-num_field_f64 => (i % 4u64) as f64,
-text_field => "text"
-));
-}
-}
-assert_eq!(index_writer.commit().unwrap(), 10u64);
-}
-
-let searcher = index.reader().searcher();
-let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
-let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
-let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
-
-{
-// perform the query
-let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
-let mut query_parser = QueryParser::for_index(index, vec![text_field]);
-let query = query_parser.parse_query("text:text").unwrap();
-query.search(&searcher, &mut facet_collectors).unwrap();
-}
-
-assert_eq!(ffvf_u64.counters[&0], 5);
-assert_eq!(ffvf_u64.counters[&1], 5);
-assert_eq!(ffvf_i64.counters[&0], 4);
-assert_eq!(ffvf_i64.counters[&1], 3);
-assert_eq!(ffvf_f64.counters[&0.0], 3);
-assert_eq!(ffvf_f64.counters[&2.0], 2);
-
-}
-}
@@ -109,8 +109,15 @@ pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
 mod facet_collector;
 pub use self::facet_collector::FacetCollector;
+pub use self::facet_collector::FacetCounts;
 use crate::query::Weight;

+mod docset_collector;
+pub use self::docset_collector::DocSetCollector;
+
+mod filter_collector_wrapper;
+pub use self::filter_collector_wrapper::FilterCollector;
+
 /// `Fruit` is the type for the result of our collection.
 /// e.g. `usize` for the `Count` collector.
 pub trait Fruit: Send + downcast_rs::Downcast {}
@@ -133,13 +140,13 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
 /// The collection logic itself is in the `SegmentCollector`.
 ///
 /// Segments are not guaranteed to be visited in any specific order.
-pub trait Collector: Sync {
+pub trait Collector: Sync + Send {
 /// `Fruit` is the type for the result of our collection.
 /// e.g. `usize` for the `Count` collector.
 type Fruit: Fruit;

 /// Type of the `SegmentCollector` associated to this collector.
-type Child: SegmentCollector<Fruit = Self::Fruit>;
+type Child: SegmentCollector;

 /// `set_segment` is called before beginning to enumerate
 /// on this segment.
@@ -154,7 +161,10 @@ pub trait Collector: Sync {

 /// Combines the fruit associated to the collection of each segments
 /// into one fruit.
-fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
+fn merge_fruits(
+&self,
+segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+) -> crate::Result<Self::Fruit>;

 /// Created a segment collector and
 fn collect_segment(
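Editorial note: the practical effect of this signature change is that `merge_fruits` now receives the per-segment fruits (`<Self::Child as SegmentCollector>::Fruit`) rather than `Self::Fruit`, so a collector's final fruit no longer has to be the same type as what each segment produces. A minimal sketch of a collector written against the new shape, assuming the usual public re-exports (`tantivy::collector::{Collector, SegmentCollector}`, `tantivy::SegmentLocalId`); the counting collector itself is illustrative, not part of this patch:

// Sketch only: a doc-counting collector under the new merge_fruits signature.
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::{Score, SegmentLocalId, SegmentReader};

struct SegmentCountCollector;

struct CountPerSegment {
    count: usize,
}

impl SegmentCollector for CountPerSegment {
    type Fruit = usize;

    fn collect(&mut self, _doc: u32, _score: Score) {
        self.count += 1;
    }

    fn harvest(self) -> usize {
        self.count
    }
}

impl Collector for SegmentCountCollector {
    type Fruit = usize;
    type Child = CountPerSegment;

    fn for_segment(
        &self,
        _segment_local_id: SegmentLocalId,
        _reader: &SegmentReader,
    ) -> tantivy::Result<CountPerSegment> {
        Ok(CountPerSegment { count: 0 })
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    // The merged fruit is now built from the per-segment (Child) fruits.
    fn merge_fruits(
        &self,
        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
    ) -> tantivy::Result<usize> {
        Ok(segment_fruits.into_iter().sum())
    }
}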
@@ -224,11 +234,11 @@ where

 fn merge_fruits(
 &self,
-children: Vec<(Left::Fruit, Right::Fruit)>,
+segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
 ) -> crate::Result<(Left::Fruit, Right::Fruit)> {
 let mut left_fruits = vec![];
 let mut right_fruits = vec![];
-for (left_fruit, right_fruit) in children {
+for (left_fruit, right_fruit) in segment_fruits {
 left_fruits.push(left_fruit);
 right_fruits.push(right_fruit);
 }
@@ -282,7 +292,10 @@ where
 self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
 }

-fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
+fn merge_fruits(
+&self,
+children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+) -> crate::Result<Self::Fruit> {
 let mut one_fruits = vec![];
 let mut two_fruits = vec![];
 let mut three_fruits = vec![];
@@ -349,7 +362,10 @@ where
 || self.3.requires_scoring()
 }

-fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
+fn merge_fruits(
+&self,
+children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+) -> crate::Result<Self::Fruit> {
 let mut one_fruits = vec![];
 let mut two_fruits = vec![];
 let mut three_fruits = vec![];
@@ -34,13 +34,13 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {

 fn merge_fruits(
 &self,
-children: Vec<<Self as Collector>::Fruit>,
+children: Vec<<Self::Child as SegmentCollector>::Fruit>,
 ) -> crate::Result<Box<dyn Fruit>> {
-let typed_fruit: Vec<TCollector::Fruit> = children
+let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
 .into_iter()
 .map(|untyped_fruit| {
 untyped_fruit
-.downcast::<TCollector::Fruit>()
+.downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
 .map(|boxed_but_typed| *boxed_but_typed)
 .map_err(|_| {
 TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
 type Fruit = Box<dyn Fruit>;

-fn collect(&mut self, doc: u32, score: f32) {
+fn collect(&mut self, doc: u32, score: Score) {
 self.as_mut().collect(doc, score);
 }

@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
 }

 pub trait BoxableSegmentCollector {
-fn collect(&mut self, doc: u32, score: f32);
+fn collect(&mut self, doc: u32, score: Score);
 fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
 }

@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
 impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
 for SegmentCollectorWrapper<TSegmentCollector>
 {
-fn collect(&mut self, doc: u32, score: f32) {
+fn collect(&mut self, doc: u32, score: Score) {
 self.0.collect(doc, score);
 }

@@ -259,7 +259,7 @@ mod tests {

 let index = Index::create_in_ram(schema);
 {
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_for_tests().unwrap();
 index_writer.add_document(doc!(text=>"abc"));
 index_writer.add_document(doc!(text=>"abc abc abc"));
 index_writer.add_document(doc!(text=>"abc abc"));
@@ -8,6 +8,13 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;

+use crate::collector::{FilterCollector, TopDocs};
+use crate::query::QueryParser;
+use crate::schema::{Schema, FAST, TEXT};
+use crate::DateTime;
+use crate::{doc, Index};
+use std::str::FromStr;
+
 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
 compute_score: true,
 };
@@ -16,6 +23,54 @@ pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
 compute_score: true,
 };

+#[test]
+pub fn test_filter_collector() {
+let mut schema_builder = Schema::builder();
+let title = schema_builder.add_text_field("title", TEXT);
+let price = schema_builder.add_u64_field("price", FAST);
+let date = schema_builder.add_date_field("date", FAST);
+let schema = schema_builder.build();
+let index = Index::create_in_ram(schema);
+
+let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_str("1898-04-09T00:00:00+00:00").unwrap()));
+index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_str("2020-04-09T00:00:00+00:00").unwrap()));
+index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_str("2019-04-20T00:00:00+00:00").unwrap()));
+index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()));
+index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_str("2018-04-09T00:00:00+00:00").unwrap()));
+assert!(index_writer.commit().is_ok());
+
+let reader = index.reader().unwrap();
+let searcher = reader.searcher();
+
+let query_parser = QueryParser::for_index(&index, vec![title]);
+let query = query_parser.parse_query("diary").unwrap();
+let filter_some_collector = FilterCollector::new(
+price,
+&|value: u64| value > 20_120u64,
+TopDocs::with_limit(2),
+);
+let top_docs = searcher.search(&query, &filter_some_collector).unwrap();
+
+assert_eq!(top_docs.len(), 1);
+assert_eq!(top_docs[0].1, DocAddress(0, 1));
+
+let filter_all_collector: FilterCollector<_, _, u64> =
+FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
+let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
+
+assert_eq!(filtered_top_docs.len(), 0);
+
+fn date_filter(value: DateTime) -> bool {
+(value - DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()).num_weeks() > 0
+}
+
+let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
+let filtered_date_docs = searcher.search(&query, &filter_dates_collector).unwrap();
+
+assert_eq!(filtered_date_docs.len(), 2);
+}
+
 /// Stores all of the doc ids.
 /// This collector is only used for tests.
 /// It is unusable in pr
@@ -185,12 +240,10 @@ impl Collector for BytesFastFieldTestCollector {
 _segment_local_id: u32,
 segment_reader: &SegmentReader,
 ) -> crate::Result<BytesFastFieldSegmentCollector> {
+let reader = segment_reader.fast_fields().bytes(self.field)?;
 Ok(BytesFastFieldSegmentCollector {
 vals: Vec::new(),
-reader: segment_reader
-.fast_fields()
-.bytes(self.field)
-.expect("Field is not a bytes fast field."),
+reader,
 })
 }

@@ -206,7 +259,7 @@ impl Collector for BytesFastFieldTestCollector {
 impl SegmentCollector for BytesFastFieldSegmentCollector {
 type Fruit = Vec<u8>;

-fn collect(&mut self, doc: u32, _score: f32) {
+fn collect(&mut self, doc: u32, _score: Score) {
 let data = self.reader.get_bytes(doc);
 self.vals.extend(data);
 }
@@ -2,9 +2,9 @@ use crate::DocAddress;
 use crate::DocId;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use serde::export::PhantomData;
 use std::cmp::Ordering;
 use std::collections::BinaryHeap;
+use std::marker::PhantomData;

 /// Contains a feature (field, score, etc.) of a document along with the document address.
 ///
@@ -1,6 +1,4 @@
 use super::Collector;
-use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
-use crate::collector::top_collector::TopSegmentCollector;
 use crate::collector::top_collector::{ComparableDoc, TopCollector};
 use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
@@ -14,8 +12,71 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use std::collections::BinaryHeap;
+use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
+use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
 use std::fmt;
+use std::{collections::BinaryHeap, marker::PhantomData};
+
+struct FastFieldConvertCollector<
+TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
+TFastValue: FastValue,
+> {
+pub collector: TCollector,
+pub field: Field,
+pub fast_value: std::marker::PhantomData<TFastValue>,
+}
+
+impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
+where
+TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
+TFastValue: FastValue + 'static,
+{
+type Fruit = Vec<(TFastValue, DocAddress)>;
+
+type Child = TCollector::Child;
+
+fn for_segment(
+&self,
+segment_local_id: crate::SegmentLocalId,
+segment: &SegmentReader,
+) -> crate::Result<Self::Child> {
+let schema = segment.schema();
+let field_entry = schema.get_field_entry(self.field);
+if !field_entry.is_fast() {
+return Err(TantivyError::SchemaError(format!(
+"Field {:?} is not a fast field.",
+field_entry.name()
+)));
+}
+let schema_type = TFastValue::to_type();
+let requested_type = field_entry.field_type().value_type();
+if schema_type != requested_type {
+return Err(TantivyError::SchemaError(format!(
+"Field {:?} is of type {:?}!={:?}",
+field_entry.name(),
+schema_type,
+requested_type
+)));
+}
+self.collector.for_segment(segment_local_id, segment)
+}
+
+fn requires_scoring(&self) -> bool {
+self.collector.requires_scoring()
+}
+
+fn merge_fruits(
+&self,
+segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+) -> crate::Result<Self::Fruit> {
+let raw_result = self.collector.merge_fruits(segment_fruits)?;
+let transformed_result = raw_result
+.into_iter()
+.map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
+.collect::<Vec<_>>();
+Ok(transformed_result)
+}
+}
+
 /// The `TopDocs` collector keeps track of the top `K` documents
 /// sorted by their score.
@@ -38,7 +99,7 @@ use std::fmt;
 /// let schema = schema_builder.build();
 /// let index = Index::create_in_ram(schema);
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
 /// index_writer.add_document(doc!(title => "The Name of the Wind"));
 /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
 /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -52,8 +113,8 @@ use std::fmt;
 /// let query = query_parser.parse_query("diary").unwrap();
 /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
 ///
-/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
-/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
+/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
+/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
 /// ```
 pub struct TopDocs(TopCollector<Score>);

@@ -73,7 +134,7 @@ struct ScorerByFastFieldReader {

 impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
 fn score(&mut self, doc: DocId) -> u64 {
-self.ff_reader.get_u64(u64::from(doc))
+self.ff_reader.get(doc)
 }
 }

@@ -85,15 +146,14 @@ impl CustomScorer<u64> for ScorerByField {
 type Child = ScorerByFastFieldReader;

 fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
-let ff_reader = segment_reader
+// We interpret this field as u64, regardless of its type, that way,
+// we avoid needless conversion. Regardless of the fast field type, the
+// mapping is monotonic, so it is sufficient to compute our top-K docs.
+//
+// The conversion will then happen only on the top-K docs.
+let ff_reader: FastFieldReader<u64> = segment_reader
 .fast_fields()
-.u64(self.field)
-.ok_or_else(|| {
-crate::TantivyError::SchemaError(format!(
-"Field requested ({:?}) is not a i64/u64 fast field.",
-self.field
-))
-})?;
+.typed_fast_field_reader(self.field)?;
 Ok(ScorerByFastFieldReader { ff_reader })
 }
 }
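Editorial note: the comment introduced above leans on the fact that every fast value has an order-preserving u64 form, so ranking can be done entirely on u64 and converted back only for the final top-K (that is what `FastFieldConvertCollector` does with `TFastValue::from_u64`). A sketch of the standard monotonic encodings; the helper names are illustrative and not tantivy's API:

// Sketch of order-preserving u64 encodings for fast field values.
fn i64_to_u64_monotonic(val: i64) -> u64 {
    // Flipping the sign bit shifts the range so that i64::MIN maps to 0
    // and i64::MAX maps to u64::MAX, preserving order.
    (val as u64) ^ (1u64 << 63)
}

fn f64_to_u64_monotonic(val: f64) -> u64 {
    let bits = val.to_bits();
    if bits >> 63 == 0 {
        // Positive floats: set the sign bit so they sort above all negatives.
        bits | (1u64 << 63)
    } else {
        // Negative floats: invert all bits to reverse their (descending) bit order.
        !bits
    }
}

fn main() {
    assert!(i64_to_u64_monotonic(-1) < i64_to_u64_monotonic(0));
    assert!(f64_to_u64_monotonic(-2.5) < f64_to_u64_monotonic(1.0));
}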
@@ -112,6 +172,8 @@ impl TopDocs {
 /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
 /// Lucene's TopDocsCollector.
 ///
+/// # Example
+///
 /// ```rust
 /// use tantivy::collector::TopDocs;
 /// use tantivy::query::QueryParser;
@@ -123,7 +185,7 @@ impl TopDocs {
 /// let schema = schema_builder.build();
 /// let index = Index::create_in_ram(schema);
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
 /// index_writer.add_document(doc!(title => "The Name of the Wind"));
 /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
 /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -139,8 +201,8 @@ impl TopDocs {
 /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
 ///
 /// assert_eq!(top_docs.len(), 2);
-/// assert_eq!(&top_docs[0], &(0.5204813, DocAddress(0, 4)));
-/// assert_eq!(&top_docs[1], &(0.4793185, DocAddress(0, 3)));
+/// assert_eq!(top_docs[0].1, DocAddress(0, 4));
+/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
 /// ```
 pub fn and_offset(self, offset: usize) -> TopDocs {
 TopDocs(self.0.and_offset(offset))
@@ -148,6 +210,14 @@ impl TopDocs {

 /// Set top-K to rank documents by a given fast field.
 ///
+/// If the field is not a fast or does not exist, this method returns successfully (it is not aware of any schema).
+/// An error will be returned at the moment of search.
+///
+/// If the field is a FAST field but not a u64 field, search will return successfully but it will return
+/// returns a monotonic u64-representation (ie. the order is still correct) of the requested field type.
+///
+/// # Example
+///
 /// ```rust
 /// # use tantivy::schema::{Schema, FAST, TEXT};
 /// # use tantivy::{doc, Index, DocAddress};
@@ -161,15 +231,15 @@ impl TopDocs {
 /// # let title = schema_builder.add_text_field("title", TEXT);
 /// # let rating = schema_builder.add_u64_field("rating", FAST);
 /// # let schema = schema_builder.build();
 /// #
 /// # let index = Index::create_in_ram(schema);
-/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
 /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
 /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
 /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
 /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
 /// # assert!(index_writer.commit().is_ok());
-/// # let reader = index.reader().unwrap();
+/// # let reader = index.reader()?;
 /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
 /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
 /// # assert_eq!(top_docs,
@@ -177,26 +247,21 @@ impl TopDocs {
 /// # (80u64, DocAddress(0u32, 3))]);
 /// # Ok(())
 /// # }
-///
-///
 /// /// Searches the document matching the given query, and
 /// /// collects the top 10 documents, order by the u64-`field`
 /// /// given in argument.
-/// ///
-/// /// `field` is required to be a FAST field.
 /// fn docs_sorted_by_rating(searcher: &Searcher,
 /// query: &dyn Query,
-/// sort_by_field: Field)
+/// rating_field: Field)
 /// -> tantivy::Result<Vec<(u64, DocAddress)>> {
 ///
 /// // This is where we build our topdocs collector
 /// //
-/// // Note the generics parameter that needs to match the
-/// // type `sort_by_field`.
-/// let top_docs_by_rating = TopDocs
+/// // Note the `rating_field` needs to be a FAST field here.
+/// let top_books_by_rating = TopDocs
 /// ::with_limit(10)
-/// .order_by_u64_field(sort_by_field);
+/// .order_by_u64_field(rating_field);
 ///
 /// // ... and here are our documents. Note this is a simple vec.
 /// // The `u64` in the pair is the value of our fast field for
 /// // each documents.
@@ -205,21 +270,105 @@ impl TopDocs {
 /// // length of 10, or less if not enough documents matched the
 /// // query.
 /// let resulting_docs: Vec<(u64, DocAddress)> =
-/// searcher.search(query, &top_docs_by_rating)?;
+/// searcher.search(query, &top_books_by_rating)?;
 ///
 /// Ok(resulting_docs)
 /// }
 /// ```
 ///
-/// # Panics
-///
-/// May panic if the field requested is not a fast field.
+/// # See also
 ///
+/// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
+/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
 pub fn order_by_u64_field(
 self,
 field: Field,
 ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
-self.custom_score(ScorerByField { field })
+CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore())
+}
+
+/// Set top-K to rank documents by a given fast field.
+///
+/// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
+/// but an explicit error will be returned at the moment of collection.
+///
+/// Note that this method is a generic. The requested fast field type will be often
+/// inferred in your code by the rust compiler.
+///
+/// Implementation-wise, for performance reason, tantivy will manipulate the u64 representation of your fast
+/// field until the last moment.
+///
+/// # Example
+///
+/// ```rust
+/// # use tantivy::schema::{Schema, FAST, TEXT};
+/// # use tantivy::{doc, Index, DocAddress};
+/// # use tantivy::query::{Query, AllQuery};
+/// use tantivy::Searcher;
+/// use tantivy::collector::TopDocs;
+/// use tantivy::schema::Field;
+///
+/// # fn main() -> tantivy::Result<()> {
+/// # let mut schema_builder = Schema::builder();
+/// # let title = schema_builder.add_text_field("company", TEXT);
+/// # let rating = schema_builder.add_i64_field("revenue", FAST);
+/// # let schema = schema_builder.build();
+/// #
+/// # let index = Index::create_in_ram(schema);
+/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
+/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
+/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
+/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
+/// # assert!(index_writer.commit().is_ok());
+/// # let reader = index.reader()?;
+/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
+/// # assert_eq!(top_docs,
+/// # vec![(119_000_000i64, DocAddress(0, 1)),
+/// # (92_000_000i64, DocAddress(0, 0))]);
+/// # Ok(())
+/// # }
+/// /// Searches the document matching the given query, and
+/// /// collects the top 10 documents, order by the u64-`field`
+/// /// given in argument.
+/// fn docs_sorted_by_revenue(searcher: &Searcher,
+/// query: &dyn Query,
+/// revenue_field: Field)
+/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
+///
+/// // This is where we build our topdocs collector
+/// //
+/// // Note the generics parameter that needs to match the
+/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
+/// let top_company_by_revenue = TopDocs
+/// ::with_limit(2)
+/// .order_by_fast_field(revenue_field);
+///
+/// // ... and here are our documents. Note this is a simple vec.
+/// // The `i64` in the pair is the value of our fast field for
+/// // each documents.
+/// //
+/// // The vec is sorted decreasingly by `sort_by_field`, and has a
+/// // length of 10, or less if not enough documents matched the
+/// // query.
+/// let resulting_docs: Vec<(i64, DocAddress)> =
+/// searcher.search(query, &top_company_by_revenue)?;
+///
+/// Ok(resulting_docs)
+/// }
+/// ```
+pub fn order_by_fast_field<TFastValue>(
+self,
+fast_field: Field,
+) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
+where
+TFastValue: FastValue + 'static,
+{
+let u64_collector = self.order_by_u64_field(fast_field);
+FastFieldConvertCollector {
+collector: u64_collector,
+field: fast_field,
+fast_value: PhantomData,
+}
 }

 /// Ranks the documents using a custom score.
@@ -242,7 +391,7 @@ impl TopDocs {
|
|||||||
///
|
///
|
||||||
/// In the following example will will tweak our ranking a bit by
|
/// In the following example will will tweak our ranking a bit by
|
||||||
/// boosting popular products a notch.
|
/// boosting popular products a notch.
|
||||||
///
|
///
|
||||||
/// In more serious application, this tweaking could involved running a
|
/// In more serious application, this tweaking could involved running a
|
||||||
/// learning-to-rank model over various features
|
/// learning-to-rank model over various features
|
||||||
///
|
///
|
||||||
@@ -264,7 +413,7 @@ impl TopDocs {
|
|||||||
/// fn create_index() -> tantivy::Result<Index> {
|
/// fn create_index() -> tantivy::Result<Index> {
|
||||||
/// let schema = create_schema();
|
/// let schema = create_schema();
|
||||||
/// let index = Index::create_in_ram(schema);
|
/// let index = Index::create_in_ram(schema);
|
||||||
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||||
/// let product_name = index.schema().get_field("product_name").unwrap();
|
/// let product_name = index.schema().get_field("product_name").unwrap();
|
||||||
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
||||||
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
|
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
|
||||||
@@ -303,7 +452,7 @@ impl TopDocs {
|
|||||||
/// let popularity: u64 = popularity_reader.get(doc);
|
/// let popularity: u64 = popularity_reader.get(doc);
|
||||||
/// // Well.. For the sake of the example we use a simple logarithm
|
/// // Well.. For the sake of the example we use a simple logarithm
|
||||||
/// // function.
|
/// // function.
|
||||||
/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
|
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
|
||||||
/// popularity_boost_score * original_score
|
/// popularity_boost_score * original_score
|
||||||
/// }
|
/// }
|
||||||
/// });
|
/// });
|
||||||
@@ -324,7 +473,7 @@ impl TopDocs {
|
|||||||
where
|
where
|
||||||
TScore: 'static + Send + Sync + Clone + PartialOrd,
|
TScore: 'static + Send + Sync + Clone + PartialOrd,
|
||||||
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
|
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
|
||||||
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
|
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
|
||||||
{
|
{
|
||||||
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
|
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
|
||||||
}
|
}
|
||||||
@@ -371,9 +520,9 @@ impl TopDocs {
|
|||||||
/// # fn main() -> tantivy::Result<()> {
|
/// # fn main() -> tantivy::Result<()> {
|
||||||
/// # let schema = create_schema();
|
/// # let schema = create_schema();
|
||||||
/// # let index = Index::create_in_ram(schema);
|
/// # let index = Index::create_in_ram(schema);
|
||||||
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||||
/// # let product_name = index.schema().get_field("product_name").unwrap();
|
/// # let product_name = index.schema().get_field("product_name").unwrap();
|
||||||
/// #
|
/// #
|
||||||
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
||||||
/// let boosted: Field = index.schema().get_field("boosted").unwrap();
|
/// let boosted: Field = index.schema().get_field("boosted").unwrap();
|
||||||
/// # index_writer.add_document(doc!(boosted=>1u64, product_name => "The Diary of Muadib", popularity => 1u64));
|
/// # index_writer.add_document(doc!(boosted=>1u64, product_name => "The Diary of Muadib", popularity => 1u64));
|
||||||
@@ -407,7 +556,7 @@ impl TopDocs {
|
|||||||
/// segment_reader.fast_fields().u64(popularity).unwrap();
|
/// segment_reader.fast_fields().u64(popularity).unwrap();
|
||||||
/// let boosted_reader =
|
/// let boosted_reader =
|
||||||
/// segment_reader.fast_fields().u64(boosted).unwrap();
|
/// segment_reader.fast_fields().u64(boosted).unwrap();
|
||||||
///
|
///
|
||||||
/// // We can now define our actual scoring function
|
/// // We can now define our actual scoring function
|
||||||
/// move |doc: DocId| {
|
/// move |doc: DocId| {
|
||||||
/// let popularity: u64 = popularity_reader.get(doc);
|
/// let popularity: u64 = popularity_reader.get(doc);
|
||||||
@@ -438,7 +587,7 @@ impl TopDocs {
|
|||||||
where
|
where
|
||||||
TScore: 'static + Send + Sync + Clone + PartialOrd,
|
TScore: 'static + Send + Sync + Clone + PartialOrd,
|
||||||
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
|
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
|
||||||
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
|
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
|
||||||
{
|
{
|
||||||
CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
|
CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
|
||||||
}
|
}
|
||||||
@@ -479,7 +628,7 @@ impl Collector for TopDocs {
 let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);

 if let Some(delete_bitset) = reader.delete_bitset() {
-let mut threshold = f32::MIN;
+let mut threshold = Score::MIN;
 weight.for_each_pruning(threshold, reader, &mut |doc, score| {
 if delete_bitset.is_deleted(doc) {
 return threshold;
@@ -491,16 +640,16 @@ impl Collector for TopDocs {
 if heap.len() < heap_len {
 heap.push(heap_item);
 if heap.len() == heap_len {
-threshold = heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
+threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
 }
 return threshold;
 }
 *heap.peek_mut().unwrap() = heap_item;
-threshold = heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN);
+threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
 threshold
 })?;
 } else {
-weight.for_each_pruning(f32::MIN, reader, &mut |doc, score| {
+weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
 let heap_item = ComparableDoc {
 feature: score,
 doc,
@@ -509,13 +658,13 @@ impl Collector for TopDocs {
 heap.push(heap_item);
 // TODO the threshold is suboptimal for heap.len == heap_len
 if heap.len() == heap_len {
-return heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
+return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
 } else {
-return f32::MIN;
+return Score::MIN;
 }
 }
 *heap.peek_mut().unwrap() = heap_item;
-heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN)
+heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
 })?;
 }
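The collector keeps the current top-k candidates in a `BinaryHeap` of `ComparableDoc` values and feeds the weakest retained score back to `for_each_pruning` as a pruning threshold. A stand-alone sketch of that pattern, using integer scores so the ordering is total (this is not tantivy's code, just the technique):

use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Threshold-based top-k selection: once the heap is full, anything at or
// below `threshold` (the smallest retained score) can be skipped outright.
fn top_k(scored_docs: &[(u32, u64)], k: usize) -> Vec<(u32, u64)> {
    if k == 0 {
        return Vec::new();
    }
    // Reverse turns the max-heap into a min-heap on score, so peek() is the weakest entry.
    let mut heap: BinaryHeap<Reverse<(u64, u32)>> = BinaryHeap::with_capacity(k);
    let mut threshold = u64::MIN;
    for &(doc, score) in scored_docs {
        if heap.len() == k && score <= threshold {
            continue; // pruned: cannot enter the current top-k
        }
        if heap.len() < k {
            heap.push(Reverse((score, doc)));
        } else {
            // Replace the weakest entry; PeekMut re-sifts the heap on drop.
            *heap.peek_mut().unwrap() = Reverse((score, doc));
        }
        if heap.len() == k {
            threshold = heap.peek().map(|Reverse((s, _))| *s).unwrap_or(u64::MIN);
        }
    }
    let mut out: Vec<(u32, u64)> = heap.into_iter().map(|Reverse((s, d))| (d, s)).collect();
    out.sort_by(|a, b| b.1.cmp(&a.1));
    out
}

fn main() {
    let docs = [(0u32, 3u64), (1, 17), (2, 9), (3, 11)];
    assert_eq!(top_k(&docs, 2), vec![(1, 17), (3, 11)]);
}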
@@ -561,7 +710,7 @@ mod tests {
 let index = Index::create_in_ram(schema);
 {
 // writing the segment
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
 index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
 index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
 index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -570,8 +719,15 @@ mod tests {
 index
 }

+fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
+for (result, expected) in results.iter().zip(expected.iter()) {
+assert_eq!(result.1, expected.1);
+crate::assert_nearly_equals!(result.0, expected.0);
+}
+}
+
 #[test]
-fn test_top_collector_not_at_capacity() {
+fn test_top_collector_not_at_capacity_without_offset() {
 let index = make_index();
 let field = index.schema().get_field("text").unwrap();
 let query_parser = QueryParser::for_index(&index, vec![field]);
@@ -582,13 +738,13 @@ mod tests {
 .searcher()
 .search(&text_query, &TopDocs::with_limit(4))
 .unwrap();
-assert_eq!(
-score_docs,
-vec![
+assert_results_equals(
+&score_docs,
+&[
 (0.81221175, DocAddress(0u32, 1)),
 (0.5376842, DocAddress(0u32, 2)),
-(0.48527452, DocAddress(0, 0))
-]
+(0.48527452, DocAddress(0, 0)),
+],
 );
 }

@@ -604,7 +760,7 @@ mod tests {
 .searcher()
 .search(&text_query, &TopDocs::with_limit(4).and_offset(2))
 .unwrap();
-assert_eq!(score_docs, vec![(0.48527452, DocAddress(0, 0))]);
+assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
 }

 #[test]
@@ -619,12 +775,12 @@ mod tests {
 .searcher()
 .search(&text_query, &TopDocs::with_limit(2))
 .unwrap();
-assert_eq!(
-score_docs,
-vec![
+assert_results_equals(
+&score_docs,
+&[
 (0.81221175, DocAddress(0u32, 1)),
 (0.5376842, DocAddress(0u32, 2)),
-]
+],
 );
 }

@@ -640,12 +796,12 @@ mod tests {
 .searcher()
 .search(&text_query, &TopDocs::with_limit(2).and_offset(1))
 .unwrap();
-assert_eq!(
-score_docs,
-vec![
+assert_results_equals(
+&score_docs[..],
+&[
 (0.5376842, DocAddress(0u32, 2)),
-(0.48527452, DocAddress(0, 0))
-]
+(0.48527452, DocAddress(0, 0)),
+],
 );
 }

@@ -706,8 +862,8 @@ mod tests {
 let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
 let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
 assert_eq!(
-top_docs,
-vec![
+&top_docs[..],
+&[
 (64, DocAddress(0, 1)),
 (16, DocAddress(0, 2)),
 (12, DocAddress(0, 0))
@@ -715,6 +871,94 @@ mod tests {
 );
 }

+#[test]
+fn test_top_field_collector_datetime() -> crate::Result<()> {
+use std::str::FromStr;
+let mut schema_builder = Schema::builder();
+let name = schema_builder.add_text_field("name", TEXT);
+let birthday = schema_builder.add_date_field("birthday", FAST);
+let schema = schema_builder.build();
+let index = Index::create_in_ram(schema);
+let mut index_writer = index.writer_for_tests()?;
+let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
+index_writer.add_document(doc!(
+name => "Paul Robeson",
+birthday => pr_birthday
+));
+let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
+index_writer.add_document(doc!(
+name => "Minnie Riperton",
+birthday => mr_birthday
+));
+index_writer.commit()?;
+let searcher = index.reader()?.searcher();
+let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
+let top_docs: Vec<(crate::DateTime, DocAddress)> =
+searcher.search(&AllQuery, &top_collector)?;
+assert_eq!(
+&top_docs[..],
+&[
+(mr_birthday, DocAddress(0, 1)),
+(pr_birthday, DocAddress(0, 0)),
+]
+);
+Ok(())
+}
+
+#[test]
+fn test_top_field_collector_i64() -> crate::Result<()> {
+let mut schema_builder = Schema::builder();
+let city = schema_builder.add_text_field("city", TEXT);
+let altitude = schema_builder.add_i64_field("altitude", FAST);
+let schema = schema_builder.build();
+let index = Index::create_in_ram(schema);
+let mut index_writer = index.writer_for_tests()?;
+index_writer.add_document(doc!(
+city => "georgetown",
+altitude => -1i64,
+));
+index_writer.add_document(doc!(
+city => "tokyo",
+altitude => 40i64,
+));
+index_writer.commit()?;
+let searcher = index.reader()?.searcher();
+let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
+let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
+assert_eq!(
+&top_docs[..],
+&[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
+);
+Ok(())
+}
+
+#[test]
+fn test_top_field_collector_f64() -> crate::Result<()> {
+let mut schema_builder = Schema::builder();
+let city = schema_builder.add_text_field("city", TEXT);
+let altitude = schema_builder.add_f64_field("altitude", FAST);
+let schema = schema_builder.build();
+let index = Index::create_in_ram(schema);
+let mut index_writer = index.writer_for_tests()?;
+index_writer.add_document(doc!(
+city => "georgetown",
+altitude => -1.0f64,
+));
+index_writer.add_document(doc!(
+city => "tokyo",
+altitude => 40f64,
+));
+index_writer.commit()?;
+let searcher = index.reader()?.searcher();
+let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
+let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
+assert_eq!(
+&top_docs[..],
+&[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
+);
+Ok(())
+}
+
 #[test]
 #[should_panic]
 fn test_field_does_not_exist() {
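The tests added above exercise `order_by_fast_field`, which is generic over the fast-field value type (here `DateTime`, `i64`, and `f64`). The same pattern through the public API looks like the sketch below; it mirrors the i64 test but uses an ordinary `IndexWriter` with an explicit heap size instead of the test-only `writer_for_tests` helper:

use tantivy::collector::TopDocs;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{doc, DocAddress, Index};

// Sketch: order search results by an i64 fast field instead of by relevance.
fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let city = schema_builder.add_text_field("city", TEXT);
    let altitude = schema_builder.add_i64_field("altitude", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(10_000_000)?;
    writer.add_document(doc!(city => "georgetown", altitude => -1i64));
    writer.add_document(doc!(city => "tokyo", altitude => 40i64));
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // The collector's fruit is Vec<(i64, DocAddress)>, highest value first.
    let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
    let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
    assert_eq!(top_docs.first().map(|(alt, _)| *alt), Some(40));
    Ok(())
}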
@@ -737,29 +981,39 @@ mod tests {
 }

 #[test]
-fn test_field_not_fast_field() {
+fn test_field_not_fast_field() -> crate::Result<()> {
 let mut schema_builder = Schema::builder();
-let title = schema_builder.add_text_field(TITLE, TEXT);
 let size = schema_builder.add_u64_field(SIZE, STORED);
 let schema = schema_builder.build();
-let (index, _) = index("beer", title, schema, |index_writer| {
-index_writer.add_document(doc!(
-title => "bottle of beer",
-size => 12u64,
-));
-});
-let searcher = index.reader().unwrap().searcher();
+let index = Index::create_in_ram(schema);
+let mut index_writer = index.writer_for_tests()?;
+index_writer.add_document(doc!(size=>1u64));
+index_writer.commit()?;
+let searcher = index.reader()?.searcher();
 let segment = searcher.segment_reader(0);
 let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
-let err = top_collector.for_segment(0, segment);
-if let Err(crate::TantivyError::SchemaError(msg)) = err {
-assert_eq!(
-msg,
-"Field requested (Field(1)) is not a i64/u64 fast field."
-);
-} else {
-assert!(false);
-}
+let err = top_collector.for_segment(0, segment).err().unwrap();
+assert!(matches!(err, crate::TantivyError::SchemaError(_)));
+Ok(())
+}
+
+#[test]
+fn test_field_wrong_type() -> crate::Result<()> {
+let mut schema_builder = Schema::builder();
+let size = schema_builder.add_u64_field(SIZE, STORED);
+let schema = schema_builder.build();
+let index = Index::create_in_ram(schema);
+let mut index_writer = index.writer_for_tests()?;
+index_writer.add_document(doc!(size=>1u64));
+index_writer.commit()?;
+let searcher = index.reader()?.searcher();
+let segment = searcher.segment_reader(0);
+let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
+let err = top_collector.for_segment(0, segment).err().unwrap();
+assert!(
+matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
+);
+Ok(())
 }

 #[test]
@@ -813,8 +1067,7 @@ mod tests {
 mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
 ) -> (Index, Box<dyn Query>) {
 let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
 doc_adder(&mut index_writer);
 index_writer.commit().unwrap();
 let query_parser = QueryParser::for_index(&index, vec![query_field]);
@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {

 impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
 where
-TScoreTweaker: ScoreTweaker<TScore>,
+TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
 TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
 type Fruit = Vec<(TScore, DocAddress)>;
@@ -1,6 +1,7 @@
 use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
 use std::io;
-use std::ops::Deref;
+use crate::directory::OwnedBytes;

 pub(crate) struct BitPacker {
 mini_buffer: u64,
@@ -60,20 +61,14 @@ impl BitPacker {
 }

 #[derive(Clone)]
-pub struct BitUnpacker<Data>
-where
-Data: Deref<Target = [u8]>,
-{
+pub struct BitUnpacker {
 num_bits: u64,
 mask: u64,
-data: Data,
+data: OwnedBytes,
 }

-impl<Data> BitUnpacker<Data>
-where
-Data: Deref<Target = [u8]>,
-{
-pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
+impl BitUnpacker {
+pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker {
 let mask: u64 = if num_bits == 64 {
 !0u64
 } else {
@@ -90,7 +85,7 @@ where
 if self.num_bits == 0 {
 return 0u64;
 }
-let data: &[u8] = &*self.data;
+let data: &[u8] = self.data.as_slice();
 let num_bits = self.num_bits;
 let mask = self.mask;
 let addr_in_bits = idx * num_bits;
@@ -109,8 +104,9 @@ where
 #[cfg(test)]
 mod test {
 use super::{BitPacker, BitUnpacker};
+use crate::directory::OwnedBytes;

-fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
+fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) {
 let mut data = Vec::new();
 let mut bitpacker = BitPacker::new();
 let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
@@ -122,7 +118,7 @@ mod test {
 }
 bitpacker.close(&mut data).unwrap();
 assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
-let bitunpacker = BitUnpacker::new(data, num_bits);
+let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits);
 (bitunpacker, vals)
 }
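The unpacker reads a fixed-width value by computing a bit address, loading the bytes that cover it, shifting, and masking. A self-contained sketch of that extraction; it is not tantivy's exact code and, for brevity, only supports widths up to 56 bits so a single 8-byte load always suffices:

// Sketch of fixed-width bit unpacking: value i occupies bits
// [i * num_bits, (i + 1) * num_bits) of a little-endian byte buffer.
fn get_bits(data: &[u8], idx: usize, num_bits: u8) -> u64 {
    assert!(num_bits <= 56, "this sketch keeps every value inside one 8-byte load");
    if num_bits == 0 {
        return 0;
    }
    let mask: u64 = (1u64 << num_bits) - 1;
    let addr_in_bits = idx * num_bits as usize;
    let byte_addr = addr_in_bits / 8;
    let bit_shift = (addr_in_bits % 8) as u32;
    // The caller must pass an index that was actually written; the real writer
    // also pads the buffer so reads near the end stay in bounds.
    assert!(byte_addr < data.len());
    let mut word = [0u8; 8];
    let n = (data.len() - byte_addr).min(8);
    word[..n].copy_from_slice(&data[byte_addr..byte_addr + n]);
    (u64::from_le_bytes(word) >> bit_shift) & mask
}

fn main() {
    // Two 4-bit values packed into one byte: 5 in the low nibble, 6 in the high one.
    let data = [0x65u8, 0, 0, 0, 0, 0, 0, 0]; // padded so the 8-byte load stays in bounds
    assert_eq!(get_bits(&data, 0, 4), 5);
    assert_eq!(get_bits(&data, 1, 4), 6);
}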
@@ -1,14 +1,15 @@
 use crate::common::BinarySerializable;
 use crate::common::CountingWriter;
 use crate::common::VInt;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
 use crate::directory::{TerminatingWrite, WritePtr};
 use crate::schema::Field;
 use crate::space_usage::FieldUsage;
 use crate::space_usage::PerFieldSpaceUsage;
 use std::collections::HashMap;
-use std::io::Write;
-use std::io::{self, Read};
+use std::io::{self, Read, Write};
+use super::HasLen;

 #[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
 pub struct FileAddr {
@@ -103,25 +104,26 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
 /// for each field.
 #[derive(Clone)]
 pub struct CompositeFile {
-data: ReadOnlySource,
+data: FileSlice,
 offsets_index: HashMap<FileAddr, (usize, usize)>,
 }

 impl CompositeFile {
 /// Opens a composite file stored in a given
-/// `ReadOnlySource`.
-pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
+/// `FileSlice`.
+pub fn open(data: &FileSlice) -> io::Result<CompositeFile> {
 let end = data.len();
-let footer_len_data = data.slice_from(end - 4);
+let footer_len_data = data.slice_from(end - 4).read_bytes()?;
 let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
 let footer_start = end - 4 - footer_len;
-let footer_data = data.slice(footer_start, footer_start + footer_len);
+let footer_data = data
+.slice(footer_start, footer_start + footer_len)
+.read_bytes()?;
 let mut footer_buffer = footer_data.as_slice();
 let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;

 let mut file_addrs = vec![];
 let mut offsets = vec![];

 let mut field_index = HashMap::new();

 let mut offset = 0;
@@ -150,19 +152,19 @@ impl CompositeFile {
 pub fn empty() -> CompositeFile {
 CompositeFile {
 offsets_index: HashMap::new(),
-data: ReadOnlySource::empty(),
+data: FileSlice::empty(),
 }
 }

-/// Returns the `ReadOnlySource` associated
+/// Returns the `FileSlice` associated
 /// to a given `Field` and stored in a `CompositeFile`.
-pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
+pub fn open_read(&self, field: Field) -> Option<FileSlice> {
 self.open_read_with_idx(field, 0)
 }

-/// Returns the `ReadOnlySource` associated
+/// Returns the `FileSlice` associated
 /// to a given `Field` and stored in a `CompositeFile`.
-pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
+pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
 self.offsets_index
 .get(&FileAddr { field, idx })
 .map(|&(from, to)| self.data.slice(from, to))
@@ -192,46 +194,44 @@ mod test {
 use std::path::Path;

 #[test]
-fn test_composite_file() {
+fn test_composite_file() -> crate::Result<()> {
 let path = Path::new("test_path");
-let mut directory = RAMDirectory::create();
+let directory = RAMDirectory::create();
 {
 let w = directory.open_write(path).unwrap();
 let mut composite_write = CompositeWrite::wrap(w);
-{
-let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
-VInt(32431123u64).serialize(&mut write_0).unwrap();
-write_0.flush().unwrap();
-}
-{
-let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
-VInt(2).serialize(&mut write_4).unwrap();
-write_4.flush().unwrap();
-}
-composite_write.close().unwrap();
+let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
+VInt(32431123u64).serialize(&mut write_0)?;
+write_0.flush()?;
+let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
+VInt(2).serialize(&mut write_4)?;
+write_4.flush()?;
+composite_write.close()?;
 }
 {
-let r = directory.open_read(path).unwrap();
-let composite_file = CompositeFile::open(&r).unwrap();
+let r = directory.open_read(path)?;
+let composite_file = CompositeFile::open(&r)?;
 {
 let file0 = composite_file
 .open_read(Field::from_field_id(0u32))
-.unwrap();
+.unwrap()
+.read_bytes()?;
 let mut file0_buf = file0.as_slice();
-let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
+let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
 assert_eq!(file0_buf.len(), 0);
 assert_eq!(payload_0, 32431123u64);
 }
 {
 let file4 = composite_file
 .open_read(Field::from_field_id(4u32))
-.unwrap();
+.unwrap()
+.read_bytes()?;
 let mut file4_buf = file4.as_slice();
-let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
+let payload_4 = VInt::deserialize(&mut file4_buf)?.0;
 assert_eq!(file4_buf.len(), 0);
 assert_eq!(payload_4, 2u64);
 }
 }
+Ok(())
 }
 }
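`CompositeFile::open` reads the layout back to front: the last 4 bytes hold the footer length as a little-endian `u32` (tantivy's `Endianness` is `LittleEndian`, per the re-export below), the footer sits just before it, and everything earlier is per-field payload. A hedged, stand-alone sketch of splitting such a trailer from a raw byte slice:

use std::convert::TryInto;

// Sketch of the composite-file trailer layout:
// [ per-field blocks | footer | footer_len: u32 (last 4 bytes, little endian) ]
fn split_footer(data: &[u8]) -> Option<(&[u8], &[u8])> {
    if data.len() < 4 {
        return None;
    }
    let end = data.len() - 4;
    let footer_len = u32::from_le_bytes(data[end..].try_into().ok()?) as usize;
    let footer_start = end.checked_sub(footer_len)?;
    // In the real format the footer starts with a VInt field count followed by
    // per-field offsets; here we just return the two regions.
    Some((&data[..footer_start], &data[footer_start..end]))
}

fn main() {
    let mut file = Vec::new();
    file.extend_from_slice(b"payload");          // per-field blocks
    file.extend_from_slice(b"FTR");              // footer (offset table in the real format)
    file.extend_from_slice(&3u32.to_le_bytes()); // footer length, last 4 bytes
    let (blocks, footer) = split_footer(&file).unwrap();
    assert_eq!(blocks, b"payload");
    assert_eq!(footer, b"FTR");
}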
@@ -20,9 +20,10 @@ impl<W: Write> CountingWriter<W> {
 self.written_bytes
 }

-pub fn finish(mut self) -> io::Result<(W, u64)> {
-self.flush()?;
-Ok((self.underlying, self.written_bytes))
+/// Returns the underlying write object.
+/// Note that this method does not trigger any flushing.
+pub fn finish(self) -> W {
+self.underlying
 }
 }
@@ -46,7 +47,6 @@ impl<W: Write> Write for CountingWriter<W> {

 impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
 fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
-self.flush()?;
 self.underlying.terminate_ref(token)
 }
 }
@@ -63,8 +63,9 @@ mod test {
 let mut counting_writer = CountingWriter::wrap(buffer);
 let bytes = (0u8..10u8).collect::<Vec<u8>>();
 counting_writer.write_all(&bytes).unwrap();
-let (w, len): (Vec<u8>, u64) = counting_writer.finish().unwrap();
+let len = counting_writer.written_bytes();
+let buffer_restituted: Vec<u8> = counting_writer.finish();
 assert_eq!(len, 10u64);
-assert_eq!(w.len(), 10);
+assert_eq!(buffer_restituted.len(), 10);
 }
 }
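With the new signature, `finish` just hands back the wrapped writer without flushing (as its doc comment now notes), and callers read the count through `written_bytes()` first. A minimal stand-alone sketch of such a byte-counting writer, independent of tantivy's `TerminatingWrite` machinery:

use std::io::{self, Write};

// Sketch of a write wrapper that counts bytes as they pass through.
struct CountingWriter<W> {
    underlying: W,
    written_bytes: u64,
}

impl<W: Write> CountingWriter<W> {
    fn wrap(underlying: W) -> Self {
        CountingWriter { underlying, written_bytes: 0 }
    }
    fn written_bytes(&self) -> u64 {
        self.written_bytes
    }
    // Returns the wrapped writer without flushing, mirroring the change above.
    fn finish(self) -> W {
        self.underlying
    }
}

impl<W: Write> Write for CountingWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let n = self.underlying.write(buf)?;
        self.written_bytes += n as u64;
        Ok(n)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.underlying.flush()
    }
}

fn main() -> io::Result<()> {
    let mut w = CountingWriter::wrap(Vec::new());
    w.write_all(b"0123456789")?;
    assert_eq!(w.written_bytes(), 10);
    let buffer: Vec<u8> = w.finish();
    assert_eq!(buffer.len(), 10);
    Ok(())
}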
@@ -10,7 +10,9 @@ pub(crate) use self::bitset::TinySet;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::counting_writer::CountingWriter;
 pub use self::serialize::{BinarySerializable, FixedSize};
-pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
+pub use self::vint::{
+read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
+};
 pub use byteorder::LittleEndian as Endianness;

 /// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -64,10 +66,6 @@ pub(crate) fn compute_num_bits(n: u64) -> u8 {
 }
 }

-pub(crate) fn is_power_of_2(n: usize) -> bool {
-(n > 0) && (n & (n - 1) == 0)
-}
-
 /// Has length trait
 pub trait HasLen {
 /// Return length
@@ -117,11 +115,16 @@ pub fn u64_to_i64(val: u64) -> i64 {
 /// For simplicity, tantivy internally handles `f64` as `u64`.
 /// The mapping is defined by this function.
 ///
-/// Maps `f64` to `u64` so that lexical order is preserved.
+/// Maps `f64` to `u64` in a monotonic manner, so that bytes lexical order is preserved.
 ///
 /// This is more suited than simply casting (`val as u64`)
 /// which would truncate the result
 ///
+/// # Reference
+///
+/// Daniel Lemire's [blog post](https://lemire.me/blog/2020/12/14/converting-floating-point-numbers-to-integers-while-preserving-order/)
+/// explains the mapping in a clear manner.
+///
 /// # See also
 /// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
 #[inline(always)]
@@ -150,6 +153,7 @@ pub(crate) mod test {
 pub use super::minmax;
 pub use super::serialize::test::fixed_size_test;
 use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
+use proptest::prelude::*;
 use std::f64;

 fn test_i64_converter_helper(val: i64) {
@@ -160,6 +164,15 @@ pub(crate) mod test {
 assert_eq!(u64_to_f64(f64_to_u64(val)), val);
 }

+proptest! {
+#[test]
+fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
+let left_u64 = f64_to_u64(left);
+let right_u64 = f64_to_u64(right);
+assert_eq!(left_u64 < right_u64, left < right);
+}
+}
+
 #[test]
 fn test_i64_converter() {
 assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
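The order-preserving `f64`-to-`u64` mapping referenced above (and property-tested with proptest) is the usual bit trick: flip all bits of negative floats and only the sign bit of non-negative ones, so the unsigned integers compare the same way the floats do. A sketch of a common implementation of that mapping and its inverse; it is not necessarily tantivy's exact code:

// Order-preserving f64 <-> u64 mapping (NaN handling ignored for brevity).
fn f64_to_u64_monotonic(val: f64) -> u64 {
    let bits = val.to_bits();
    if bits & (1 << 63) == 0 {
        bits | (1 << 63) // non-negative: shift above every mapped negative value
    } else {
        !bits // negative: invert so "more negative" maps lower
    }
}

fn u64_to_f64_monotonic(val: u64) -> f64 {
    if val & (1 << 63) != 0 {
        f64::from_bits(val & !(1 << 63))
    } else {
        f64::from_bits(!val)
    }
}

fn main() {
    let xs = [-3.5f64, -0.0, 0.0, 1.25, 42.0];
    for w in xs.windows(2) {
        assert!(f64_to_u64_monotonic(w[0]) <= f64_to_u64_monotonic(w[1]));
    }
    assert_eq!(u64_to_f64_monotonic(f64_to_u64_monotonic(1.25)), 1.25);
}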
@@ -89,6 +89,19 @@ impl FixedSize for u64 {
 const SIZE_IN_BYTES: usize = 8;
 }

+impl BinarySerializable for f32 {
+fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+writer.write_f32::<Endianness>(*self)
+}
+fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
+reader.read_f32::<Endianness>()
+}
+}
+
+impl FixedSize for f32 {
+const SIZE_IN_BYTES: usize = 4;
+}
+
 impl BinarySerializable for i64 {
 fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
 writer.write_i64::<Endianness>(*self)
@@ -5,12 +5,12 @@ use std::io::Read;
 use std::io::Write;

 /// Wrapper over a `u64` that serializes as a variable int.
-#[derive(Debug, Eq, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
 pub struct VInt(pub u64);

 const STOP_BIT: u8 = 128;

-pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
+pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
 const START_2: u64 = 1 << 7;
 const START_3: u64 = 1 << 14;
 const START_4: u64 = 1 << 21;
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {

 let val = u64::from(val);
 const STOP_BIT: u64 = 128u64;
-match val {
+let (res, num_bytes) = match val {
 0..=STOP_1 => (val | STOP_BIT, 1),
 START_2..=STOP_2 => (
 (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,7 +56,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
 | (STOP_BIT << (8 * 4)),
 5,
 ),
-}
+};
+LittleEndian::write_u64(&mut buf[..], res);
+&buf[0..num_bytes]
 }

 /// Returns the number of bytes covered by a
@@ -85,23 +87,26 @@ fn vint_len(data: &[u8]) -> usize {
 /// If the buffer does not start by a valid
 /// vint payload
 pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
-let vlen = vint_len(*data);
+let (result, vlen) = read_u32_vint_no_advance(*data);
+*data = &data[vlen..];
+result
+}
+
+pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
+let vlen = vint_len(data);
 let mut result = 0u32;
 let mut shift = 0u64;
 for &b in &data[..vlen] {
 result |= u32::from(b & 127u8) << shift;
 shift += 7;
 }
-*data = &data[vlen..];
-result
+(result, vlen)
 }

 /// Write a `u32` as a vint payload.
 pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
-let (val, num_bytes) = serialize_vint_u32(val);
-let mut buffer = [0u8; 8];
-LittleEndian::write_u64(&mut buffer, val);
-writer.write_all(&buffer[..num_bytes])
+let mut buf = [0u8; 8];
+let data = serialize_vint_u32(val, &mut buf);
+writer.write_all(&data)
 }

 impl VInt {
@@ -172,7 +177,6 @@ mod tests {
 use super::serialize_vint_u32;
 use super::VInt;
 use crate::common::BinarySerializable;
-use byteorder::{ByteOrder, LittleEndian};

 fn aux_test_vint(val: u64) {
 let mut v = [14u8; 10];
@@ -208,12 +212,10 @@ mod tests {

 fn aux_test_serialize_vint_u32(val: u32) {
 let mut buffer = [0u8; 10];
-let mut buffer2 = [0u8; 10];
+let mut buffer2 = [0u8; 8];
 let len_vint = VInt(val as u64).serialize_into(&mut buffer);
-let (vint, len) = serialize_vint_u32(val);
-assert_eq!(len, len_vint, "len wrong for val {}", val);
-LittleEndian::write_u64(&mut buffer2, vint);
-assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
+let res2 = serialize_vint_u32(val, &mut buffer2);
+assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
 }

 #[test]
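This vint format stores 7 payload bits per byte, least-significant group first, and marks the final byte with the stop bit (0x80, the `STOP_BIT` above); `serialize_vint_u32` now writes straight into a caller-provided buffer and returns the filled slice. A self-contained sketch of encoding and decoding in that style, written as a simple loop rather than the branch-free table used above:

const STOP_BIT: u8 = 128;

// Encode: low 7 bits first, stop bit set only on the last byte of the value.
fn encode_vint_u32(mut val: u32, buf: &mut [u8; 8]) -> &[u8] {
    let mut i = 0;
    loop {
        let byte = (val & 127) as u8;
        val >>= 7;
        if val == 0 {
            buf[i] = byte | STOP_BIT; // last byte carries the stop bit
            i += 1;
            return &buf[..i];
        }
        buf[i] = byte;
        i += 1;
    }
}

// Decode: accumulate 7-bit groups until a byte with the stop bit is found.
fn decode_vint_u32(data: &[u8]) -> (u32, usize) {
    let mut result = 0u32;
    let mut shift = 0;
    for (i, &b) in data.iter().enumerate() {
        result |= u32::from(b & 127) << shift;
        if b & STOP_BIT != 0 {
            return (result, i + 1);
        }
        shift += 7;
    }
    panic!("buffer did not contain a terminated vint");
}

fn main() {
    let mut buf = [0u8; 8];
    for &val in &[0u32, 127, 128, 1_000_000, u32::MAX] {
        let encoded = encode_vint_u32(val, &mut buf).to_vec();
        assert_eq!(decode_vint_u32(&encoded), (val, encoded.len()));
    }
}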
@@ -5,6 +5,7 @@ use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::core::SegmentMetaInventory;
 use crate::core::META_FILEPATH;
+use crate::directory::error::OpenReadError;
 use crate::directory::ManagedDirectory;
 #[cfg(feature = "mmap")]
 use crate::directory::MmapDirectory;
@@ -21,7 +22,6 @@ use crate::schema::FieldType;
 use crate::schema::Schema;
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::IndexWriter;
-use std::borrow::BorrowMut;
 use std::collections::HashSet;
 use std::fmt;

@@ -35,12 +35,21 @@ fn load_metas(
 inventory: &SegmentMetaInventory,
 ) -> crate::Result<IndexMeta> {
 let meta_data = directory.atomic_read(&META_FILEPATH)?;
-let meta_string = String::from_utf8_lossy(&meta_data);
+let meta_string = String::from_utf8(meta_data).map_err(|_utf8_err| {
+error!("Meta data is not valid utf8.");
+DataCorruption::new(
+META_FILEPATH.to_path_buf(),
+"Meta file does not contain valid utf8 file.".to_string(),
+)
+})?;
 IndexMeta::deserialize(&meta_string, &inventory)
 .map_err(|e| {
 DataCorruption::new(
 META_FILEPATH.to_path_buf(),
-format!("Meta file cannot be deserialized. {:?}.", e),
+format!(
+"Meta file cannot be deserialized. {:?}. Content: {:?}",
+e, meta_string
+),
 )
 })
 .map_err(From::from)
@@ -57,8 +66,10 @@ pub struct Index {
 }

 impl Index {
-/// Examines the director to see if it contains an index
-pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
+/// Examines the directory to see if it contains an index.
+///
+/// Effectively, it only checks for the presence of the `meta.json` file.
+pub fn exists<Dir: Directory>(dir: &Dir) -> Result<bool, OpenReadError> {
 dir.exists(&META_FILEPATH)
 }

@@ -105,7 +116,7 @@ impl Index {
 schema: Schema,
 ) -> crate::Result<Index> {
 let mmap_directory = MmapDirectory::open(directory_path)?;
-if Index::exists(&mmap_directory) {
+if Index::exists(&mmap_directory)? {
 return Err(TantivyError::IndexAlreadyExists);
 }
 Index::create(mmap_directory, schema)
@@ -113,7 +124,7 @@ impl Index {

 /// Opens or creates a new index in the provided directory
 pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
-if !Index::exists(&dir) {
+if !Index::exists(&dir)? {
 return Index::create(dir, schema);
 }
 let index = Index::open(dir)?;
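`Index::exists` is now fallible because the directory lookup itself can fail, so callers thread the error through with `?` instead of getting a bare bool. A hedged sketch of the open-or-create pattern against an on-disk directory; the schema and path are illustrative, not part of this change:

use tantivy::directory::MmapDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

// Sketch: open an index if one is present in the directory, otherwise create it.
fn open_or_create_index(path: &std::path::Path) -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let dir = MmapDirectory::open(path)?;
    // Index::exists(&dir)? would return Ok(false) when meta.json is absent and
    // an error only if the directory itself cannot be read; open_or_create
    // makes the same check internally.
    Index::open_or_create(dir, schema)
}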
@@ -140,7 +151,9 @@ impl Index {
 Index::create(mmap_directory, schema)
 }

-/// Creates a new index given an implementation of the trait `Directory`
+/// Creates a new index given an implementation of the trait `Directory`.
+///
+/// If a directory previously existed, it will be erased.
 pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
 let directory = ManagedDirectory::wrap(dir)?;
 Index::from_directory(directory, schema)
@@ -149,8 +162,8 @@ impl Index {
 /// Create a new index from a directory.
 ///
 /// This will overwrite existing meta.json
-fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
-save_new_metas(schema.clone(), directory.borrow_mut())?;
+fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
+save_new_metas(schema.clone(), &directory)?;
 let metas = IndexMeta::with_schema(schema);
 Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
 }
@@ -283,7 +296,7 @@ impl Index {
 TantivyError::LockFailure(
 err,
 Some(
-"Failed to acquire index lock. If you are using\
+"Failed to acquire index lock. If you are using \
 a regular directory, this means there is already an \
 `IndexWriter` working on this `Directory`, in this process \
 or in a different process."
@@ -300,6 +313,15 @@ impl Index {
 )
 }

+/// Helper to create an index writer for tests.
+///
+/// That index writer only simply has a single thread and a heap of 5 MB.
+/// Using a single thread gives us a deterministic allocation of DocId.
+#[cfg(test)]
+pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
+self.writer_with_num_threads(1, 10_000_000)
+}
+
 /// Creates a multithreaded writer
 ///
 /// Tantivy will automatically define the number of threads to use.
@@ -387,7 +409,7 @@ impl fmt::Debug for Index {

 #[cfg(test)]
 mod tests {
-use crate::directory::RAMDirectory;
+use crate::directory::{RAMDirectory, WatchCallback};
 use crate::schema::Field;
 use crate::schema::{Schema, INDEXED, TEXT};
 use crate::IndexReader;
@@ -411,24 +433,24 @@ mod tests {
 #[test]
 fn test_index_exists() {
 let directory = RAMDirectory::create();
-assert!(!Index::exists(&directory));
+assert!(!Index::exists(&directory).unwrap());
 assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-assert!(Index::exists(&directory));
+assert!(Index::exists(&directory).unwrap());
 }

 #[test]
 fn open_or_create_should_create() {
 let directory = RAMDirectory::create();
-assert!(!Index::exists(&directory));
+assert!(!Index::exists(&directory).unwrap());
 assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
-assert!(Index::exists(&directory));
+assert!(Index::exists(&directory).unwrap());
 }

 #[test]
 fn open_or_create_should_open() {
 let directory = RAMDirectory::create();
 assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-assert!(Index::exists(&directory));
+assert!(Index::exists(&directory).unwrap());
 assert!(Index::open_or_create(directory, throw_away_schema()).is_ok());
 }

@@ -436,7 +458,7 @@ mod tests {
 fn create_should_wipeoff_existing() {
 let directory = RAMDirectory::create();
 assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-assert!(Index::exists(&directory));
+assert!(Index::exists(&directory).unwrap());
 assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
 }

@@ -444,7 +466,7 @@ mod tests {
 fn open_or_create_exists_but_schema_does_not_match() {
 let directory = RAMDirectory::create();
 assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-assert!(Index::exists(&directory));
+assert!(Index::exists(&directory).unwrap());
 assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
 let err = Index::open_or_create(directory, Schema::builder().build());
 assert_eq!(
@@ -498,28 +520,28 @@ mod tests {
 }

 #[test]
-fn test_index_manual_policy_mmap() {
+fn test_index_manual_policy_mmap() -> crate::Result<()> {
 let schema = throw_away_schema();
 let field = schema.get_field("num_likes").unwrap();
-let mut index = Index::create_from_tempdir(schema).unwrap();
-let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-writer.commit().unwrap();
+let mut index = Index::create_from_tempdir(schema)?;
+let mut writer = index.writer_for_tests()?;
+writer.commit()?;
 let reader = index
 .reader_builder()
 .reload_policy(ReloadPolicy::Manual)
-.try_into()
-.unwrap();
+.try_into()?;
 assert_eq!(reader.searcher().num_docs(), 0);
 writer.add_document(doc!(field=>1u64));
 let (sender, receiver) = crossbeam::channel::unbounded();
-let _handle = index.directory_mut().watch(Box::new(move || {
+let _handle = index.directory_mut().watch(WatchCallback::new(move || {
 let _ = sender.send(());
 }));
-writer.commit().unwrap();
+writer.commit()?;
 assert!(receiver.recv().is_ok());
 assert_eq!(reader.searcher().num_docs(), 0);
-reader.reload().unwrap();
+reader.reload()?;
 assert_eq!(reader.searcher().num_docs(), 1);
+Ok(())
 }

 #[test]
@@ -539,23 +561,35 @@ mod tests {
 test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
 }
 }

 fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
 let mut reader_index = reader.index();
 let (sender, receiver) = crossbeam::channel::unbounded();
-let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
-let _ = sender.send(());
-}));
-let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let _watch_handle = reader_index
+.directory_mut()
+.watch(WatchCallback::new(move || {
+let _ = sender.send(());
+}));
+let mut writer = index.writer_for_tests().unwrap();
 assert_eq!(reader.searcher().num_docs(), 0);
 writer.add_document(doc!(field=>1u64));
 writer.commit().unwrap();
-assert!(receiver.recv().is_ok());
-assert_eq!(reader.searcher().num_docs(), 1);
+// We need a loop here because it is possible for notify to send more than
+// one modify event. It was observed on CI on MacOS.
+loop {
+assert!(receiver.recv().is_ok());
+if reader.searcher().num_docs() == 1 {
+break;
+}
+}
 writer.add_document(doc!(field=>2u64));
 writer.commit().unwrap();
-assert!(receiver.recv().is_ok());
-assert_eq!(reader.searcher().num_docs(), 2);
+// ... Same as above
+loop {
+assert!(receiver.recv().is_ok());
+if reader.searcher().num_docs() == 2 {
+break;
+}
+}
 }

 // This test will not pass on windows, because windows
@@ -573,7 +607,7 @@ mod tests {
 writer.add_document(doc!(field => i));
 }
 let (sender, receiver) = crossbeam::channel::unbounded();
-let _handle = directory.watch(Box::new(move || {
+let _handle = directory.watch(WatchCallback::new(move || {
 let _ = sender.send(());
 }));
 writer.commit().unwrap();
@@ -213,7 +213,7 @@ pub struct IndexMeta {
 #[serde(skip_serializing_if = "Option::is_none")]
 /// Payload associated to the last commit.
 ///
-/// Upon commit, clients can optionally add a small `Striing` payload to their commit
+/// Upon commit, clients can optionally add a small `String` payload to their commit
 /// to help identify this commit.
 /// This payload is entirely unused by tantivy.
 pub payload: Option<String>,
@@ -1,9 +1,10 @@
+use std::io;
+
 use crate::common::BinarySerializable;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
 use crate::positions::PositionReader;
 use crate::postings::TermInfo;
 use crate::postings::{BlockSegmentPostings, SegmentPostings};
-use crate::schema::FieldType;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::termdict::TermDictionary;
@@ -15,7 +16,7 @@ use crate::termdict::TermDictionary;
 ///
 /// It is safe to delete the segment associated to
 /// an `InvertedIndexReader`. As long as it is open,
-/// the `ReadOnlySource` it is relying on should
+/// the `FileSlice` it is relying on should
 /// stay available.
 ///
 ///
@@ -23,9 +24,9 @@ use crate::termdict::TermDictionary;
 /// the `SegmentReader`'s [`.inverted_index(...)`] method
 pub struct InvertedIndexReader {
 termdict: TermDictionary,
-postings_source: ReadOnlySource,
-positions_source: ReadOnlySource,
-positions_idx_source: ReadOnlySource,
+postings_file_slice: FileSlice,
+positions_file_slice: FileSlice,
+positions_idx_file_slice: FileSlice,
 record_option: IndexRecordOption,
 total_num_tokens: u64,
 }
@@ -34,42 +35,38 @@ impl InvertedIndexReader {
 #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
 pub(crate) fn new(
 termdict: TermDictionary,
-postings_source: ReadOnlySource,
-positions_source: ReadOnlySource,
-positions_idx_source: ReadOnlySource,
+postings_file_slice: FileSlice,
+positions_file_slice: FileSlice,
+positions_idx_file_slice: FileSlice,
 record_option: IndexRecordOption,
-) -> InvertedIndexReader {
-let total_num_tokens_data = postings_source.slice(0, 8);
-let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
-let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
-InvertedIndexReader {
+) -> io::Result<InvertedIndexReader> {
+let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
+let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
+Ok(InvertedIndexReader {
 termdict,
-postings_source: postings_source.slice_from(8),
-positions_source,
-positions_idx_source,
+postings_file_slice: postings_body,
+positions_file_slice,
+positions_idx_file_slice,
 record_option,
 total_num_tokens,
-}
+})
 }

 /// Creates an empty `InvertedIndexReader` object, which
 /// contains no terms at all.
-pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
-let record_option = field_type
-.get_index_record_option()
-.unwrap_or(IndexRecordOption::Basic);
+pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
 InvertedIndexReader {
 termdict: TermDictionary::empty(),
-postings_source: ReadOnlySource::empty(),
-positions_source: ReadOnlySource::empty(),
-positions_idx_source: ReadOnlySource::empty(),
+postings_file_slice: FileSlice::empty(),
+positions_file_slice: FileSlice::empty(),
+positions_idx_file_slice: FileSlice::empty(),
 record_option,
 total_num_tokens: 0u64,
 }
 }

 /// Returns the term info associated with the term.
-pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
+pub fn get_term_info(&self, term: &Term) -> io::Result<Option<TermInfo>> {
 self.termdict.get(term.value_bytes())
 }

@@ -92,11 +89,12 @@ impl InvertedIndexReader {
 &self,
 term_info: &TermInfo,
 block_postings: &mut BlockSegmentPostings,
-) {
-let offset = term_info.postings_offset as usize;
-let end_source = self.postings_source.len();
-let postings_slice = self.postings_source.slice(offset, end_source);
-block_postings.reset(term_info.doc_freq, postings_slice);
+) -> io::Result<()> {
+let start_offset = term_info.postings_start_offset as usize;
+let stop_offset = term_info.postings_stop_offset as usize;
+let postings_slice = self.postings_file_slice.slice(start_offset, stop_offset);
+block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
+Ok(())
 }

 /// Returns a block postings given a `Term`.
@@ -107,9 +105,10 @@ impl InvertedIndexReader {
 &self,
 term: &Term,
 option: IndexRecordOption,
-) -> Option<BlockSegmentPostings> {
-self.get_term_info(term)
+) -> io::Result<Option<BlockSegmentPostings>> {
+self.get_term_info(term)?
 .map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
+.transpose()
 }

 /// Returns a block postings given a `term_info`.
@@ -120,10 +119,12 @@ impl InvertedIndexReader {
|
|||||||
&self,
|
&self,
|
||||||
term_info: &TermInfo,
|
term_info: &TermInfo,
|
||||||
requested_option: IndexRecordOption,
|
requested_option: IndexRecordOption,
|
||||||
) -> BlockSegmentPostings {
|
) -> io::Result<BlockSegmentPostings> {
|
||||||
let offset = term_info.postings_offset as usize;
|
let postings_data = self.postings_file_slice.slice(
|
||||||
let postings_data = self.postings_source.slice_from(offset);
|
term_info.postings_start_offset as usize,
|
||||||
BlockSegmentPostings::from_data(
|
term_info.postings_stop_offset as usize,
|
||||||
|
);
|
||||||
|
BlockSegmentPostings::open(
|
||||||
term_info.doc_freq,
|
term_info.doc_freq,
|
||||||
postings_data,
|
postings_data,
|
||||||
self.record_option,
|
self.record_option,
|
||||||
@@ -139,20 +140,23 @@ impl InvertedIndexReader {
|
|||||||
&self,
|
&self,
|
||||||
term_info: &TermInfo,
|
term_info: &TermInfo,
|
||||||
option: IndexRecordOption,
|
option: IndexRecordOption,
|
||||||
) -> SegmentPostings {
|
) -> io::Result<SegmentPostings> {
|
||||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option);
|
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
|
||||||
let position_stream = {
|
let position_stream = {
|
||||||
if option.has_positions() {
|
if option.has_positions() {
|
||||||
let position_reader = self.positions_source.clone();
|
let position_reader = self.positions_file_slice.clone();
|
||||||
let skip_reader = self.positions_idx_source.clone();
|
let skip_reader = self.positions_idx_file_slice.clone();
|
||||||
let position_reader =
|
let position_reader =
|
||||||
PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
|
PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?;
|
||||||
Some(position_reader)
|
Some(position_reader)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
SegmentPostings::from_block_postings(block_postings, position_stream)
|
Ok(SegmentPostings::from_block_postings(
|
||||||
|
block_postings,
|
||||||
|
position_stream,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the total number of tokens recorded for all documents
|
/// Returns the total number of tokens recorded for all documents
|
||||||
@@ -171,24 +175,31 @@ impl InvertedIndexReader {
|
|||||||
/// For instance, requesting `IndexRecordOption::Freq` for a
|
/// For instance, requesting `IndexRecordOption::Freq` for a
|
||||||
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
||||||
/// with `DocId`s and frequencies.
|
/// with `DocId`s and frequencies.
|
||||||
pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
|
pub fn read_postings(
|
||||||
self.get_term_info(term)
|
&self,
|
||||||
|
term: &Term,
|
||||||
|
option: IndexRecordOption,
|
||||||
|
) -> io::Result<Option<SegmentPostings>> {
|
||||||
|
self.get_term_info(term)?
|
||||||
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
|
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||||
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn read_postings_no_deletes(
|
pub(crate) fn read_postings_no_deletes(
|
||||||
&self,
|
&self,
|
||||||
term: &Term,
|
term: &Term,
|
||||||
option: IndexRecordOption,
|
option: IndexRecordOption,
|
||||||
) -> Option<SegmentPostings> {
|
) -> io::Result<Option<SegmentPostings>> {
|
||||||
self.get_term_info(term)
|
self.get_term_info(term)?
|
||||||
.map(|term_info| self.read_postings_from_terminfo(&term_info, option))
|
.map(|term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||||
|
.transpose()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the number of documents containing the term.
|
/// Returns the number of documents containing the term.
|
||||||
pub fn doc_freq(&self, term: &Term) -> u32 {
|
pub fn doc_freq(&self, term: &Term) -> io::Result<u32> {
|
||||||
self.get_term_info(term)
|
Ok(self
|
||||||
|
.get_term_info(term)?
|
||||||
.map(|term_info| term_info.doc_freq)
|
.map(|term_info| term_info.doc_freq)
|
||||||
.unwrap_or(0u32)
|
.unwrap_or(0u32))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
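Note: the postings accessors above now return `io::Result`, so callers thread errors with `?` instead of relying on panics or silent defaults. A minimal caller-side sketch (the helper name is illustrative and import paths are elided; only the signatures shown in the diff are assumed):

    use std::io;

    // Sketch only: `InvertedIndexReader`, `Term` and `IndexRecordOption` are the
    // types touched by the diff above.
    fn segment_contains_term(
        inverted_index: &InvertedIndexReader,
        term: &Term,
    ) -> io::Result<bool> {
        // `read_postings` is now fallible; `?` propagates any I/O error.
        let postings_opt = inverted_index.read_postings(term, IndexRecordOption::Basic)?;
        Ok(postings_opt.is_some())
    }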
@@ -1,18 +1,17 @@
use crate::collector::Collector;
use crate::core::Executor;
-use crate::core::InvertedIndexReader;
use crate::core::SegmentReader;
use crate::query::Query;
use crate::schema::Document;
use crate::schema::Schema;
-use crate::schema::{Field, Term};
+use crate::schema::Term;
use crate::space_usage::SearcherSpaceUsage;
use crate::store::StoreReader;
-use crate::termdict::TermMerger;
use crate::DocAddress;
use crate::Index;
-use std::fmt;
-use std::sync::Arc;
+use std::{fmt, io};

/// Holds a list of `SegmentReader`s ready for search.
///
@@ -32,17 +31,17 @@ impl Searcher {
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
-) -> Searcher {
-let store_readers = segment_readers
+) -> io::Result<Searcher> {
+let store_readers: Vec<StoreReader> = segment_readers
.iter()
.map(SegmentReader::get_store_reader)
-.collect();
-Searcher {
+.collect::<io::Result<Vec<_>>>()?;
+Ok(Searcher {
schema,
index,
segment_readers,
store_readers,
-}
+})
}

/// Returns the `Index` associated to the `Searcher`
@@ -75,13 +74,14 @@ impl Searcher {

/// Return the overall number of documents containing
/// the given term.
-pub fn doc_freq(&self, term: &Term) -> u64 {
-self.segment_readers
-.iter()
-.map(|segment_reader| {
-u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
-})
-.sum::<u64>()
+pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
+let mut total_doc_freq = 0;
+for segment_reader in &self.segment_readers {
+let inverted_index = segment_reader.inverted_index(term.field())?;
+let doc_freq = inverted_index.doc_freq(term)?;
+total_doc_freq += u64::from(doc_freq);
+}
+Ok(total_doc_freq)
}

/// Return the list of segment readers
@@ -147,44 +147,13 @@ impl Searcher {
collector.merge_fruits(fruits)
}

-/// Return the field searcher associated to a `Field`.
-pub fn field(&self, field: Field) -> FieldSearcher {
-let inv_index_readers = self
-.segment_readers
-.iter()
-.map(|segment_reader| segment_reader.inverted_index(field))
-.collect::<Vec<_>>();
-FieldSearcher::new(inv_index_readers)
-}
-
/// Summarize total space usage of this searcher.
-pub fn space_usage(&self) -> SearcherSpaceUsage {
+pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
let mut space_usage = SearcherSpaceUsage::new();
-for segment_reader in self.segment_readers.iter() {
-space_usage.add_segment(segment_reader.space_usage());
+for segment_reader in &self.segment_readers {
+space_usage.add_segment(segment_reader.space_usage()?);
}
-space_usage
+Ok(space_usage)
}
}
-
-pub struct FieldSearcher {
-inv_index_readers: Vec<Arc<InvertedIndexReader>>,
-}
-
-impl FieldSearcher {
-fn new(inv_index_readers: Vec<Arc<InvertedIndexReader>>) -> FieldSearcher {
-FieldSearcher { inv_index_readers }
-}
-
-/// Returns a Stream over all of the sorted unique terms of
-/// for the given field.
-pub fn terms(&self) -> TermMerger<'_> {
-let term_streamers: Vec<_> = self
-.inv_index_readers
-.iter()
-.map(|inverted_index| inverted_index.terms().stream())
-.collect();
-TermMerger::new(term_streamers)
-}
-}
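Note for callers: `Searcher::new`, `Searcher::doc_freq` and `Searcher::space_usage` become fallible with this change. A hedged migration sketch (the field name and query term are made up for illustration):

    use tantivy::schema::{Field, Term};
    use tantivy::Searcher;

    // Illustrative: `title` is assumed to be an indexed text field of the schema.
    fn tantivy_doc_freq(searcher: &Searcher, title: Field) -> tantivy::Result<u64> {
        let term = Term::from_field_text(title, "tantivy");
        // Previously returned a plain u64; each per-segment lookup can now fail.
        searcher.doc_freq(&term)
    }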
@@ -4,7 +4,7 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory;
-use crate::directory::{ReadOnlySource, WritePtr};
+use crate::directory::{FileSlice, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema;
use crate::Opstamp;
@@ -78,10 +78,9 @@ impl Segment {
}

/// Open one of the component file for a *regular* read.
-pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
+pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> {
let path = self.relative_path(component);
-let source = self.index.directory().open_read(&path)?;
-Ok(source)
+self.index.directory().open_read(&path)
}

/// Open one of the component file for *regular* write.
@@ -20,7 +20,7 @@ pub enum SegmentComponent {
/// Dictionary associating `Term`s to `TermInfo`s which is
/// simply an address into the `postings` file and the `positions` file.
TERMS,
-/// Row-oriented, LZ4-compressed storage of the documents.
+/// Row-oriented, compressed storage of the documents.
/// Accessing a document from the store is relatively slow, as it
/// requires to decompress the entire block it belongs to.
STORE,
@@ -1,26 +1,26 @@
-use crate::common::CompositeFile;
use crate::common::HasLen;
use crate::core::InvertedIndexReader;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders;
-use crate::fieldnorm::FieldNormReader;
-use crate::schema::Field;
+use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::FieldType;
use crate::schema::Schema;
+use crate::schema::{Field, IndexRecordOption};
use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::DocId;
+use crate::{common::CompositeFile, error::DataCorruption};
use fail::fail_point;
-use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
+use std::{collections::HashMap, io};

/// Entry point to access all of the datastructures of the `Segment`
///
@@ -48,9 +48,9 @@ pub struct SegmentReader {
positions_composite: CompositeFile,
positions_idx_composite: CompositeFile,
fast_fields_readers: Arc<FastFieldReaders>,
-fieldnorms_composite: CompositeFile,
+fieldnorm_readers: FieldNormReaders,

-store_source: ReadOnlySource,
+store_file: FileSlice,
delete_bitset_opt: Option<DeleteBitSet>,
schema: Schema,
}
@@ -106,16 +106,21 @@ impl SegmentReader {
}

/// Accessor to the `FacetReader` associated to a given `Field`.
-pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
+pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
let field_entry = self.schema.get_field_entry(field);
if field_entry.field_type() != &FieldType::HierarchicalFacet {
-return None;
+return Err(crate::TantivyError::InvalidArgument(format!(
+"Field {:?} is not a facet field.",
+field_entry.name()
+)));
}
let term_ords_reader = self.fast_fields().u64s(field)?;
-let termdict_source = self.termdict_composite.open_read(field)?;
-let termdict = TermDictionary::from_source(&termdict_source);
-let facet_reader = FacetReader::new(term_ords_reader, termdict);
-Some(facet_reader)
+let termdict = self
+.termdict_composite
+.open_read(field)
+.map(TermDictionary::open)
+.unwrap_or_else(|| Ok(TermDictionary::empty()))?;
+Ok(FacetReader::new(term_ords_reader, termdict))
}

/// Accessor to the segment's `Field norms`'s reader.
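Note: `facet_reader` now reports a non-facet field as an explicit `InvalidArgument` error instead of returning `None`. Caller-side sketch (the function name and messages are illustrative, not part of the patch):

    use tantivy::schema::Field;
    use tantivy::{SegmentReader, TantivyError};

    // Illustrative: distinguish "not a facet field" from other failures.
    fn describe_facet_access(segment_reader: &SegmentReader, field: Field) -> String {
        match segment_reader.facet_reader(field) {
            Ok(_facet_reader) => "facet field opened".to_string(),
            Err(TantivyError::InvalidArgument(msg)) => msg,
            Err(other) => format!("unexpected error: {:?}", other),
        }
    }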
@@ -125,47 +130,45 @@ impl SegmentReader {
///
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
-pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
-if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
-FieldNormReader::open(fieldnorm_source)
-} else {
+pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
+self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
let field_name = self.schema.get_field_name(field);
let err_msg = format!(
-"Field norm not found for field {:?}. Was it market as indexed during indexing.",
+"Field norm not found for field {:?}. Was it marked as indexed during indexing?",
field_name
);
-panic!(err_msg);
-}
+crate::TantivyError::SchemaError(err_msg)
+})
}

/// Accessor to the segment's `StoreReader`.
-pub fn get_store_reader(&self) -> StoreReader {
-StoreReader::from_source(self.store_source.clone())
+pub fn get_store_reader(&self) -> io::Result<StoreReader> {
+StoreReader::open(self.store_file.clone())
}

/// Open a new segment for reading.
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
-let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
-let termdict_composite = CompositeFile::open(&termdict_source)?;
+let termdict_file = segment.open_read(SegmentComponent::TERMS)?;
+let termdict_composite = CompositeFile::open(&termdict_file)?;

-let store_source = segment.open_read(SegmentComponent::STORE)?;
+let store_file = segment.open_read(SegmentComponent::STORE)?;

fail_point!("SegmentReader::open#middle");

-let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
-let postings_composite = CompositeFile::open(&postings_source)?;
+let postings_file = segment.open_read(SegmentComponent::POSTINGS)?;
+let postings_composite = CompositeFile::open(&postings_file)?;

let positions_composite = {
-if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
-CompositeFile::open(&source)?
+if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) {
+CompositeFile::open(&positions_file)?
} else {
CompositeFile::empty()
}
};

let positions_idx_composite = {
-if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
-CompositeFile::open(&source)?
+if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
+CompositeFile::open(&positions_skip_file)?
} else {
CompositeFile::empty()
}
@@ -175,29 +178,32 @@ impl SegmentReader {

let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
-let fast_field_readers =
-Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
+let fast_field_readers = Arc::new(FastFieldReaders::new(
+schema.clone(),
+fast_fields_composite,
+)?);

-let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
-let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
+let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
+let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;

let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
-Some(DeleteBitSet::open(delete_data))
+let delete_bitset = DeleteBitSet::open(delete_data)?;
+Some(delete_bitset)
} else {
None
};

Ok(SegmentReader {
-inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
+inv_idx_reader_cache: Default::default(),
max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(),
termdict_composite,
postings_composite,
fast_fields_readers: fast_field_readers,
-fieldnorms_composite,
+fieldnorm_readers,
segment_id: segment.id(),
-store_source,
+store_file,
delete_bitset_opt,
positions_composite,
positions_idx_composite,
@@ -212,58 +218,64 @@ impl SegmentReader {
/// The field reader is in charge of iterating through the
/// term dictionary associated to a specific field,
/// and opening the posting list associated to any term.
-pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
+///
+/// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
+/// is returned.
+/// Similarly if the field is marked as indexed but no term has been indexed for the given
+/// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
+pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
if let Some(inv_idx_reader) = self
.inv_idx_reader_cache
.read()
.expect("Lock poisoned. This should never happen")
.get(&field)
{
-return Arc::clone(inv_idx_reader);
+return Ok(Arc::clone(inv_idx_reader));
}
let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type();
let record_option_opt = field_type.get_index_record_option();

if record_option_opt.is_none() {
-panic!("Field {:?} does not seem indexed.", field_entry.name());
+warn!("Field {:?} does not seem indexed.", field_entry.name());
}

-let record_option = record_option_opt.unwrap();
+let postings_file_opt = self.postings_composite.open_read(field);

-let postings_source_opt = self.postings_composite.open_read(field);
-
-if postings_source_opt.is_none() {
+if postings_file_opt.is_none() || record_option_opt.is_none() {
// no documents in the segment contained this field.
// As a result, no data is associated to the inverted index.
//
// Returns an empty inverted index.
-return Arc::new(InvertedIndexReader::empty(field_type));
+let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
+return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
}

-let postings_source = postings_source_opt.unwrap();
+let record_option = record_option_opt.unwrap();
+let postings_file = postings_file_opt.unwrap();

-let termdict_source = self.termdict_composite.open_read(field).expect(
-"Failed to open field term dictionary in composite file. Is the field indexed?",
-);
+let termdict_file: FileSlice = self.termdict_composite.open_read(field)
+.ok_or_else(||
+DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name()))
+)?;

-let positions_source = self
+let positions_file = self
.positions_composite
.open_read(field)
.expect("Index corrupted. Failed to open field positions in composite file.");

-let positions_idx_source = self
+let positions_idx_file = self
.positions_idx_composite
.open_read(field)
.expect("Index corrupted. Failed to open field positions in composite file.");

let inv_idx_reader = Arc::new(InvertedIndexReader::new(
-TermDictionary::from_source(&termdict_source),
-postings_source,
-positions_source,
-positions_idx_source,
+TermDictionary::open(termdict_file)?,
+postings_file,
+positions_file,
+positions_idx_file,
record_option,
-));
+)?);

// by releasing the lock in between, we may end up opening the inverting index
// twice, but this is fine.
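Note: `inverted_index` is now cached behind a `crate::Result`, and, per the new doc-comment, an unindexed field produces a warning plus an empty reader rather than a panic. Sketch of a caller (imports elided; `total_num_tokens` is the accessor whose doc-comment appears in the earlier diff):

    // Sketch only: returns whether any token was indexed for `field` in this segment.
    fn field_has_tokens(segment_reader: &SegmentReader, field: Field) -> tantivy::Result<bool> {
        let inverted_index = segment_reader.inverted_index(field)?;
        // An empty InvertedIndexReader reports zero tokens.
        Ok(inverted_index.total_num_tokens() > 0)
    }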
@@ -272,7 +284,7 @@ impl SegmentReader {
.expect("Field reader cache lock poisoned. This should never happen.")
.insert(field, Arc::clone(&inv_idx_reader));

-inv_idx_reader
+Ok(inv_idx_reader)
}

/// Returns the segment id
@@ -295,26 +307,26 @@ impl SegmentReader {
}

/// Returns an iterator that will iterate over the alive document ids
-pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
-SegmentReaderAliveDocsIterator::new(&self)
+pub fn doc_ids_alive(&self) -> impl Iterator<Item = DocId> + '_ {
+(0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
}

/// Summarize total space usage of this segment.
-pub fn space_usage(&self) -> SegmentSpaceUsage {
-SegmentSpaceUsage::new(
+pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
+Ok(SegmentSpaceUsage::new(
self.num_docs(),
self.termdict_composite.space_usage(),
self.postings_composite.space_usage(),
self.positions_composite.space_usage(),
self.positions_idx_composite.space_usage(),
self.fast_fields_readers.space_usage(),
-self.fieldnorms_composite.space_usage(),
-self.get_store_reader().space_usage(),
+self.fieldnorm_readers.space_usage(),
+self.get_store_reader()?.space_usage(),
self.delete_bitset_opt
.as_ref()
.map(DeleteBitSet::space_usage)
.unwrap_or(0),
-)
+))
}
}

@@ -324,52 +336,6 @@ impl fmt::Debug for SegmentReader {
}
}

-/// Implements the iterator trait to allow easy iteration
-/// over non-deleted ("alive") DocIds in a SegmentReader
-pub struct SegmentReaderAliveDocsIterator<'a> {
-reader: &'a SegmentReader,
-max_doc: DocId,
-current: DocId,
-}
-
-impl<'a> SegmentReaderAliveDocsIterator<'a> {
-pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
-SegmentReaderAliveDocsIterator {
-reader,
-max_doc: reader.max_doc(),
-current: 0,
-}
-}
-}
-
-impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
-type Item = DocId;
-
-fn next(&mut self) -> Option<Self::Item> {
-// TODO: Use TinySet (like in BitSetDocSet) to speed this process up
-if self.current >= self.max_doc {
-return None;
-}
-
-// find the next alive doc id
-while self.reader.is_deleted(self.current) {
-self.current += 1;
-
-if self.current >= self.max_doc {
-return None;
-}
-}
-
-// capture the current alive DocId
-let result = Some(self.current);
-
-// move down the chain
-self.current += 1;
-
-result
-}
-}
-
#[cfg(test)]
mod test {
use crate::core::Index;
@@ -377,7 +343,7 @@ mod test {
use crate::DocId;

#[test]
-fn test_alive_docs_iterator() {
+fn test_alive_docs_iterator() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED);
let schema = schema_builder.build();
@@ -385,26 +351,26 @@ mod test {
let name = schema.get_field("name").unwrap();

{
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(name => "tantivy"));
index_writer.add_document(doc!(name => "horse"));
index_writer.add_document(doc!(name => "jockey"));
index_writer.add_document(doc!(name => "cap"));

// we should now have one segment with two docs
-index_writer.commit().unwrap();
+index_writer.commit()?;
}

{
-let mut index_writer2 = index.writer(50_000_000).unwrap();
+let mut index_writer2 = index.writer(50_000_000)?;
index_writer2.delete_term(Term::from_field_text(name, "horse"));
index_writer2.delete_term(Term::from_field_text(name, "cap"));

// ok, now we should have a deleted doc
-index_writer2.commit().unwrap();
+index_writer2.commit()?;
}
-let searcher = index.reader().unwrap().searcher();
+let searcher = index.reader()?.searcher();
let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
assert_eq!(vec![0u32, 2u32], docs);
+Ok(())
}
}
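Note: on the `SegmentReader` side, `get_store_reader` and `space_usage` are now fallible, while `doc_ids_alive` returns a plain `impl Iterator<Item = DocId>`. Sketch (imports elided; the helper is illustrative):

    use std::io;

    // Counts alive documents after making sure the store can actually be opened.
    fn count_alive_docs(segment_reader: &SegmentReader) -> io::Result<usize> {
        let _store_reader = segment_reader.get_store_reader()?;
        Ok(segment_reader.doc_ids_alive().count())
    }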
@@ -1,9 +1,9 @@
use crate::directory::directory_lock::Lock;
use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
-use crate::directory::WatchCallback;
use crate::directory::WatchHandle;
-use crate::directory::{ReadOnlySource, WritePtr};
+use crate::directory::{FileHandle, WatchCallback};
+use crate::directory::{FileSlice, WritePtr};
use std::fmt;
use std::io;
use std::io::Write;
@@ -11,7 +11,6 @@ use std::marker::Send;
use std::marker::Sync;
use std::path::Path;
use std::path::PathBuf;
-use std::result;
use std::thread;
use std::time::Duration;

@@ -80,7 +79,7 @@ fn try_acquire_lock(
) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
-OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
+OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error),
})?;
write.flush().map_err(TryAcquireLockError::IOError)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
@@ -109,37 +108,43 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
-///
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
-/// Opens a virtual file for read.
+/// Opens a file and returns a boxed `FileHandle`.
///
+/// Users of `Directory` should typically call `Directory::open_read(...)`,
+/// while `Directory` implementor should implement `get_file_handle()`.
+fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;

/// Once a virtual file is open, its data may not
/// change.
///
/// Specifically, subsequent writes or flushes should
-/// have no effect on the returned `ReadOnlySource` object.
+/// have no effect on the returned `FileSlice` object.
///
/// You should only use this to read files create with [Directory::open_write].
-fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
+fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
+let file_handle = self.get_file_handle(path)?;
+Ok(FileSlice::new(file_handle))
+}

/// Removes a file
///
/// Removing a file will not affect an eventual
-/// existing ReadOnlySource pointing to it.
+/// existing FileSlice pointing to it.
///
/// Removing a nonexistent file, yields a
/// `DeleteError::DoesNotExist`.
-fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
+fn delete(&self, path: &Path) -> Result<(), DeleteError>;

/// Returns true iff the file exists
-fn exists(&self, path: &Path) -> bool;
+fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;

/// Opens a writer for the *virtual file* associated with
/// a Path.
///
/// Right after this call, the file should be created
/// and any subsequent call to `open_read` for the
-/// same path should return a `ReadOnlySource`.
+/// same path should return a `FileSlice`.
///
/// Write operations may be aggressively buffered.
/// The client of this trait is responsible for calling flush
@@ -153,7 +158,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// was not called.
///
/// The file may not previously exist.
-fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
+fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;

/// Reads the full content file that has been written using
/// atomic_write.
@@ -169,7 +174,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// a partially written file.
///
/// The file may or may not previously exist.
-fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
+fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>;

/// Acquire a lock in the given directory.
///
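Note: `Directory` implementors now provide `get_file_handle`, and `open_read` becomes a provided method that wraps the handle in a `FileSlice`. A consumer-side sketch (imports elided; `HasLen` is assumed to be in scope for `len()`):

    use std::path::Path;

    // Illustrative: the default open_read builds a FileSlice over the handle.
    fn file_len(directory: &dyn Directory, path: &Path) -> Result<usize, OpenReadError> {
        let file_slice: FileSlice = directory.open_read(path)?;
        Ok(file_slice.len())
    }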
@@ -1,162 +1,68 @@
use crate::Version;
-use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::path::PathBuf;

/// Error while trying to acquire a directory lock.
-#[derive(Debug, Fail)]
+#[derive(Debug, Error)]
pub enum LockError {
/// Failed to acquired a lock as it is already held by another
/// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
-#[fail(
-display = "Could not acquire lock as it is already held, possibly by a different process."
-)]
+#[error("Could not acquire lock as it is already held, possibly by a different process.")]
LockBusy,
/// Trying to acquire a lock failed with an `IOError`
-#[fail(display = "Failed to acquire the lock due to an io:Error.")]
+#[error("Failed to acquire the lock due to an io:Error.")]
IOError(io::Error),
}

-/// General IO error with an optional path to the offending file.
-#[derive(Debug)]
-pub struct IOError {
-path: Option<PathBuf>,
-err: io::Error,
-}
-
-impl Into<io::Error> for IOError {
-fn into(self) -> io::Error {
-self.err
-}
-}
-
-impl fmt::Display for IOError {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-match self.path {
-Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
-None => write!(f, "io error occurred: '{}'", self.err),
-}
-}
-}
-
-impl StdError for IOError {
-fn description(&self) -> &str {
-"io error occurred"
-}
-
-fn cause(&self) -> Option<&dyn StdError> {
-Some(&self.err)
-}
-}
-
-impl IOError {
-pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
-IOError {
-path: Some(path),
-err,
-}
-}
-}
-
-impl From<io::Error> for IOError {
-fn from(err: io::Error) -> IOError {
-IOError { path: None, err }
-}
-}
-
/// Error that may occur when opening a directory
-#[derive(Debug)]
+#[derive(Debug, Error)]
pub enum OpenDirectoryError {
/// The underlying directory does not exists.
+#[error("Directory does not exist: '{0}'.")]
DoesNotExist(PathBuf),
/// The path exists but is not a directory.
+#[error("Path exists but is not a directory: '{0}'.")]
NotADirectory(PathBuf),
+/// Failed to create a temp directory.
+#[error("Failed to create a temporary directory: '{0}'.")]
+FailedToCreateTempDir(io::Error),
/// IoError
-IoError(io::Error),
+#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
+IoError {
+/// underlying io Error.
+io_error: io::Error,
+/// directory we tried to open.
+directory_path: PathBuf,
+},
}
-
-impl From<io::Error> for OpenDirectoryError {
-fn from(io_err: io::Error) -> Self {
-OpenDirectoryError::IoError(io_err)
-}
-}
-
-impl fmt::Display for OpenDirectoryError {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-match *self {
-OpenDirectoryError::DoesNotExist(ref path) => {
-write!(f, "the underlying directory '{:?}' does not exist", path)
-}
-OpenDirectoryError::NotADirectory(ref path) => {
-write!(f, "the path '{:?}' exists but is not a directory", path)
-}
-OpenDirectoryError::IoError(ref err) => write!(
-f,
-"IOError while trying to open/create the directory. {:?}",
-err
-),
-}
-}
-}
-
-impl StdError for OpenDirectoryError {
-fn description(&self) -> &str {
-"error occurred while opening a directory"
-}
-
-fn cause(&self) -> Option<&dyn StdError> {
-None
-}
-}

/// Error that may occur when starting to write in a file
-#[derive(Debug)]
+#[derive(Debug, Error)]
pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden.
/// Checkout the `Directory` documentation.
+#[error("File already exists: '{0}'")]
FileAlreadyExists(PathBuf),
/// Any kind of IO error that happens when
/// writing in the underlying IO device.
-IOError(IOError),
+#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")]
+IOError {
+/// The underlying `io::Error`.
+io_error: io::Error,
+/// File path of the file that tantivy failed to open for write.
+filepath: PathBuf,
+},
}

-impl From<IOError> for OpenWriteError {
-fn from(err: IOError) -> OpenWriteError {
-OpenWriteError::IOError(err)
+impl OpenWriteError {
+/// Wraps an io error.
+pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
+Self::IOError { io_error, filepath }
}
}

-impl fmt::Display for OpenWriteError {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-match *self {
-OpenWriteError::FileAlreadyExists(ref path) => {
-write!(f, "the file '{:?}' already exists", path)
-}
-OpenWriteError::IOError(ref err) => write!(
-f,
-"an io error occurred while opening a file for writing: '{}'",
-err
-),
-}
-}
-}
-
-impl StdError for OpenWriteError {
-fn description(&self) -> &str {
-"error occurred while opening a file for writing"
-}
-
-fn cause(&self) -> Option<&dyn StdError> {
-match *self {
-OpenWriteError::FileAlreadyExists(_) => None,
-OpenWriteError::IOError(ref err) => Some(err),
-}
-}
-}
-
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility {
@@ -217,55 +123,47 @@ impl fmt::Debug for Incompatibility {
}

/// Error that may occur when accessing a file read
-#[derive(Debug)]
+#[derive(Debug, Error)]
pub enum OpenReadError {
/// The file does not exists.
+#[error("Files does not exists: {0:?}")]
FileDoesNotExist(PathBuf),
-/// Any kind of IO error that happens when
-/// interacting with the underlying IO device.
-IOError(IOError),
-/// This library doesn't support the index version found on disk
+/// Any kind of io::Error.
+#[error(
+"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
+)]
+IOError {
+/// The underlying `io::Error`.
+io_error: io::Error,
+/// File path of the file that tantivy failed to open for read.
+filepath: PathBuf,
+},
+/// This library does not support the index version found in file footer.
+#[error("Index version unsupported: {0:?}")]
IncompatibleIndex(Incompatibility),
}

-impl From<IOError> for OpenReadError {
-fn from(err: IOError) -> OpenReadError {
-OpenReadError::IOError(err)
+impl OpenReadError {
+/// Wraps an io error.
+pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
+Self::IOError { io_error, filepath }
}
}

-impl fmt::Display for OpenReadError {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-match *self {
-OpenReadError::FileDoesNotExist(ref path) => {
-write!(f, "the file '{:?}' does not exist", path)
-}
-OpenReadError::IOError(ref err) => write!(
-f,
-"an io error occurred while opening a file for reading: '{}'",
-err
-),
-OpenReadError::IncompatibleIndex(ref footer) => {
-write!(f, "Incompatible index format: {:?}", footer)
-}
-}
-}
-}
-
/// Error that may occur when trying to delete a file
-#[derive(Debug)]
+#[derive(Debug, Error)]
pub enum DeleteError {
/// The file does not exists.
+#[error("File does not exists: '{0}'.")]
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
-IOError(IOError),
+#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
+IOError {
+/// The underlying `io::Error`.
+io_error: io::Error,
+/// File path of the file that tantivy failed to delete.
+filepath: PathBuf,
+},
}

impl From<Incompatibility> for OpenReadError {
@@ -273,29 +171,3 @@ impl From<Incompatibility> for OpenReadError {
OpenReadError::IncompatibleIndex(incompatibility)
}
}
-
-impl fmt::Display for DeleteError {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-match *self {
-DeleteError::FileDoesNotExist(ref path) => {
-write!(f, "the file '{:?}' does not exist", path)
-}
-DeleteError::IOError(ref err) => {
-write!(f, "an io error occurred while deleting a file: '{}'", err)
-}
-}
-}
-}
-
-impl StdError for DeleteError {
-fn description(&self) -> &str {
-"error occurred while deleting a file"
-}
-
-fn cause(&self) -> Option<&dyn StdError> {
-match *self {
-DeleteError::FileDoesNotExist(_) => None,
-DeleteError::IOError(ref err) => Some(err),
-}
-}
-}
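Note: the directory error types move to `thiserror`-style derives, and the IOError variants become struct-like, so matches bind named fields. Illustrative sketch (messages are made up for the example):

    use tantivy::directory::error::OpenReadError;

    // Formats each of the new variants shown in the diff above.
    fn describe_open_error(err: &OpenReadError) -> String {
        match err {
            OpenReadError::FileDoesNotExist(path) => format!("missing file: {:?}", path),
            OpenReadError::IOError { io_error, filepath } => {
                format!("io error on {:?}: {}", filepath, io_error)
            }
            OpenReadError::IncompatibleIndex(incompatibility) => {
                format!("incompatible index: {:?}", incompatibility)
            }
        }
    }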
src/directory/file_slice.rs (new file, 247 lines)
@@ -0,0 +1,247 @@
+use stable_deref_trait::StableDeref;
+
+use crate::common::HasLen;
+use crate::directory::OwnedBytes;
+use std::sync::{Arc, Weak};
+use std::{io, ops::Deref};
+
+pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+
+/// Objects that represents files sections in tantivy.
+///
+/// By contract, whatever happens to the directory file, as long as a FileHandle
+/// is alive, the data associated with it cannot be altered or destroyed.
+///
+/// The underlying behavior is therefore specific to the `Directory` that created it.
+/// Despite its name, a `FileSlice` may or may not directly map to an actual file
+/// on the filesystem.
+pub trait FileHandle: 'static + Send + Sync + HasLen {
+/// Reads a slice of bytes.
+///
+/// This method may panic if the range requested is invalid.
+fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes>;
+}
+
+impl FileHandle for &'static [u8] {
+fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
+let bytes = &self[from..to];
+Ok(OwnedBytes::new(bytes))
+}
+}
+
+impl<T: Deref<Target = [u8]>> HasLen for T {
+fn len(&self) -> usize {
+self.as_ref().len()
+}
+}
+
+impl<B> From<B> for FileSlice
+where
+B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
+{
+fn from(bytes: B) -> FileSlice {
+FileSlice::new(Box::new(OwnedBytes::new(bytes)))
+}
+}
+
+/// Logical slice of read only file in tantivy.
+//
+/// It can be cloned and sliced cheaply.
+///
+#[derive(Clone)]
+pub struct FileSlice {
+data: Arc<dyn FileHandle>,
+start: usize,
+stop: usize,
+}
+
+impl FileSlice {
+/// Wraps a FileHandle.
+pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
+let num_bytes = file_handle.len();
+FileSlice::new_with_num_bytes(file_handle, num_bytes)
+}
+
+/// Wraps a FileHandle.
+#[doc(hidden)]
+pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
+FileSlice {
+data: Arc::from(file_handle),
+start: 0,
+stop: num_bytes,
+}
+}
+
+/// Creates a fileslice that is just a view over a slice of the data.
+///
+/// # Panics
+///
+/// Panics if `to < from` or if `to` exceeds the filesize.
+pub fn slice(&self, from: usize, to: usize) -> FileSlice {
+assert!(to <= self.len());
+assert!(to >= from);
+FileSlice {
+data: self.data.clone(),
+start: self.start + from,
+stop: self.start + to,
+}
+}
+
+/// Creates an empty FileSlice
+pub fn empty() -> FileSlice {
+const EMPTY_SLICE: &[u8] = &[];
+FileSlice::from(EMPTY_SLICE)
+}
+
+/// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
+///
+/// The behavior is strongly dependant on the implementation of the underlying
+/// `Directory` and the `FileSliceTrait` it creates.
+/// In particular, it is up to the `Directory` implementation
+/// to handle caching if needed.
+pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
+self.data.read_bytes(self.start, self.stop)
+}
+
+/// Reads a specific slice of data.
+///
+/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
+pub fn read_bytes_slice(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
+assert!(from <= to);
+assert!(
+self.start + to <= self.stop,
+"`to` exceeds the fileslice length"
+);
+self.data.read_bytes(self.start + from, self.start + to)
+}
+
+/// Splits the FileSlice at the given offset and return two file slices.
+/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
+///
+/// This operation is cheap and must not copy any underlying data.
+pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
+let left = self.slice_to(left_len);
+let right = self.slice_from(left_len);
+(left, right)
+}
+
+/// Splits the file slice at the given offset and return two file slices.
+/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
+pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
+let left_len = self.len() - right_len;
+self.split(left_len)
+}
+
+/// Like `.slice(...)` but enforcing only the `from`
+/// boundary.
+///
+/// Equivalent to `.slice(from_offset, self.len())`
+pub fn slice_from(&self, from_offset: usize) -> FileSlice {
+self.slice(from_offset, self.len())
+}
+
+/// Like `.slice(...)` but enforcing only the `to`
+/// boundary.
+///
+/// Equivalent to `.slice(0, to_offset)`
+pub fn slice_to(&self, to_offset: usize) -> FileSlice {
+self.slice(0, to_offset)
+}
+}
+
+impl FileHandle for FileSlice {
+fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
+self.read_bytes_slice(from, to)
+}
+}
+
+impl HasLen for FileSlice {
+fn len(&self) -> usize {
+self.stop - self.start
+}
+}
+
+#[cfg(test)]
+mod tests {
+use super::{FileHandle, FileSlice};
+use crate::common::HasLen;
+use std::io;
+
+#[test]
+fn test_file_slice() -> io::Result<()> {
+let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
+assert_eq!(file_slice.len(), 6);
+assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
|
||||||
|
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
|
||||||
|
assert_eq!(
|
||||||
|
file_slice
|
||||||
|
.slice_from(1)
|
||||||
|
.slice_to(2)
|
||||||
|
.read_bytes()?
|
||||||
|
.as_slice(),
|
||||||
|
b"bc"
|
||||||
|
);
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split(0);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split(2);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split_from_end(0);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split_from_end(2);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_file_slice_trait_slice_len() {
|
||||||
|
let blop: &'static [u8] = b"abc";
|
||||||
|
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
|
||||||
|
assert_eq!(owned_bytes.len(), 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_slice_simple_read() -> io::Result<()> {
|
||||||
|
let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(slice.len(), 6);
|
||||||
|
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
|
||||||
|
assert_eq!(slice.slice(1, 4).read_bytes()?.as_ref(), b"bcd");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_slice_read_slice() -> io::Result<()> {
|
||||||
|
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(slice_deref.read_bytes_slice(1, 4)?.as_ref(), b"bcd");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[should_panic(expected = "assertion failed: from <= to")]
|
||||||
|
fn test_slice_read_slice_invalid_range() {
|
||||||
|
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(slice_deref.read_bytes_slice(1, 0).unwrap().as_ref(), b"bcd");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[should_panic(expected = "`to` exceeds the fileslice length")]
|
||||||
|
fn test_slice_read_slice_invalid_range_exceeds() {
|
||||||
|
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(
|
||||||
|
slice_deref.read_bytes_slice(0, 10).unwrap().as_ref(),
|
||||||
|
b"bcd"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
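The FileSlice API above is easiest to grasp from a small usage sketch. The following example is illustrative only: it mirrors the unit tests in this file and assumes the crate-internal paths (crate::directory::FileSlice, crate::common::HasLen) and the OwnedBytes::as_slice accessor shown above.

use crate::common::HasLen;
use crate::directory::FileSlice;

fn file_slice_demo() -> std::io::Result<()> {
    // &'static [u8] implements FileHandle, so a static buffer can back a FileSlice.
    let slice = FileSlice::new(Box::new(&b"hello world"[..]));
    // Slicing only adjusts the (start, stop) offsets and clones the Arc; no data is copied.
    assert_eq!(slice.slice(0, 5).read_bytes()?.as_slice(), b"hello");
    // split_from_end is the pattern used for footers: keep the last N bytes apart from the body.
    let (body, tail) = slice.split_from_end(5);
    assert_eq!(body.len(), 6);
    assert_eq!(tail.read_bytes()?.as_slice(), b"world");
    Ok(())
}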
178  src/directory/file_watcher.rs  Normal file
@@ -0,0 +1,178 @@
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
use crc32fast::Hasher;
use std::fs;
use std::io;
use std::io::BufRead;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });

// Watches a file and executes registered callbacks when the file is modified.
pub struct FileWatcher {
    path: Arc<Path>,
    callbacks: Arc<WatchCallbackList>,
    state: Arc<AtomicUsize>, // 0: new, 1: runnable, 2: terminated
}

impl FileWatcher {
    pub fn new(path: &Path) -> FileWatcher {
        FileWatcher {
            path: Arc::from(path),
            callbacks: Default::default(),
            state: Default::default(),
        }
    }

    pub fn spawn(&self) {
        if self.state.compare_and_swap(0, 1, Ordering::SeqCst) > 0 {
            return;
        }

        let path = self.path.clone();
        let callbacks = self.callbacks.clone();
        let state = self.state.clone();

        thread::Builder::new()
            .name("thread-tantivy-meta-file-watcher".to_string())
            .spawn(move || {
                let mut current_checksum = None;

                while state.load(Ordering::SeqCst) == 1 {
                    if let Ok(checksum) = FileWatcher::compute_checksum(&path) {
                        // `None.unwrap_or_else(|| !checksum) != checksum` evaluates to `true`
                        if current_checksum.unwrap_or_else(|| !checksum) != checksum {
                            info!("Meta file {:?} was modified", path);
                            current_checksum = Some(checksum);
                            futures::executor::block_on(callbacks.broadcast());
                        }
                    }

                    thread::sleep(POLLING_INTERVAL);
                }
            })
            .expect("Failed to spawn meta file watcher thread");
    }

    pub fn watch(&self, callback: WatchCallback) -> WatchHandle {
        let handle = self.callbacks.subscribe(callback);
        self.spawn();
        handle
    }

    fn compute_checksum(path: &Path) -> Result<u32, io::Error> {
        let reader = match fs::File::open(path) {
            Ok(f) => io::BufReader::new(f),
            Err(e) => {
                warn!("Failed to open meta file {:?}: {:?}", path, e);
                return Err(e);
            }
        };

        let mut hasher = Hasher::new();

        for line in reader.lines() {
            hasher.update(line?.as_bytes())
        }

        Ok(hasher.finalize())
    }
}

impl Drop for FileWatcher {
    fn drop(&mut self) {
        self.state.store(2, Ordering::SeqCst);
    }
}

#[cfg(test)]
mod tests {

    use std::mem;

    use crate::directory::mmap_directory::atomic_write;

    use super::*;

    #[test]
    fn test_file_watcher_drop_watcher() -> crate::Result<()> {
        let tmp_dir = tempfile::TempDir::new()?;
        let tmp_file = tmp_dir.path().join("watched.txt");

        let counter: Arc<AtomicUsize> = Default::default();
        let (tx, rx) = crossbeam::channel::unbounded();
        let timeout = Duration::from_millis(100);

        let watcher = FileWatcher::new(&tmp_file);

        let state = watcher.state.clone();
        assert_eq!(state.load(Ordering::SeqCst), 0);

        let counter_clone = counter.clone();

        let _handle = watcher.watch(WatchCallback::new(move || {
            let val = counter_clone.fetch_add(1, Ordering::SeqCst);
            tx.send(val + 1).unwrap();
        }));

        assert_eq!(counter.load(Ordering::SeqCst), 0);
        assert_eq!(state.load(Ordering::SeqCst), 1);

        atomic_write(&tmp_file, b"foo")?;
        assert_eq!(rx.recv_timeout(timeout), Ok(1));

        atomic_write(&tmp_file, b"foo")?;
        assert!(rx.recv_timeout(timeout).is_err());

        atomic_write(&tmp_file, b"bar")?;
        assert_eq!(rx.recv_timeout(timeout), Ok(2));

        mem::drop(watcher);

        atomic_write(&tmp_file, b"qux")?;
        thread::sleep(Duration::from_millis(10));
        assert_eq!(counter.load(Ordering::SeqCst), 2);
        assert_eq!(state.load(Ordering::SeqCst), 2);

        Ok(())
    }

    #[test]
    fn test_file_watcher_drop_handle() -> crate::Result<()> {
        let tmp_dir = tempfile::TempDir::new()?;
        let tmp_file = tmp_dir.path().join("watched.txt");

        let counter: Arc<AtomicUsize> = Default::default();
        let (tx, rx) = crossbeam::channel::unbounded();
        let timeout = Duration::from_millis(100);

        let watcher = FileWatcher::new(&tmp_file);

        let state = watcher.state.clone();
        assert_eq!(state.load(Ordering::SeqCst), 0);

        let counter_clone = counter.clone();

        let handle = watcher.watch(WatchCallback::new(move || {
            let val = counter_clone.fetch_add(1, Ordering::SeqCst);
            tx.send(val + 1).unwrap();
        }));

        assert_eq!(counter.load(Ordering::SeqCst), 0);
        assert_eq!(state.load(Ordering::SeqCst), 1);

        atomic_write(&tmp_file, b"foo")?;
        assert_eq!(rx.recv_timeout(timeout), Ok(1));

        mem::drop(handle);

        atomic_write(&tmp_file, b"qux")?;
        assert_eq!(counter.load(Ordering::SeqCst), 1);
        assert_eq!(state.load(Ordering::SeqCst), 1);

        Ok(())
    }
}
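A short, hedged sketch of how the polling FileWatcher above is meant to be used. It mirrors the unit tests and assumes the crate-internal paths (crate::directory::file_watcher::FileWatcher, crate::directory::WatchCallback, and the atomic_write helper from mmap_directory) are visible at the call site; those module paths are an assumption, not something this diff guarantees.

use crate::directory::file_watcher::FileWatcher;
use crate::directory::mmap_directory::atomic_write;
use crate::directory::WatchCallback;

fn file_watcher_demo() -> crate::Result<()> {
    let tmp_dir = tempfile::TempDir::new()?;
    let meta_path = tmp_dir.path().join("meta.json");
    let watcher = FileWatcher::new(&meta_path);
    // The background thread polls the file every POLLING_INTERVAL and recomputes a CRC32;
    // callbacks only fire when the checksum actually changes.
    let handle = watcher.watch(WatchCallback::new(|| {
        println!("meta.json was modified");
    }));
    atomic_write(&meta_path, b"{}")?;
    // Dropping the handle unregisters the callback; dropping the watcher stops the polling thread.
    drop(handle);
    drop(watcher);
    Ok(())
}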
@@ -1,9 +1,8 @@
-use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
+use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt};
use crate::directory::error::Incompatibility;
-use crate::directory::read_only_source::ReadOnlySource;
+use crate::directory::FileSlice;
use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version;
-use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
@@ -64,26 +63,26 @@ impl Footer {
        let mut counting_write = CountingWriter::wrap(&mut write);
        self.serialize(&mut counting_write)?;
        let written_len = counting_write.written_bytes();
-        write.write_u32::<LittleEndian>(written_len as u32)?;
+        (written_len as u32).serialize(write)?;
        Ok(())
    }

-    pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
+    pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> {
-        if source.len() < 4 {
+        if file.len() < 4 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "File corrupted. The file is smaller than 4 bytes (len={}).",
-                    source.len()
+                    file.len()
                ),
            ));
        }
-        let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
+        let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES);
-        let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
+        let mut footer_len_bytes = footer_len_file.read_bytes()?;
-        let body_len = body_footer.len() - footer_len;
+        let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize;
-        let (body, footer_data) = body_footer.split(body_len);
+        let (body, footer) = body_footer.split_from_end(footer_len);
-        let mut cursor = footer_data.as_slice();
+        let mut footer_bytes = footer.read_bytes()?;
-        let footer = Footer::deserialize(&mut cursor)?;
+        let footer = Footer::deserialize(&mut footer_bytes)?;
        Ok((footer, body))
    }

@@ -94,12 +93,36 @@ impl Footer {
        match &self.versioned_footer {
            VersionedFooter::V1 {
                crc32: _crc,
-                store_compression: compression,
+                store_compression,
            } => {
-                if &library_version.store_compression != compression {
+                if &library_version.store_compression != store_compression {
                    return Err(Incompatibility::CompressionMismatch {
                        library_compression_format: library_version.store_compression.to_string(),
-                        index_compression_format: compression.to_string(),
+                        index_compression_format: store_compression.to_string(),
+                    });
+                }
+                Ok(())
+            }
+            VersionedFooter::V2 {
+                crc32: _crc,
+                store_compression,
+            } => {
+                if &library_version.store_compression != store_compression {
+                    return Err(Incompatibility::CompressionMismatch {
+                        library_compression_format: library_version.store_compression.to_string(),
+                        index_compression_format: store_compression.to_string(),
+                    });
+                }
+                Ok(())
+            }
+            VersionedFooter::V3 {
+                crc32: _crc,
+                store_compression,
+            } => {
+                if &library_version.store_compression != store_compression {
+                    return Err(Incompatibility::CompressionMismatch {
+                        library_compression_format: library_version.store_compression.to_string(),
+                        index_compression_format: store_compression.to_string(),
                    });
                }
                Ok(())
@@ -120,24 +143,36 @@ pub enum VersionedFooter {
        crc32: CrcHashU32,
        store_compression: String,
    },
+    // Introduction of the Block WAND information.
+    V2 {
+        crc32: CrcHashU32,
+        store_compression: String,
+    },
+    // Block wand max termfred on 1 byte
+    V3 {
+        crc32: CrcHashU32,
+        store_compression: String,
+    },
}

impl BinarySerializable for VersionedFooter {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        let mut buf = Vec::new();
        match self {
-            VersionedFooter::V1 {
+            VersionedFooter::V3 {
                crc32,
                store_compression: compression,
            } => {
                // Serializes a valid `VersionedFooter` or panics if the version is unknown
                // [ version | crc_hash | compression_mode ]
                // [ 0..4 | 4..8 | variable ]
-                BinarySerializable::serialize(&1u32, &mut buf)?;
+                BinarySerializable::serialize(&3u32, &mut buf)?;
                BinarySerializable::serialize(crc32, &mut buf)?;
                BinarySerializable::serialize(compression, &mut buf)?;
            }
-            VersionedFooter::UnknownVersion => {
+            VersionedFooter::V2 { .. }
+            | VersionedFooter::V1 { .. }
+            | VersionedFooter::UnknownVersion => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Cannot serialize an unknown versioned footer ",
@@ -166,22 +201,36 @@ impl BinarySerializable for VersionedFooter {
        reader.read_exact(&mut buf[..])?;
        let mut cursor = &buf[..];
        let version = u32::deserialize(&mut cursor)?;
-        if version == 1 {
+        if version > 3 {
-            let crc32 = u32::deserialize(&mut cursor)?;
+            return Ok(VersionedFooter::UnknownVersion);
-            let compression = String::deserialize(&mut cursor)?;
-            Ok(VersionedFooter::V1 {
-                crc32,
-                store_compression: compression,
-            })
-        } else {
-            Ok(VersionedFooter::UnknownVersion)
        }
+        let crc32 = u32::deserialize(&mut cursor)?;
+        let store_compression = String::deserialize(&mut cursor)?;
+        Ok(if version == 1 {
+            VersionedFooter::V1 {
+                crc32,
+                store_compression,
+            }
+        } else if version == 2 {
+            VersionedFooter::V2 {
+                crc32,
+                store_compression,
+            }
+        } else {
+            assert_eq!(version, 3);
+            VersionedFooter::V3 {
+                crc32,
+                store_compression,
+            }
+        })
    }
}

impl VersionedFooter {
    pub fn crc(&self) -> Option<CrcHashU32> {
        match self {
+            VersionedFooter::V3 { crc32, .. } => Some(*crc32),
+            VersionedFooter::V2 { crc32, .. } => Some(*crc32),
            VersionedFooter::V1 { crc32, .. } => Some(*crc32),
            VersionedFooter::UnknownVersion { .. } => None,
        }
@@ -219,7 +268,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        let crc32 = self.hasher.take().unwrap().finalize();
-        let footer = Footer::new(VersionedFooter::V1 {
+        let footer = Footer::new(VersionedFooter::V3 {
            crc32,
            store_compression: crate::store::COMPRESSION.to_string(),
        });
@@ -246,17 +295,17 @@ mod tests {
        let mut vec = Vec::new();
        let footer_proxy = FooterProxy::new(&mut vec);
        assert!(footer_proxy.terminate().is_ok());
-        assert_eq!(vec.len(), 167);
+        if crate::store::COMPRESSION == "lz4" {
-        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
+            assert_eq!(vec.len(), 158);
-        if let VersionedFooter::V1 {
-            crc32: _,
-            store_compression,
-        } = footer.versioned_footer
-        {
-            assert_eq!(store_compression, crate::store::COMPRESSION);
        } else {
-            panic!("Versioned footer should be V1.");
+            assert_eq!(vec.len(), 167);
        }
+        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
+        assert!(matches!(
+            footer.versioned_footer,
+            VersionedFooter::V3 { store_compression, .. }
+            if store_compression == crate::store::COMPRESSION
+        ));
        assert_eq!(&footer.version, crate::version());
    }

@@ -264,7 +313,7 @@ mod tests {
    fn test_serialize_deserialize_footer() {
        let mut buffer = Vec::new();
        let crc32 = 123456u32;
-        let footer: Footer = Footer::new(VersionedFooter::V1 {
+        let footer: Footer = Footer::new(VersionedFooter::V3 {
            crc32,
            store_compression: "lz4".to_string(),
        });
@@ -276,7 +325,7 @@ mod tests {
    #[test]
    fn footer_length() {
        let crc32 = 1111111u32;
-        let versioned_footer = VersionedFooter::V1 {
+        let versioned_footer = VersionedFooter::V3 {
            crc32,
            store_compression: "lz4".to_string(),
        };
@@ -297,7 +346,7 @@ mod tests {
            // versionned footer length
            12 | 128,
            // index format version
-            1,
+            3,
            0,
            0,
            0,
@@ -316,7 +365,7 @@ mod tests {
        let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
        assert!(cursor.is_empty());
        let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
-        let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
+        let expected_versioned_footer: VersionedFooter = VersionedFooter::V3 {
            crc32: expected_crc,
            store_compression: "lz4".to_string(),
        };
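The V3 footer layout implied by the serialize() code above can be worked out with a few lines of arithmetic. This is a hedged sketch: it assumes that the String impl of BinarySerializable writes a one-byte VInt length for short strings, which is consistent with the `12 | 128` length byte checked in footer_length() but is not spelled out in this diff.

// Byte layout of a serialized VersionedFooter::V3, as implied by serialize() above:
// [ version: u32 LE | crc32: u32 LE | compression: VInt len + UTF-8 bytes ]
fn versioned_footer_v3_len(compression: &str) -> usize {
    let version_len = 4; // e.g. 3u32, little-endian
    let crc_len = 4; // CRC32 of everything written before the footer
    let compression_len = 1 + compression.len(); // assumes a 1-byte VInt length for short strings
    version_len + crc_len + compression_len
}

// versioned_footer_v3_len("lz4") == 12, matching the length byte asserted in footer_length().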
@@ -1,11 +1,11 @@
-use crate::core::MANAGED_FILEPATH;
+use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
-use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
+use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
-use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock;
use crate::directory::META_LOCK;
-use crate::directory::{ReadOnlySource, WritePtr};
+use crate::directory::{DirectoryLock, FileHandle};
+use crate::directory::{FileSlice, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
@@ -53,7 +53,7 @@ struct MetaInformation {
/// Saves the file containing the list of existing files
/// that were created by tantivy.
fn save_managed_paths(
-    directory: &mut dyn Directory,
+    directory: &dyn Directory,
    wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> {
    let mut w = serde_json::to_vec(&wlock.managed_paths)?;
@@ -86,7 +86,7 @@ impl ManagedDirectory {
                directory: Box::new(directory),
                meta_informations: Arc::default(),
            }),
-            Err(OpenReadError::IOError(e)) => Err(From::from(e)),
+            io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()),
            Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
                // For the moment, this should never happen `meta.json`
                // do not have any footer and cannot detect incompatibility.
@@ -168,7 +168,7 @@ impl ManagedDirectory {
                    DeleteError::FileDoesNotExist(_) => {
                        deleted_files.push(file_to_delete.clone());
                    }
-                    DeleteError::IOError(_) => {
+                    DeleteError::IOError { .. } => {
                        failed_to_delete_files.push(file_to_delete.clone());
                        if !cfg!(target_os = "windows") {
                            // On windows, delete is expected to fail if the file
@@ -212,7 +212,7 @@ impl ManagedDirectory {
    /// File starting by "." are reserved to locks.
    /// They are not managed and cannot be subjected
    /// to garbage collection.
-    fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
+    fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> {
        // Files starting by "." (e.g. lock files) are not managed.
        if !is_managed(filepath) {
            return Ok(());
@@ -223,7 +223,7 @@ impl ManagedDirectory {
            .expect("Managed file lock poisoned");
        let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
        if has_changed {
-            save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
+            save_managed_paths(self.directory.as_ref(), &meta_wlock)?;
        }
        Ok(())
    }
@@ -231,10 +231,19 @@ impl ManagedDirectory {
    /// Verify checksum of a managed file
    pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
        let reader = self.directory.open_read(path)?;
-        let (footer, data) = Footer::extract_footer(reader)
+        let (footer, data) =
-            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
+            Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError {
+                io_error,
+                filepath: path.to_path_buf(),
+            })?;
+        let bytes = data
+            .read_bytes()
+            .map_err(|io_error| OpenReadError::IOError {
+                filepath: path.to_path_buf(),
+                io_error,
+            })?;
        let mut hasher = Hasher::new();
-        hasher.update(data.as_slice());
+        hasher.update(bytes.as_slice());
        let crc = hasher.finalize();
        Ok(footer
            .versioned_footer
@@ -245,35 +254,42 @@ impl ManagedDirectory {

    /// List files for which checksum does not match content
    pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
-        let mut hashset = HashSet::new();
+        let mut managed_paths = self
-        let managed_paths = self
            .meta_informations
            .read()
            .expect("Managed directory rlock poisoned in list damaged.")
            .managed_paths
            .clone();

-        for path in managed_paths.into_iter() {
+        managed_paths.remove(*META_FILEPATH);
+
+        let mut damaged_files = HashSet::new();
+        for path in managed_paths {
            if !self.validate_checksum(&path)? {
-                hashset.insert(path);
+                damaged_files.insert(path);
            }
        }
-        Ok(hashset)
+        Ok(damaged_files)
    }
}

impl Directory for ManagedDirectory {
-    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
+    fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
-        let read_only_source = self.directory.open_read(path)?;
+        let file_slice = self.open_read(path)?;
-        let (footer, reader) = Footer::extract_footer(read_only_source)
+        Ok(Box::new(file_slice))
-            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
+    }
+
+    fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
+        let file_slice = self.directory.open_read(path)?;
+        let (footer, reader) = Footer::extract_footer(file_slice)
+            .map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
        footer.is_compatible()?;
        Ok(reader)
    }

-    fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
+    fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
        self.register_file_as_managed(path)
-            .map_err(|e| IOError::with_path(path.to_owned(), e))?;
+            .map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
        Ok(io::BufWriter::new(Box::new(FooterProxy::new(
            self.directory
                .open_write(path)?
@@ -283,7 +299,7 @@ impl Directory for ManagedDirectory {
        ))))
    }

-    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
+    fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
        self.register_file_as_managed(path)?;
        self.directory.atomic_write(path, data)
    }
@@ -296,7 +312,7 @@ impl Directory for ManagedDirectory {
        self.directory.delete(path)
    }

-    fn exists(&self, path: &Path) -> bool {
+    fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
        self.directory.exists(path)
    }

@@ -344,22 +360,22 @@ mod tests_mmap_specific {
            managed_directory
                .atomic_write(test_path2, &[0u8, 1u8])
                .unwrap();
-            assert!(managed_directory.exists(test_path1));
+            assert!(managed_directory.exists(test_path1).unwrap());
-            assert!(managed_directory.exists(test_path2));
+            assert!(managed_directory.exists(test_path2).unwrap());
            let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
            assert!(managed_directory.garbage_collect(|| living_files).is_ok());
-            assert!(managed_directory.exists(test_path1));
+            assert!(managed_directory.exists(test_path1).unwrap());
-            assert!(!managed_directory.exists(test_path2));
+            assert!(!managed_directory.exists(test_path2).unwrap());
        }
        {
            let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
            let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
-            assert!(managed_directory.exists(test_path1));
+            assert!(managed_directory.exists(test_path1).unwrap());
-            assert!(!managed_directory.exists(test_path2));
+            assert!(!managed_directory.exists(test_path2).unwrap());
            let living_files: HashSet<PathBuf> = HashSet::new();
            assert!(managed_directory.garbage_collect(|| living_files).is_ok());
-            assert!(!managed_directory.exists(test_path1));
+            assert!(!managed_directory.exists(test_path1).unwrap());
-            assert!(!managed_directory.exists(test_path2));
+            assert!(!managed_directory.exists(test_path2).unwrap());
        }
    }

@@ -376,7 +392,7 @@ mod tests_mmap_specific {
        let mut write = managed_directory.open_write(test_path1).unwrap();
        write.write_all(&[0u8, 1u8]).unwrap();
        write.terminate().unwrap();
-        assert!(managed_directory.exists(test_path1));
+        assert!(managed_directory.exists(test_path1).unwrap());

        let _mmap_read = managed_directory.open_read(test_path1).unwrap();
        assert!(managed_directory
@@ -384,52 +400,50 @@ mod tests_mmap_specific {
            .is_ok());
        if cfg!(target_os = "windows") {
            // On Windows, gc should try and fail the file as it is mmapped.
-            assert!(managed_directory.exists(test_path1));
+            assert!(managed_directory.exists(test_path1).unwrap());
            // unmap should happen here.
            drop(_mmap_read);
            // The file should still be in the list of managed file and
            // eventually be deleted once mmap is released.
            assert!(managed_directory.garbage_collect(|| living_files).is_ok());
-            assert!(!managed_directory.exists(test_path1));
+            assert!(!managed_directory.exists(test_path1).unwrap());
        } else {
-            assert!(!managed_directory.exists(test_path1));
+            assert!(!managed_directory.exists(test_path1).unwrap());
        }
    }

    #[test]
-    fn test_checksum() {
+    fn test_checksum() -> crate::Result<()> {
        let test_path1: &'static Path = Path::new("some_path_for_test");
        let test_path2: &'static Path = Path::new("other_test_path");

        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());

-        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
+        let mmap_directory = MmapDirectory::open(&tempdir_path)?;
-        let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
+        let managed_directory = ManagedDirectory::wrap(mmap_directory)?;
-        let mut write = managed_directory.open_write(test_path1).unwrap();
+        let mut write = managed_directory.open_write(test_path1)?;
-        write.write_all(&[0u8, 1u8]).unwrap();
+        write.write_all(&[0u8, 1u8])?;
-        write.terminate().unwrap();
+        write.terminate()?;

-        let mut write = managed_directory.open_write(test_path2).unwrap();
+        let mut write = managed_directory.open_write(test_path2)?;
-        write.write_all(&[3u8, 4u8, 5u8]).unwrap();
+        write.write_all(&[3u8, 4u8, 5u8])?;
-        write.terminate().unwrap();
+        write.terminate()?;

-        let read_source = managed_directory.open_read(test_path2).unwrap();
+        let read_file = managed_directory.open_read(test_path2)?.read_bytes()?;
-        assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
+        assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]);
        assert!(managed_directory.list_damaged().unwrap().is_empty());

        let mut corrupted_path = tempdir_path.clone();
        corrupted_path.push(test_path2);
-        let mut file = OpenOptions::new()
+        let mut file = OpenOptions::new().write(true).open(&corrupted_path)?;
-            .write(true)
+        file.write_all(&[255u8])?;
-            .open(&corrupted_path)
+        file.flush()?;
-            .unwrap();
-        file.write_all(&[255u8]).unwrap();
-        file.flush().unwrap();
        drop(file);

-        let damaged = managed_directory.list_damaged().unwrap();
+        let damaged = managed_directory.list_damaged()?;
        assert_eq!(damaged.len(), 1);
        assert!(damaged.contains(test_path2));
+        Ok(())
    }
}
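For orientation, the checksum path exercised by validate_checksum() and list_damaged() above boils down to a few steps: split the serialized footer off the end of the file, CRC32 the remaining body, and compare against the stored hash. The sketch below is illustrative only and assumes the crate-internal Footer, FileSlice and OwnedBytes types shown earlier in this diff.

use crate::directory::footer::Footer;
use crate::directory::FileSlice;
use crc32fast::Hasher;

fn crc_matches(file: FileSlice) -> std::io::Result<bool> {
    // Split the serialized footer off the end of the file.
    let (footer, body) = Footer::extract_footer(file)?;
    // Hash the body exactly as FooterProxy did while the file was being written.
    let bytes = body.read_bytes()?;
    let mut hasher = Hasher::new();
    hasher.update(bytes.as_slice());
    let crc = hasher.finalize();
    // UnknownVersion footers carry no crc and are therefore reported as a mismatch here.
    Ok(footer.versioned_footer.crc() == Some(crc))
}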
@@ -1,25 +1,19 @@
|
|||||||
use crate::core::META_FILEPATH;
|
use crate::core::META_FILEPATH;
|
||||||
use crate::directory::error::LockError;
|
use crate::directory::error::LockError;
|
||||||
use crate::directory::error::{
|
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
||||||
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
|
use crate::directory::file_watcher::FileWatcher;
|
||||||
};
|
|
||||||
use crate::directory::read_only_source::BoxedData;
|
|
||||||
use crate::directory::AntiCallToken;
|
|
||||||
use crate::directory::Directory;
|
use crate::directory::Directory;
|
||||||
use crate::directory::DirectoryLock;
|
use crate::directory::DirectoryLock;
|
||||||
use crate::directory::Lock;
|
use crate::directory::Lock;
|
||||||
use crate::directory::ReadOnlySource;
|
|
||||||
use crate::directory::WatchCallback;
|
use crate::directory::WatchCallback;
|
||||||
use crate::directory::WatchCallbackList;
|
|
||||||
use crate::directory::WatchHandle;
|
use crate::directory::WatchHandle;
|
||||||
|
use crate::directory::{AntiCallToken, FileHandle, OwnedBytes};
|
||||||
|
use crate::directory::{ArcBytes, WeakArcBytes};
|
||||||
use crate::directory::{TerminatingWrite, WritePtr};
|
use crate::directory::{TerminatingWrite, WritePtr};
|
||||||
use fs2::FileExt;
|
use fs2::FileExt;
|
||||||
use memmap::Mmap;
|
use memmap::Mmap;
|
||||||
use notify::RawEvent;
|
|
||||||
use notify::RecursiveMode;
|
|
||||||
use notify::Watcher;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::collections::HashMap;
|
use stable_deref_trait::StableDeref;
|
||||||
use std::convert::From;
|
use std::convert::From;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::fs::OpenOptions;
|
use std::fs::OpenOptions;
|
||||||
@@ -28,12 +22,9 @@ use std::io::{self, Seek, SeekFrom};
|
|||||||
use std::io::{BufWriter, Read, Write};
|
use std::io::{BufWriter, Read, Write};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::result;
|
use std::result;
|
||||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::Mutex;
|
|
||||||
use std::sync::RwLock;
|
use std::sync::RwLock;
|
||||||
use std::sync::Weak;
|
use std::{collections::HashMap, ops::Deref};
|
||||||
use std::thread;
|
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
|
|
||||||
/// Create a default io error given a string.
|
/// Create a default io error given a string.
|
||||||
@@ -44,17 +35,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
|
|||||||
/// Returns None iff the file exists, can be read, but is empty (and hence
|
/// Returns None iff the file exists, can be read, but is empty (and hence
|
||||||
/// cannot be mmapped)
|
/// cannot be mmapped)
|
||||||
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||||
let file = File::open(full_path).map_err(|e| {
|
let file = File::open(full_path).map_err(|io_err| {
|
||||||
if e.kind() == io::ErrorKind::NotFound {
|
if io_err.kind() == io::ErrorKind::NotFound {
|
||||||
OpenReadError::FileDoesNotExist(full_path.to_owned())
|
OpenReadError::FileDoesNotExist(full_path.to_path_buf())
|
||||||
} else {
|
} else {
|
||||||
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let meta_data = file
|
let meta_data = file
|
||||||
.metadata()
|
.metadata()
|
||||||
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
|
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?;
|
||||||
if meta_data.len() == 0 {
|
if meta_data.len() == 0 {
|
||||||
// if the file size is 0, it will not be possible
|
// if the file size is 0, it will not be possible
|
||||||
// to mmap the file, so we return None
|
// to mmap the file, so we return None
|
||||||
@@ -64,7 +55,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
|||||||
unsafe {
|
unsafe {
|
||||||
memmap::Mmap::map(&file)
|
memmap::Mmap::map(&file)
|
||||||
.map(Some)
|
.map(Some)
|
||||||
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
|
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -85,7 +76,7 @@ pub struct CacheInfo {
|
|||||||
|
|
||||||
struct MmapCache {
|
struct MmapCache {
|
||||||
counters: CacheCounters,
|
counters: CacheCounters,
|
||||||
cache: HashMap<PathBuf, Weak<BoxedData>>,
|
cache: HashMap<PathBuf, WeakArcBytes>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for MmapCache {
|
impl Default for MmapCache {
|
||||||
@@ -119,7 +110,7 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns None if the file exists but as a len of 0 (and hence is not mmappable).
|
// Returns None if the file exists but as a len of 0 (and hence is not mmappable).
|
||||||
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<Arc<BoxedData>>, OpenReadError> {
|
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<ArcBytes>, OpenReadError> {
|
||||||
if let Some(mmap_weak) = self.cache.get(full_path) {
|
if let Some(mmap_weak) = self.cache.get(full_path) {
|
||||||
if let Some(mmap_arc) = mmap_weak.upgrade() {
|
if let Some(mmap_arc) = mmap_weak.upgrade() {
|
||||||
self.counters.hit += 1;
|
self.counters.hit += 1;
|
||||||
@@ -130,7 +121,7 @@ impl MmapCache {
|
|||||||
self.counters.miss += 1;
|
self.counters.miss += 1;
|
||||||
let mmap_opt = open_mmap(full_path)?;
|
let mmap_opt = open_mmap(full_path)?;
|
||||||
Ok(mmap_opt.map(|mmap| {
|
Ok(mmap_opt.map(|mmap| {
|
||||||
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
|
let mmap_arc: ArcBytes = Arc::new(mmap);
|
||||||
let mmap_weak = Arc::downgrade(&mmap_arc);
|
let mmap_weak = Arc::downgrade(&mmap_arc);
|
||||||
self.cache.insert(full_path.to_owned(), mmap_weak);
|
self.cache.insert(full_path.to_owned(), mmap_weak);
|
||||||
mmap_arc
|
mmap_arc
|
||||||
@@ -138,63 +129,6 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct WatcherWrapper {
|
|
||||||
_watcher: Mutex<notify::RecommendedWatcher>,
|
|
||||||
watcher_router: Arc<WatchCallbackList>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WatcherWrapper {
|
|
||||||
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
|
|
||||||
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
|
|
||||||
// We need to initialize the
|
|
||||||
let watcher = notify::raw_watcher(tx)
|
|
||||||
.and_then(|mut watcher| {
|
|
||||||
watcher.watch(path, RecursiveMode::Recursive)?;
|
|
||||||
Ok(watcher)
|
|
||||||
})
|
|
||||||
.map_err(|err| match err {
|
|
||||||
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
|
|
||||||
_ => {
|
|
||||||
panic!("Unknown error while starting watching directory {:?}", path);
|
|
||||||
}
|
|
||||||
})?;
|
|
||||||
let watcher_router: Arc<WatchCallbackList> = Default::default();
|
|
||||||
let watcher_router_clone = watcher_router.clone();
|
|
||||||
thread::Builder::new()
|
|
||||||
.name("meta-file-watch-thread".to_string())
|
|
||||||
.spawn(move || {
|
|
||||||
loop {
|
|
||||||
match watcher_recv.recv().map(|evt| evt.path) {
|
|
||||||
Ok(Some(changed_path)) => {
|
|
||||||
// ... Actually subject to false positive.
|
|
||||||
// We might want to be more accurate than this at one point.
|
|
||||||
if let Some(filename) = changed_path.file_name() {
|
|
||||||
if filename == *META_FILEPATH {
|
|
||||||
let _ = watcher_router_clone.broadcast();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(None) => {
|
|
||||||
// not an event we are interested in.
|
|
||||||
}
|
|
||||||
Err(_e) => {
|
|
||||||
// the watch send channel was dropped
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})?;
|
|
||||||
Ok(WatcherWrapper {
|
|
||||||
_watcher: Mutex::new(watcher),
|
|
||||||
watcher_router,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
|
|
||||||
self.watcher_router.subscribe(watch_callback)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Directory storing data in files, read via mmap.
|
/// Directory storing data in files, read via mmap.
|
||||||
///
|
///
|
||||||
/// The Mmap object are cached to limit the
|
/// The Mmap object are cached to limit the
|
||||||
@@ -216,40 +150,21 @@ struct MmapDirectoryInner {
|
|||||||
root_path: PathBuf,
|
root_path: PathBuf,
|
||||||
mmap_cache: RwLock<MmapCache>,
|
mmap_cache: RwLock<MmapCache>,
|
||||||
_temp_directory: Option<TempDir>,
|
_temp_directory: Option<TempDir>,
|
||||||
watcher: RwLock<Option<WatcherWrapper>>,
|
watcher: FileWatcher,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MmapDirectoryInner {
|
impl MmapDirectoryInner {
|
||||||
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
|
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
|
||||||
MmapDirectoryInner {
|
MmapDirectoryInner {
|
||||||
root_path,
|
|
||||||
mmap_cache: Default::default(),
|
mmap_cache: Default::default(),
|
||||||
_temp_directory: temp_directory,
|
_temp_directory: temp_directory,
|
||||||
watcher: RwLock::new(None),
|
watcher: FileWatcher::new(&root_path.join(*META_FILEPATH)),
|
||||||
|
root_path,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
|
fn watch(&self, callback: WatchCallback) -> crate::Result<WatchHandle> {
|
||||||
// a lot of juggling here, to ensure we don't do anything that panics
|
Ok(self.watcher.watch(callback))
|
||||||
// while the rwlock is held. That way we ensure that the rwlock cannot
|
|
||||||
// be poisoned.
|
|
||||||
//
|
|
||||||
// The downside is that we might create a watch wrapper that is not useful.
|
|
||||||
let need_initialization = self.watcher.read().unwrap().is_none();
|
|
||||||
if need_initialization {
|
|
||||||
let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
|
|
||||||
let mut watch_wlock = self.watcher.write().unwrap();
|
|
||||||
// the watcher could have been initialized when we released the lock, and
|
|
||||||
// we do not want to lose the watched files that were set.
|
|
||||||
if watch_wlock.is_none() {
|
|
||||||
*watch_wlock = Some(watch_wrapper);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
|
|
||||||
Ok(watch_wrapper.watch(watch_callback))
|
|
||||||
} else {
|
|
||||||
unreachable!("At this point, watch wrapper is supposed to be initialized");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -272,9 +187,11 @@ impl MmapDirectory {
|
|||||||
/// This is mostly useful to test the MmapDirectory itself.
|
/// This is mostly useful to test the MmapDirectory itself.
|
||||||
/// For your unit tests, prefer the RAMDirectory.
|
/// For your unit tests, prefer the RAMDirectory.
|
||||||
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
||||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
|
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
|
||||||
let tempdir_path = PathBuf::from(tempdir.path());
|
Ok(MmapDirectory::new(
|
||||||
Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
|
tempdir.path().to_path_buf(),
|
||||||
|
Some(tempdir),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Opens a MmapDirectory in a directory.
|
/// Opens a MmapDirectory in a directory.
|
||||||
@@ -396,8 +313,38 @@ impl TerminatingWrite for SafeFileWriter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct MmapArc(Arc<dyn Deref<Target = [u8]> + Send + Sync>);
|
||||||
|
|
||||||
|
impl Deref for MmapArc {
|
||||||
|
type Target = [u8];
|
||||||
|
|
||||||
|
fn deref(&self) -> &[u8] {
|
||||||
|
self.0.deref()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
unsafe impl StableDeref for MmapArc {}
|
||||||
|
|
||||||
|
/// Writes a file in an atomic manner.
|
||||||
|
pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
|
||||||
|
// We create the temporary file in the same directory as the target file.
|
||||||
|
// Indeed the canonical temp directory and the target file might sit in different
|
||||||
|
// filesystem, in which case the atomic write may actually not work.
|
||||||
|
let parent_path = path.parent().ok_or_else(|| {
|
||||||
|
io::Error::new(
|
||||||
|
io::ErrorKind::InvalidInput,
|
||||||
|
"Path {:?} does not have parent directory.",
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
|
||||||
|
tempfile.write_all(content)?;
|
||||||
|
tempfile.flush()?;
|
||||||
|
tempfile.into_temp_path().persist(path)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
impl Directory for MmapDirectory {
|
impl Directory for MmapDirectory {
|
||||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
|
||||||
debug!("Open Read {:?}", path);
|
debug!("Open Read {:?}", path);
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
|
|
||||||
@@ -407,12 +354,19 @@ impl Directory for MmapDirectory {
|
|||||||
on mmap cache while reading {:?}",
|
on mmap cache while reading {:?}",
|
||||||
path
|
path
|
||||||
);
|
);
|
||||||
IOError::with_path(path.to_owned(), make_io_err(msg))
|
let io_err = make_io_err(msg);
|
||||||
|
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
|
||||||
})?;
|
})?;
|
||||||
Ok(mmap_cache
|
|
||||||
|
let owned_bytes = mmap_cache
|
||||||
.get_mmap(&full_path)?
|
.get_mmap(&full_path)?
|
||||||
.map(ReadOnlySource::from)
|
.map(|mmap_arc| {
|
||||||
.unwrap_or_else(ReadOnlySource::empty))
|
let mmap_arc_obj = MmapArc(mmap_arc);
|
||||||
|
OwnedBytes::new(mmap_arc_obj)
|
||||||
|
})
|
||||||
|
.unwrap_or_else(OwnedBytes::empty);
|
||||||
|
|
||||||
|
Ok(Box::new(owned_bytes))
|
||||||
}
|
}
|
||||||
|
|
||||||
    /// Any entry associated to the path in the mmap will be

@@ -420,25 +374,29 @@ impl Directory for MmapDirectory {
    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        let full_path = self.resolve_path(path);
        match fs::remove_file(&full_path) {
-            Ok(_) => self
-                .sync_directory()
-                .map_err(|e| IOError::with_path(path.to_owned(), e).into()),
+            Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError {
+                io_error: e,
+                filepath: path.to_path_buf(),
+            }),
            Err(e) => {
                if e.kind() == io::ErrorKind::NotFound {
                    Err(DeleteError::FileDoesNotExist(path.to_owned()))
                } else {
-                    Err(IOError::with_path(path.to_owned(), e).into())
+                    Err(DeleteError::IOError {
+                        io_error: e,
+                        filepath: path.to_path_buf(),
+                    })
                }
            }
        }
    }

-    fn exists(&self, path: &Path) -> bool {
+    fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
        let full_path = self.resolve_path(path);
-        full_path.exists()
+        Ok(full_path.exists())
    }

-    fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
+    fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
        debug!("Open Write {:?}", path);
        let full_path = self.resolve_path(path);

@@ -447,22 +405,22 @@ impl Directory for MmapDirectory {
            .create_new(true)
            .open(full_path);

-        let mut file = open_res.map_err(|err| {
-            if err.kind() == io::ErrorKind::AlreadyExists {
-                OpenWriteError::FileAlreadyExists(path.to_owned())
+        let mut file = open_res.map_err(|io_err| {
+            if io_err.kind() == io::ErrorKind::AlreadyExists {
+                OpenWriteError::FileAlreadyExists(path.to_path_buf())
            } else {
-                IOError::with_path(path.to_owned(), err).into()
+                OpenWriteError::wrap_io_error(io_err, path.to_path_buf())
            }
        })?;

        // making sure the file is created.
        file.flush()
-            .map_err(|e| IOError::with_path(path.to_owned(), e))?;
+            .map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;

        // Apparently, on some filesystems, syncing the parent
        // directory is required.
        self.sync_directory()
-            .map_err(|e| IOError::with_path(path.to_owned(), e))?;
+            .map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?;

        let writer = SafeFileWriter::new(file);
        Ok(BufWriter::new(Box::new(writer)))

@@ -473,26 +431,26 @@ impl Directory for MmapDirectory {
        let mut buffer = Vec::new();
        match File::open(&full_path) {
            Ok(mut file) => {
-                file.read_to_end(&mut buffer)
-                    .map_err(|e| IOError::with_path(path.to_owned(), e))?;
+                file.read_to_end(&mut buffer).map_err(|io_error| {
+                    OpenReadError::wrap_io_error(io_error, path.to_path_buf())
+                })?;
                Ok(buffer)
            }
-            Err(e) => {
-                if e.kind() == io::ErrorKind::NotFound {
+            Err(io_error) => {
+                if io_error.kind() == io::ErrorKind::NotFound {
                    Err(OpenReadError::FileDoesNotExist(path.to_owned()))
                } else {
-                    Err(IOError::with_path(path.to_owned(), e).into())
+                    Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf()))
                }
            }
        }
    }

-    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
+    fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> {
        debug!("Atomic Write {:?}", path);
        let full_path = self.resolve_path(path);
-        let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
-        meta_file.write(|f| f.write_all(data))?;
-        Ok(())
+        atomic_write(&full_path, content)?;
+        self.sync_directory()
    }

    fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
@@ -527,12 +485,10 @@ mod tests {
    // The following tests are specific to the MmapDirectory

    use super::*;
-    use crate::indexer::LogMergePolicy;
    use crate::schema::{Schema, SchemaBuilder, TEXT};
    use crate::Index;
    use crate::ReloadPolicy;
-    use std::fs;
+    use crate::{common::HasLen, indexer::LogMergePolicy};
-    use std::sync::atomic::{AtomicUsize, Ordering};

    #[test]
    fn test_open_non_existent_path() {

@@ -545,7 +501,7 @@ mod tests {
        // cannot be mmapped.
        //
        // In that case the directory returns a SharedVecSlice.
-        let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
+        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
        let path = PathBuf::from("test");
        {
            let mut w = mmap_directory.open_write(&path).unwrap();

@@ -561,7 +517,7 @@ mod tests {

        // here we test if the cache releases
        // mmaps correctly.
-        let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
+        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
        let num_paths = 10;
        let paths: Vec<PathBuf> = (0..num_paths)
            .map(|i| PathBuf::from(&*format!("file_{}", i)))

@@ -621,27 +577,6 @@ mod tests {
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
    }

-    #[test]
-    fn test_watch_wrapper() {
-        let counter: Arc<AtomicUsize> = Default::default();
-        let counter_clone = counter.clone();
-        let tmp_dir = tempfile::TempDir::new().unwrap();
-        let tmp_dirpath = tmp_dir.path().to_owned();
-        let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
-        let tmp_file = tmp_dirpath.join(*META_FILEPATH);
-        let _handle = watch_wrapper.watch(Box::new(move || {
-            counter_clone.fetch_add(1, Ordering::SeqCst);
-        }));
-        let (sender, receiver) = crossbeam::channel::unbounded();
-        let _handle2 = watch_wrapper.watch(Box::new(move || {
-            let _ = sender.send(());
-        }));
-        assert_eq!(counter.load(Ordering::SeqCst), 0);
-        fs::write(&tmp_file, b"whateverwilldo").unwrap();
-        assert!(receiver.recv().is_ok());
-        assert!(counter.load(Ordering::SeqCst) >= 1);
-    }

    #[test]
    fn test_mmap_released() {
        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();

@@ -652,7 +587,7 @@ mod tests {
        {
            let index = Index::create(mmap_directory.clone(), schema).unwrap();

-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
            let mut log_merge_policy = LogMergePolicy::default();
            log_merge_policy.set_min_merge_size(3);
            index_writer.set_merge_policy(Box::new(log_merge_policy));
src/directory/mod.rs
@@ -9,10 +9,12 @@ mod mmap_directory;

mod directory;
mod directory_lock;
+mod file_slice;
+mod file_watcher;
mod footer;
mod managed_directory;
+mod owned_bytes;
mod ram_directory;
-mod read_only_source;
mod watch_event_router;

/// Errors specific to the directory module.

@@ -21,11 +23,14 @@ pub mod error;
pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
+pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
+pub use self::file_slice::{FileHandle, FileSlice};
+pub use self::owned_bytes::OwnedBytes;
pub use self::ram_directory::RAMDirectory;
-pub use self::read_only_source::ReadOnlySource;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::path::PathBuf;

/// Outcome of the Garbage collection
pub struct GarbageCollectionResult {
    /// List of files that were deleted in this cycle
src/directory/owned_bytes.rs (new file, 290 lines)
@@ -0,0 +1,290 @@
use crate::directory::FileHandle;
use stable_deref_trait::StableDeref;
use std::convert::TryInto;
use std::mem;
use std::ops::Deref;
use std::sync::Arc;
use std::{fmt, io};

/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
pub struct OwnedBytes {
    data: &'static [u8],
    box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
}

impl FileHandle for OwnedBytes {
    fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
        Ok(self.slice(from, to))
    }
}

impl OwnedBytes {
    /// Creates an empty `OwnedBytes`.
    pub fn empty() -> OwnedBytes {
        OwnedBytes::new(&[][..])
    }

    /// Creates an `OwnedBytes` instance given a `StableDeref` object.
    pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
        data_holder: T,
    ) -> OwnedBytes {
        let box_stable_deref = Arc::new(data_holder);
        let bytes: &[u8] = box_stable_deref.as_ref();
        let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
        OwnedBytes {
            box_stable_deref,
            data,
        }
    }

    /// Creates an `OwnedBytes` that is just a view over a slice of the data.
    pub fn slice(&self, from: usize, to: usize) -> Self {
        OwnedBytes {
            data: &self.data[from..to],
            box_stable_deref: self.box_stable_deref.clone(),
        }
    }

    /// Returns the underlying slice of data.
    /// `Deref` and `AsRef` are also available.
    #[inline(always)]
    pub fn as_slice(&self) -> &[u8] {
        self.data
    }

    /// Returns the length of the slice.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
    ///
    /// Left will hold `split_len` bytes.
    ///
    /// This operation is cheap and does not require to copy any memory.
    /// On the other hand, both `left` and `right` retain a handle over
    /// the entire slice of memory. In other words, the memory will only
    /// be released when both left and right are dropped.
    pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
        let right_box_stable_deref = self.box_stable_deref.clone();
        let left = OwnedBytes {
            data: &self.data[..split_len],
            box_stable_deref: self.box_stable_deref,
        };
        let right = OwnedBytes {
            data: &self.data[split_len..],
            box_stable_deref: right_box_stable_deref,
        };
        (left, right)
    }

    /// Returns true iff this `OwnedBytes` is empty.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.as_slice().is_empty()
    }

    /// Drops the leftmost `advance_len` bytes.
    ///
    /// See also [.clip(clip_len: usize)](#method.clip).
    #[inline(always)]
    pub fn advance(&mut self, advance_len: usize) {
        self.data = &self.data[advance_len..]
    }

    /// Reads a `u8` from the `OwnedBytes` and advances by one byte.
    pub fn read_u8(&mut self) -> u8 {
        assert!(!self.is_empty());
        let byte = self.as_slice()[0];
        self.advance(1);
        byte
    }

    /// Reads a `u64` encoded as little-endian from the `OwnedBytes` and advances by 8 bytes.
    pub fn read_u64(&mut self) -> u64 {
        assert!(self.len() > 7);
        let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
        self.advance(8);
        u64::from_le_bytes(octlet)
    }
}

impl fmt::Debug for OwnedBytes {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // We truncate the bytes in order to make sure the debug string
        // is not too long.
        let bytes_truncated: &[u8] = if self.len() > 10 {
            &self.as_slice()[..10]
        } else {
            self.as_slice()
        };
        write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
    }
}

impl Deref for OwnedBytes {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

impl io::Read for OwnedBytes {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let read_len = {
            let data = self.as_slice();
            if data.len() >= buf.len() {
                let buf_len = buf.len();
                buf.copy_from_slice(&data[..buf_len]);
                buf.len()
            } else {
                let data_len = data.len();
                buf[..data_len].copy_from_slice(data);
                data_len
            }
        };
        self.advance(read_len);
        Ok(read_len)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        let read_len = {
            let data = self.as_slice();
            buf.extend(data);
            data.len()
        };
        self.advance(read_len);
        Ok(read_len)
    }
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let read_len = self.read(buf)?;
        if read_len != buf.len() {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "failed to fill whole buffer",
            ));
        }
        Ok(())
    }
}

impl AsRef<[u8]> for OwnedBytes {
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

#[cfg(test)]
mod tests {
    use std::io::{self, Read};

    use super::OwnedBytes;

    #[test]
    fn test_owned_bytes_debug() {
        let short_bytes = OwnedBytes::new(b"abcd".as_ref());
        assert_eq!(
            format!("{:?}", short_bytes),
            "OwnedBytes([97, 98, 99, 100], len=4)"
        );
        let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
        assert_eq!(
            format!("{:?}", long_bytes),
            "OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
        );
    }

    #[test]
    fn test_owned_bytes_read() -> io::Result<()> {
        let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
        {
            let mut buf = [0u8; 5];
            bytes.read_exact(&mut buf[..]).unwrap();
            assert_eq!(&buf, b"abcde");
            assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
        }
        {
            let mut buf = [0u8; 2];
            bytes.read_exact(&mut buf[..]).unwrap();
            assert_eq!(&buf, b"fg");
            assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
        }
        Ok(())
    }

    #[test]
    fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
        let mut bytes = OwnedBytes::new(b"abcde".as_ref());
        let mut buf = [0u8; 5];
        assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
        assert_eq!(&buf, b"abcde");
        assert_eq!(bytes.as_slice(), b"");
        assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
        assert_eq!(&buf, b"abcde");
        Ok(())
    }

    #[test]
    fn test_owned_bytes_read_incomplete() -> io::Result<()> {
        let mut bytes = OwnedBytes::new(b"abcde".as_ref());
        let mut buf = [0u8; 7];
        assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
        assert_eq!(&buf[..5], b"abcde");
        assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
        Ok(())
    }

    #[test]
    fn test_owned_bytes_read_to_end() -> io::Result<()> {
        let mut bytes = OwnedBytes::new(b"abcde".as_ref());
        let mut buf = Vec::new();
        bytes.read_to_end(&mut buf)?;
        assert_eq!(buf.as_slice(), b"abcde".as_ref());
        Ok(())
    }

    #[test]
    fn test_owned_bytes_read_u8() -> io::Result<()> {
        let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
        assert_eq!(bytes.read_u8(), 255);
        assert_eq!(bytes.len(), 0);
        Ok(())
    }

    #[test]
    fn test_owned_bytes_read_u64() -> io::Result<()> {
        let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
        assert_eq!(bytes.read_u64(), u64::MAX - 255);
        assert_eq!(bytes.len(), 0);
        Ok(())
    }

    #[test]
    fn test_owned_bytes_split() {
        let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
        let (left, right) = bytes.split(3);
        assert_eq!(left.as_slice(), b"abc");
        assert_eq!(right.as_slice(), b"defghi");
    }

    #[test]
    fn test_owned_bytes_split_boundary() {
        let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
        {
            let (left, right) = bytes.clone().split(0);
            assert_eq!(left.as_slice(), b"");
            assert_eq!(right.as_slice(), b"abcdefghi");
        }
        {
            let (left, right) = bytes.split(9);
            assert_eq!(left.as_slice(), b"abcdefghi");
            assert_eq!(right.as_slice(), b"");
        }
    }
}
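A small usage sketch for the new type, built only from the constructors and accessors defined above; the byte literal is arbitrary:

    use tantivy::directory::OwnedBytes;

    fn demo_owned_bytes() {
        // Any `StableDeref` owner of `[u8]` works; a static slice is the simplest case.
        let bytes = OwnedBytes::new(b"hello world".as_ref());
        assert_eq!(bytes.len(), 11);

        // Slicing and splitting are cheap: both halves share the same backing allocation.
        let (left, right) = bytes.clone().split(5);
        assert_eq!(left.as_slice(), b"hello");
        assert_eq!(right.as_slice(), b" world");

        // `advance` (via `read_u8`) consumes bytes from the front, which is how the readers use it.
        let mut cursor = bytes.slice(0, 5);
        assert_eq!(cursor.read_u8(), b'h');
        assert_eq!(cursor.as_slice(), b"ello");
    }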
src/directory/ram_directory.rs
@@ -1,9 +1,9 @@
-use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
-use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
+use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
+use crate::{common::HasLen, core::META_FILEPATH};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;

@@ -12,6 +12,8 @@ use std::path::{Path, PathBuf};
use std::result;
use std::sync::{Arc, RwLock};

+use super::FileHandle;
+
/// Writer associated with the `RAMDirectory`
///
/// The Writer just writes a buffer.

@@ -80,17 +82,17 @@ impl TerminatingWrite for VecWriter {

#[derive(Default)]
struct InnerDirectory {
-    fs: HashMap<PathBuf, ReadOnlySource>,
+    fs: HashMap<PathBuf, FileSlice>,
    watch_router: WatchCallbackList,
}

impl InnerDirectory {
    fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
-        let data = ReadOnlySource::new(Vec::from(data));
+        let data = FileSlice::from(data.to_vec());
        self.fs.insert(path, data).is_some()
    }

-    fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
+    fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
        self.fs
            .get(path)
            .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))

@@ -151,11 +153,11 @@ impl RAMDirectory {
    /// written using the `atomic_write` API.
    ///
    /// If an error is encountered, files may be persisted partially.
-    pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
+    pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> {
        let wlock = self.fs.write().unwrap();
-        for (path, source) in wlock.fs.iter() {
+        for (path, file) in wlock.fs.iter() {
            let mut dest_wrt = dest.open_write(path)?;
-            dest_wrt.write_all(source.as_slice())?;
+            dest_wrt.write_all(file.read_bytes()?.as_slice())?;
            dest_wrt.terminate()?;
        }
        Ok(())

@@ -163,24 +165,37 @@ impl RAMDirectory {
}

impl Directory for RAMDirectory {
-    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
+    fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
+        let file_slice = self.open_read(path)?;
+        Ok(Box::new(file_slice))
+    }
+
+    fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
        self.fs.read().unwrap().open_read(path)
    }

    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        fail_point!("RAMDirectory::delete", |_| {
-            use crate::directory::error::IOError;
-            let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
-            Err(DeleteError::from(io_error))
+            Err(DeleteError::IOError {
+                io_error: io::Error::from(io::ErrorKind::Other),
+                filepath: path.to_path_buf(),
+            })
        });
        self.fs.write().unwrap().delete(path)
    }

-    fn exists(&self, path: &Path) -> bool {
-        self.fs.read().unwrap().exists(path)
+    fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
+        Ok(self
+            .fs
+            .read()
+            .map_err(|e| OpenReadError::IOError {
+                io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
+                filepath: path.to_path_buf(),
+            })?
+            .exists(path))
    }

-    fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
+    fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
        let mut fs = self.fs.write().unwrap();
        let path_buf = PathBuf::from(path);
        let vec_writer = VecWriter::new(path_buf.clone(), self.clone());

@@ -194,23 +209,26 @@ impl Directory for RAMDirectory {
    }

    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
-        Ok(self.open_read(path)?.as_slice().to_owned())
+        let bytes =
+            self.open_read(path)?
+                .read_bytes()
+                .map_err(|io_error| OpenReadError::IOError {
+                    io_error,
+                    filepath: path.to_path_buf(),
+                })?;
+        Ok(bytes.as_slice().to_owned())
    }

-    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
+    fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
        fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
            io::ErrorKind::Other,
            msg.unwrap_or_else(|| "Undefined".to_string())
        )));
        let path_buf = PathBuf::from(path);

-        // Reserve the path to prevent calls to .write() to succeed.
-        self.fs.write().unwrap().write(path_buf.clone(), &[]);
-
-        let mut vec_writer = VecWriter::new(path_buf, self.clone());
-        vec_writer.write_all(data)?;
-        vec_writer.flush()?;
-        if path == Path::new(&*META_FILEPATH) {
+        self.fs.write().unwrap().write(path_buf, data);
+
+        if path == *META_FILEPATH {
            let _ = self.fs.write().unwrap().watch_router.broadcast();
        }
        Ok(())

@@ -234,13 +252,13 @@ mod tests {
    let msg_seq: &'static [u8] = b"sequential is the way";
    let path_atomic: &'static Path = Path::new("atomic");
    let path_seq: &'static Path = Path::new("seq");
-    let mut directory = RAMDirectory::create();
+    let directory = RAMDirectory::create();
    assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
    let mut wrt = directory.open_write(path_seq).unwrap();
    assert!(wrt.write_all(msg_seq).is_ok());
    assert!(wrt.flush().is_ok());
-    let mut directory_copy = RAMDirectory::create();
+    let directory_copy = RAMDirectory::create();
-    assert!(directory.persist(&mut directory_copy).is_ok());
+    assert!(directory.persist(&directory_copy).is_ok());
    assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
    assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
}
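Since `persist` now takes `&dyn Directory`, a fully in-memory directory can be copied onto disk without mutable access to the destination. A hedged sketch under the 0.14 API shown above; the on-disk path is a placeholder and must already exist:

    use std::path::Path;
    use tantivy::directory::{Directory, MmapDirectory, RAMDirectory};

    fn copy_ram_dir_to_disk() -> tantivy::Result<()> {
        let ram_directory = RAMDirectory::create();
        ram_directory.atomic_write(Path::new("meta.json"), b"{}")?;

        // The destination can be any Directory implementation; here an mmap-backed one.
        let disk_directory = MmapDirectory::open(Path::new("/tmp/tantivy-copy"))?;
        ram_directory.persist(&disk_directory)?;
        assert_eq!(
            disk_directory.atomic_read(Path::new("meta.json"))?,
            b"{}".to_vec()
        );
        Ok(())
    }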
src/directory/read_only_source.rs (file deleted)
@@ -1,137 +0,0 @@
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;

pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

/// Read object that represents files in tantivy.
///
/// These read objects are only in charge of delivering
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// held by this object should never be altered or destroyed.
pub struct ReadOnlySource {
    data: Arc<BoxedData>,
    start: usize,
    stop: usize,
}

unsafe impl StableDeref for ReadOnlySource {}
unsafe impl CloneStableDeref for ReadOnlySource {}

impl Deref for ReadOnlySource {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl From<Arc<BoxedData>> for ReadOnlySource {
    fn from(data: Arc<BoxedData>) -> Self {
        let len = data.len();
        ReadOnlySource {
            data,
            start: 0,
            stop: len,
        }
    }
}

impl ReadOnlySource {
    pub(crate) fn new<D>(data: D) -> ReadOnlySource
    where
        D: Deref<Target = [u8]> + Send + Sync + 'static,
    {
        let len = data.len();
        ReadOnlySource {
            data: Arc::new(Box::new(data)),
            start: 0,
            stop: len,
        }
    }

    /// Creates an empty ReadOnlySource
    pub fn empty() -> ReadOnlySource {
        ReadOnlySource::new(&[][..])
    }

    /// Returns the data underlying the ReadOnlySource object.
    pub fn as_slice(&self) -> &[u8] {
        &self.data[self.start..self.stop]
    }

    /// Splits into 2 `ReadOnlySource`, at the offset given
    /// as an argument.
    pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left = self.slice(0, addr);
        let right = self.slice_from(addr);
        (left, right)
    }

    /// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
    pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left_len = self.len() - right_len;
        self.split(left_len)
    }

    /// Creates a ReadOnlySource that is just a
    /// view over a slice of the data.
    ///
    /// Keep in mind that any living slice extends
    /// the lifetime of the original ReadOnlySource.
    ///
    /// For instance, if `ReadOnlySource` wraps 500MB
    /// worth of data in anonymous memory, and only a
    /// 1KB slice is remaining, the whole `500MBs`
    /// are retained in memory.
    pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
        assert!(
            start <= stop,
            "Requested negative slice [{}..{}]",
            start,
            stop
        );
        assert!(stop <= self.len());
        ReadOnlySource {
            data: self.data.clone(),
            start: self.start + start,
            stop: self.start + stop,
        }
    }

    /// Like `.slice(...)` but enforcing only the `from`
    /// boundary.
    ///
    /// Equivalent to `.slice(from_offset, self.len())`
    pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
        self.slice(from_offset, self.len())
    }

    /// Like `.slice(...)` but enforcing only the `to`
    /// boundary.
    ///
    /// Equivalent to `.slice(0, to_offset)`
    pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
        self.slice(0, to_offset)
    }
}

impl HasLen for ReadOnlySource {
    fn len(&self) -> usize {
        self.stop - self.start
    }
}

impl Clone for ReadOnlySource {
    fn clone(&self) -> Self {
        self.slice_from(0)
    }
}

impl From<Vec<u8>> for ReadOnlySource {
    fn from(data: Vec<u8>) -> ReadOnlySource {
        ReadOnlySource::new(data)
    }
}
src/directory/tests.rs
@@ -20,45 +20,47 @@ mod mmap_directory_tests {
    }

    #[test]
-    fn test_simple() {
-        let mut directory = make_directory();
-        super::test_simple(&mut directory);
+    fn test_simple() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_simple(&directory)
    }

    #[test]
    fn test_write_create_the_file() {
-        let mut directory = make_directory();
-        super::test_write_create_the_file(&mut directory);
+        let directory = make_directory();
+        super::test_write_create_the_file(&directory);
    }

    #[test]
-    fn test_rewrite_forbidden() {
-        let mut directory = make_directory();
-        super::test_rewrite_forbidden(&mut directory);
+    fn test_rewrite_forbidden() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_rewrite_forbidden(&directory)?;
+        Ok(())
    }

    #[test]
-    fn test_directory_delete() {
-        let mut directory = make_directory();
-        super::test_directory_delete(&mut directory);
+    fn test_directory_delete() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_directory_delete(&directory)?;
+        Ok(())
    }

    #[test]
    fn test_lock_non_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_non_blocking(&mut directory);
+        let directory = make_directory();
+        super::test_lock_non_blocking(&directory);
    }

    #[test]
    fn test_lock_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_blocking(&mut directory);
+        let directory = make_directory();
+        super::test_lock_blocking(&directory);
    }

    #[test]
    fn test_watch() {
-        let mut directory = make_directory();
-        super::test_watch(&mut directory);
+        let directory = make_directory();
+        super::test_watch(&directory);
    }
}

@@ -72,45 +74,47 @@ mod ram_directory_tests {
    }

    #[test]
-    fn test_simple() {
-        let mut directory = make_directory();
-        super::test_simple(&mut directory);
+    fn test_simple() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_simple(&directory)
    }

    #[test]
    fn test_write_create_the_file() {
-        let mut directory = make_directory();
-        super::test_write_create_the_file(&mut directory);
+        let directory = make_directory();
+        super::test_write_create_the_file(&directory);
    }

    #[test]
-    fn test_rewrite_forbidden() {
-        let mut directory = make_directory();
-        super::test_rewrite_forbidden(&mut directory);
+    fn test_rewrite_forbidden() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_rewrite_forbidden(&directory)?;
+        Ok(())
    }

    #[test]
-    fn test_directory_delete() {
-        let mut directory = make_directory();
-        super::test_directory_delete(&mut directory);
+    fn test_directory_delete() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_directory_delete(&directory)?;
+        Ok(())
    }

    #[test]
    fn test_lock_non_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_non_blocking(&mut directory);
+        let directory = make_directory();
+        super::test_lock_non_blocking(&directory);
    }

    #[test]
    fn test_lock_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_blocking(&mut directory);
+        let directory = make_directory();
+        super::test_lock_blocking(&directory);
    }

    #[test]
    fn test_watch() {
-        let mut directory = make_directory();
-        super::test_watch(&mut directory);
+        let directory = make_directory();
+        super::test_watch(&directory);
    }
}

@@ -118,68 +122,61 @@ mod ram_directory_tests {
#[should_panic]
fn ram_directory_panics_if_flush_forgotten() {
    let test_path: &'static Path = Path::new("some_path_for_test");
-    let mut ram_directory = RAMDirectory::create();
+    let ram_directory = RAMDirectory::create();
    let mut write_file = ram_directory.open_write(test_path).unwrap();
    assert!(write_file.write_all(&[4]).is_ok());
}

-fn test_simple(directory: &mut dyn Directory) {
+fn test_simple(directory: &dyn Directory) -> crate::Result<()> {
    let test_path: &'static Path = Path::new("some_path_for_test");
-    {
-        let mut write_file = directory.open_write(test_path).unwrap();
-        assert!(directory.exists(test_path));
-        write_file.write_all(&[4]).unwrap();
-        write_file.write_all(&[3]).unwrap();
-        write_file.write_all(&[7, 3, 5]).unwrap();
-        write_file.flush().unwrap();
-    }
-    {
-        let read_file = directory.open_read(test_path).unwrap();
-        let data: &[u8] = &*read_file;
-        assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
-    }
+    let mut write_file = directory.open_write(test_path)?;
+    assert!(directory.exists(test_path).unwrap());
+    write_file.write_all(&[4])?;
+    write_file.write_all(&[3])?;
+    write_file.write_all(&[7, 3, 5])?;
+    write_file.flush()?;
+    let read_file = directory.open_read(test_path)?.read_bytes()?;
+    assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]);
+    mem::drop(read_file);
    assert!(directory.delete(test_path).is_ok());
-    assert!(!directory.exists(test_path));
+    assert!(!directory.exists(test_path).unwrap());
+    Ok(())
}

-fn test_rewrite_forbidden(directory: &mut dyn Directory) {
+fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> {
    let test_path: &'static Path = Path::new("some_path_for_test");
-    {
-        directory.open_write(test_path).unwrap();
-        assert!(directory.exists(test_path));
-    }
-    {
-        assert!(directory.open_write(test_path).is_err());
-    }
+    directory.open_write(test_path)?;
+    assert!(directory.exists(test_path).unwrap());
+    assert!(directory.open_write(test_path).is_err());
    assert!(directory.delete(test_path).is_ok());
+    Ok(())
}

-fn test_write_create_the_file(directory: &mut dyn Directory) {
+fn test_write_create_the_file(directory: &dyn Directory) {
    let test_path: &'static Path = Path::new("some_path_for_test");
    {
        assert!(directory.open_read(test_path).is_err());
        let _w = directory.open_write(test_path).unwrap();
-        assert!(directory.exists(test_path));
+        assert!(directory.exists(test_path).unwrap());
        assert!(directory.open_read(test_path).is_ok());
        assert!(directory.delete(test_path).is_ok());
    }
}

-fn test_directory_delete(directory: &mut dyn Directory) {
+fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
    let test_path: &'static Path = Path::new("some_path_for_test");
    assert!(directory.open_read(test_path).is_err());
-    let mut write_file = directory.open_write(&test_path).unwrap();
-    write_file.write_all(&[1, 2, 3, 4]).unwrap();
-    write_file.flush().unwrap();
+    let mut write_file = directory.open_write(&test_path)?;
+    write_file.write_all(&[1, 2, 3, 4])?;
+    write_file.flush()?;
    {
-        let read_handle = directory.open_read(&test_path).unwrap();
-        assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
+        let read_handle = directory.open_read(&test_path)?.read_bytes()?;
+        assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
        // Mapped files can't be deleted on Windows
        if !cfg!(windows) {
            assert!(directory.delete(&test_path).is_ok());
-            assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
+            assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
        }

        assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
    }

@@ -189,44 +186,40 @@ fn test_directory_delete(directory: &mut dyn Directory) {

    assert!(directory.open_read(&test_path).is_err());
    assert!(directory.delete(&test_path).is_err());
+    Ok(())
}

-fn test_watch(directory: &mut dyn Directory) {
-    let num_progress: Arc<AtomicUsize> = Default::default();
-    let counter: Arc<AtomicUsize> = Default::default();
-    let counter_clone = counter.clone();
-    let (sender, receiver) = crossbeam::channel::unbounded();
-    let watch_callback = Box::new(move || {
-        counter_clone.fetch_add(1, SeqCst);
-    });
-    // This callback is used to synchronize watching in our unit test.
-    // We bind it to a variable because the callback is removed when that
-    // handle is dropped.
-    let watch_handle = directory.watch(watch_callback).unwrap();
-    let _progress_listener = directory
-        .watch(Box::new(move || {
-            let val = num_progress.fetch_add(1, SeqCst);
-            let _ = sender.send(val);
-        }))
-        .unwrap();
-
-    for i in 0..10 {
-        assert_eq!(i, counter.load(SeqCst));
-        assert!(directory
-            .atomic_write(Path::new("meta.json"), b"random_test_data_2")
-            .is_ok());
-        assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
-        assert_eq!(i + 1, counter.load(SeqCst));
-    }
-    mem::drop(watch_handle);
-    assert!(directory
-        .atomic_write(Path::new("meta.json"), b"random_test_data")
-        .is_ok());
-    assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
-    assert_eq!(10, counter.load(SeqCst));
-}
+fn test_watch(directory: &dyn Directory) {
+    let counter: Arc<AtomicUsize> = Default::default();
+    let (tx, rx) = crossbeam::channel::unbounded();
+    let timeout = Duration::from_millis(500);
+
+    let handle = directory
+        .watch(WatchCallback::new(move || {
+            let val = counter.fetch_add(1, SeqCst);
+            tx.send(val + 1).unwrap();
+        }))
+        .unwrap();
+
+    assert!(directory
+        .atomic_write(Path::new("meta.json"), b"foo")
+        .is_ok());
+    assert_eq!(rx.recv_timeout(timeout), Ok(1));
+
+    assert!(directory
+        .atomic_write(Path::new("meta.json"), b"bar")
+        .is_ok());
+    assert_eq!(rx.recv_timeout(timeout), Ok(2));
+
+    mem::drop(handle);
+
+    assert!(directory
+        .atomic_write(Path::new("meta.json"), b"qux")
+        .is_ok());
+    assert!(rx.recv_timeout(timeout).is_err());
+}

-fn test_lock_non_blocking(directory: &mut dyn Directory) {
+fn test_lock_non_blocking(directory: &dyn Directory) {
    {
        let lock_a_res = directory.acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),

@@ -251,7 +244,7 @@ fn test_lock_non_blocking(directory: &mut dyn Directory) {
        assert!(lock_a_res.is_ok());
    }

-fn test_lock_blocking(directory: &mut dyn Directory) {
+fn test_lock_blocking(directory: &dyn Directory) {
    let lock_a_res = directory.acquire_lock(&Lock {
        filepath: PathBuf::from("a.lock"),
        is_blocking: true,
src/directory/watch_event_router.rs
@@ -4,8 +4,20 @@ use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;

-/// Type alias for callbacks registered when watching files of a `Directory`.
-pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
+/// Cloneable wrapper for callbacks registered when watching files of a `Directory`.
+#[derive(Clone)]
+pub struct WatchCallback(Arc<dyn Fn() + Sync + Send>);
+
+impl WatchCallback {
+    /// Wraps a `Fn()` to create a WatchCallback.
+    pub fn new<F: Fn() + Sync + Send + 'static>(op: F) -> Self {
+        WatchCallback(Arc::new(op))
+    }
+
+    fn call(&self) {
+        self.0()
+    }
+}
+
/// Helper struct to implement the watch method in `Directory` implementations.
///

@@ -29,10 +41,17 @@ impl WatchHandle {
    pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
        WatchHandle(watch_callback)
    }
+
+    /// Returns an empty watch handle.
+    ///
+    /// This function is only useful when implementing a readonly directory.
+    pub fn empty() -> WatchHandle {
+        WatchHandle::new(Arc::new(WatchCallback::new(|| {})))
+    }
}

impl WatchCallbackList {
-    /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
+    /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
    pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
        let watch_callback_arc = Arc::new(watch_callback);
        let watch_callback_weak = Arc::downgrade(&watch_callback_arc);

@@ -40,13 +59,13 @@ impl WatchCallbackList {
        WatchHandle::new(watch_callback_arc)
    }

-    fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
-        let mut callbacks = vec![];
+    fn list_callback(&self) -> Vec<WatchCallback> {
+        let mut callbacks: Vec<WatchCallback> = vec![];
        let mut router_wlock = self.router.write().unwrap();
        let mut i = 0;
        while i < router_wlock.len() {
            if let Some(watch) = router_wlock[i].upgrade() {
-                callbacks.push(watch);
+                callbacks.push(watch.as_ref().clone());
                i += 1;
            } else {
                router_wlock.swap_remove(i);

@@ -68,7 +87,7 @@ impl WatchCallbackList {
            .name("watch-callbacks".to_string())
            .spawn(move || {
                for callback in callbacks {
-                    callback();
+                    callback.call();
                }
                let _ = sender.send(());
            });

@@ -84,7 +103,7 @@ impl WatchCallbackList {

#[cfg(test)]
mod tests {
-    use crate::directory::WatchCallbackList;
+    use crate::directory::{WatchCallback, WatchCallbackList};
    use futures::executor::block_on;
    use std::mem;
    use std::sync::atomic::{AtomicUsize, Ordering};

@@ -95,7 +114,7 @@ mod tests {
    let watch_event_router = WatchCallbackList::default();
    let counter: Arc<AtomicUsize> = Default::default();
    let counter_clone = counter.clone();
-    let inc_callback = Box::new(move || {
+    let inc_callback = WatchCallback::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    });
    block_on(watch_event_router.broadcast());

@@ -123,7 +142,7 @@ mod tests {
    let counter: Arc<AtomicUsize> = Default::default();
    let inc_callback = |inc: usize| {
        let counter_clone = counter.clone();
-        Box::new(move || {
+        WatchCallback::new(move || {
            counter_clone.fetch_add(inc, Ordering::SeqCst);
        })
    };

@@ -151,7 +170,7 @@ mod tests {
    let watch_event_router = WatchCallbackList::default();
    let counter: Arc<AtomicUsize> = Default::default();
    let counter_clone = counter.clone();
-    let inc_callback = Box::new(move || {
+    let inc_callback = WatchCallback::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    });
    let handle_a = watch_event_router.subscribe(inc_callback);
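Since `WatchCallback` is now a cloneable struct rather than a boxed closure, registering a callback looks like this. A minimal sketch built from the API shown in this diff, assuming `Directory::watch` returns a `Result` as the tests above suggest; the callback body is arbitrary:

    use std::path::Path;
    use tantivy::directory::{Directory, RAMDirectory, WatchCallback};

    fn watch_meta_changes() -> tantivy::Result<()> {
        let directory = RAMDirectory::create();
        // The handle controls the callback's lifetime: dropping it unregisters the callback.
        let _handle = directory.watch(WatchCallback::new(|| {
            println!("meta.json changed");
        }))?;
        // Writing meta.json triggers a broadcast to all live callbacks.
        directory.atomic_write(Path::new("meta.json"), b"{}")?;
        Ok(())
    }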
src/docset.rs
@@ -10,7 +10,7 @@ use std::borrow::BorrowMut;
pub const TERMINATED: DocId = std::i32::MAX as u32;

/// Represents an iterable set of sorted doc ids.
-pub trait DocSet {
+pub trait DocSet: Send {
    /// Goes to the next element.
    ///
    /// The DocId of the next element is returned.

@@ -38,6 +38,7 @@ pub trait DocSet {
    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
    fn seek(&mut self, target: DocId) -> DocId {
        let mut doc = self.doc();
+        debug_assert!(doc <= target);
        while doc < target {
            doc = self.advance();
        }

@@ -128,6 +129,14 @@ impl<'a> DocSet for &'a mut dyn DocSet {
    fn size_hint(&self) -> u32 {
        (**self).size_hint()
    }
+
+    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
+        (**self).count(delete_bitset)
+    }
+
+    fn count_including_deleted(&mut self) -> u32 {
+        (**self).count_including_deleted()
+    }
}

impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
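The new `debug_assert!` makes the contract of `seek` explicit: the target must be at or ahead of the current position. A hedged sketch of the usual consumption loop over any `DocSet`, using only `doc`, `advance`, and `TERMINATED` as defined in this trait (the root re-export paths are assumed):

    use tantivy::{DocId, DocSet, TERMINATED};

    /// Collects every doc id left in the docset, stopping at TERMINATED.
    fn drain_docset(docset: &mut dyn DocSet) -> Vec<DocId> {
        let mut docs = Vec::new();
        // A freshly positioned DocSet already points at its first document (or TERMINATED).
        let mut doc = docset.doc();
        while doc != TERMINATED {
            docs.push(doc);
            doc = docset.advance();
        }
        docs
    }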
src/error.rs | 110

@@ -2,21 +2,27 @@
 
 use std::io;
 
-use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
 use crate::directory::error::{Incompatibility, LockError};
 use crate::fastfield::FastFieldNotAvailableError;
 use crate::query;
-use crate::schema;
+use crate::{
+    directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
+    schema,
+};
 use std::fmt;
 use std::path::PathBuf;
 use std::sync::PoisonError;
 
+/// Represents a `DataCorruption` error.
+///
+/// When facing data corruption, tantivy actually panic or return this error.
 pub struct DataCorruption {
     filepath: Option<PathBuf>,
     comment: String,
 }
 
 impl DataCorruption {
+    /// Creates a `DataCorruption` Error.
     pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
         DataCorruption {
             filepath: Some(filepath),
@@ -24,10 +30,11 @@ impl DataCorruption {
         }
     }
 
-    pub fn comment_only(comment: String) -> DataCorruption {
+    /// Creates a `DataCorruption` Error, when the filepath is irrelevant.
+    pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
         DataCorruption {
             filepath: None,
-            comment,
+            comment: comment.to_string(),
         }
     }
 }
@@ -43,44 +50,47 @@ impl fmt::Debug for DataCorruption {
     }
 }
 
-/// The library's failure based error enum
-#[derive(Debug, Fail)]
+/// The library's error enum
+#[derive(Debug, Error)]
 pub enum TantivyError {
-    /// Path does not exist.
-    #[fail(display = "Path does not exist: '{:?}'", _0)]
-    PathDoesNotExist(PathBuf),
-    /// File already exists, this is a problem when we try to write into a new file.
-    #[fail(display = "File already exists: '{:?}'", _0)]
-    FileAlreadyExists(PathBuf),
+    /// Failed to open the directory.
+    #[error("Failed to open the directory: '{0:?}'")]
+    OpenDirectoryError(#[from] OpenDirectoryError),
+    /// Failed to open a file for read.
+    #[error("Failed to open file for read: '{0:?}'")]
+    OpenReadError(#[from] OpenReadError),
+    /// Failed to open a file for write.
+    #[error("Failed to open file for write: '{0:?}'")]
+    OpenWriteError(#[from] OpenWriteError),
     /// Index already exists in this directory
-    #[fail(display = "Index already exists")]
+    #[error("Index already exists")]
     IndexAlreadyExists,
     /// Failed to acquire file lock
-    #[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
+    #[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
     LockFailure(LockError, Option<String>),
     /// IO Error.
-    #[fail(display = "An IO error occurred: '{}'", _0)]
-    IOError(#[cause] IOError),
+    #[error("An IO error occurred: '{0}'")]
+    IOError(#[from] io::Error),
     /// Data corruption.
-    #[fail(display = "{:?}", _0)]
+    #[error("Data corrupted: '{0:?}'")]
     DataCorruption(DataCorruption),
     /// A thread holding the locked panicked and poisoned the lock.
-    #[fail(display = "A thread holding the locked panicked and poisoned the lock")]
+    #[error("A thread holding the locked panicked and poisoned the lock")]
     Poisoned,
     /// Invalid argument was passed by the user.
-    #[fail(display = "An invalid argument was passed: '{}'", _0)]
+    #[error("An invalid argument was passed: '{0}'")]
    InvalidArgument(String),
     /// An Error happened in one of the thread.
-    #[fail(display = "An error occurred in a thread: '{}'", _0)]
+    #[error("An error occurred in a thread: '{0}'")]
     ErrorInThread(String),
     /// An Error appeared related to the schema.
-    #[fail(display = "Schema error: '{}'", _0)]
+    #[error("Schema error: '{0}'")]
     SchemaError(String),
     /// System error. (e.g.: We failed spawning a new thread)
-    #[fail(display = "System error.'{}'", _0)]
+    #[error("System error.'{0}'")]
     SystemError(String),
     /// Index incompatible with current version of tantivy
-    #[fail(display = "{:?}", _0)]
+    #[error("{0:?}")]
     IncompatibleIndex(Incompatibility),
 }
 
@@ -89,31 +99,17 @@ impl From<DataCorruption> for TantivyError {
         TantivyError::DataCorruption(data_corruption)
     }
 }
 
 impl From<FastFieldNotAvailableError> for TantivyError {
     fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
         TantivyError::SchemaError(format!("{}", fastfield_error))
     }
 }
 
 impl From<LockError> for TantivyError {
     fn from(lock_error: LockError) -> TantivyError {
         TantivyError::LockFailure(lock_error, None)
     }
 }
 
-impl From<IOError> for TantivyError {
-    fn from(io_error: IOError) -> TantivyError {
-        TantivyError::IOError(io_error)
-    }
-}
-
-impl From<io::Error> for TantivyError {
-    fn from(io_error: io::Error) -> TantivyError {
-        TantivyError::IOError(io_error.into())
-    }
-}
-
 impl From<query::QueryParserError> for TantivyError {
     fn from(parsing_error: query::QueryParserError) -> TantivyError {
         TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
@@ -126,15 +122,9 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
     }
 }
 
-impl From<OpenReadError> for TantivyError {
-    fn from(error: OpenReadError) -> TantivyError {
-        match error {
-            OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
-            OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
-            OpenReadError::IncompatibleIndex(incompatibility) => {
-                TantivyError::IncompatibleIndex(incompatibility)
-            }
-        }
+impl From<chrono::ParseError> for TantivyError {
+    fn from(err: chrono::ParseError) -> TantivyError {
+        TantivyError::InvalidArgument(err.to_string())
     }
 }
 
@@ -144,35 +134,9 @@ impl From<schema::DocParsingError> for TantivyError {
     }
 }
 
-impl From<OpenWriteError> for TantivyError {
-    fn from(error: OpenWriteError) -> TantivyError {
-        match error {
-            OpenWriteError::FileAlreadyExists(filepath) => {
-                TantivyError::FileAlreadyExists(filepath)
-            }
-            OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
-        }
-    }
-}
-
-impl From<OpenDirectoryError> for TantivyError {
-    fn from(error: OpenDirectoryError) -> TantivyError {
-        match error {
-            OpenDirectoryError::DoesNotExist(directory_path) => {
-                TantivyError::PathDoesNotExist(directory_path)
-            }
-            OpenDirectoryError::NotADirectory(directory_path) => {
-                TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
-            }
-            OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
-        }
-    }
-}
-
 impl From<serde_json::Error> for TantivyError {
     fn from(error: serde_json::Error) -> TantivyError {
-        let io_err = io::Error::from(error);
-        TantivyError::IOError(io_err.into())
+        TantivyError::IOError(error.into())
     }
 }
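The error enum above moves from the `failure` crate (`#[derive(Fail)]`, `#[fail(display = ...)]`, manual `From` impls) to `thiserror` (`#[derive(Error)]`, `#[error(...)]`, `#[from]` on the wrapped variants). A small self-contained sketch of that pattern; `MyError` and `read_config` are illustrative names, not part of tantivy:

```rust
use std::io;
use thiserror::Error;

// Illustrative enum only; it mirrors the pattern used in the diff above.
#[derive(Debug, Error)]
enum MyError {
    /// `#[from]` derives `impl From<io::Error> for MyError`, so `?` converts automatically.
    #[error("An IO error occurred: '{0}'")]
    Io(#[from] io::Error),
    #[error("An invalid argument was passed: '{0}'")]
    InvalidArgument(String),
}

fn read_config(path: &str) -> Result<String, MyError> {
    // The io::Error returned by read_to_string is converted by the derived From impl.
    let contents = std::fs::read_to_string(path)?;
    Ok(contents)
}

fn main() {
    match read_config("/definitely/missing/file") {
        Ok(_) => println!("read ok"),
        Err(err) => println!("{}", err), // prints the #[error("...")] display string
    }
}
```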
@@ -6,31 +6,114 @@ pub use self::writer::BytesFastFieldWriter;
 
 #[cfg(test)]
 mod tests {
-    use crate::schema::Schema;
-    use crate::Index;
+    use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value};
+    use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED};
+    use crate::{DocAddress, DocSet, Index, Searcher, Term};
+    use std::ops::Deref;
 
     #[test]
-    fn test_bytes() {
+    fn test_bytes() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
-        let field = schema_builder.add_bytes_field("bytesfield");
+        let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
-        index_writer.add_document(doc!(field=>vec![]));
-        index_writer.add_document(doc!(field=>vec![255u8]));
-        index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
-        index_writer.add_document(doc!(field=>vec![0u8; 1000]));
-        assert!(index_writer.commit().is_ok());
-        let searcher = index.reader().unwrap().searcher();
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3]));
+        index_writer.add_document(doc!(bytes_field=>vec![]));
+        index_writer.add_document(doc!(bytes_field=>vec![255u8]));
+        index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9]));
+        index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000]));
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
         let segment_reader = searcher.segment_reader(0);
-        let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
+        let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap();
 
         assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
         assert!(bytes_reader.get_bytes(1).is_empty());
         assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
         assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
         let long = vec![0u8; 1000];
         assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
+        Ok(())
+    }
+
+    fn create_index_for_test<T: Into<BytesOptions>>(
+        byte_options: T,
+    ) -> crate::Result<impl Deref<Target = Searcher>> {
+        let mut schema_builder = Schema::builder();
+        let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(
+            field => b"tantivy".as_ref(),
+            field => b"lucene".as_ref()
+        ));
+        index_writer.commit()?;
+        Ok(index.reader()?.searcher())
+    }
+
+    #[test]
+    fn test_stored_bytes() -> crate::Result<()> {
+        let searcher = create_index_for_test(STORED)?;
+        assert_eq!(searcher.num_docs(), 1);
+        let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
+        let field = searcher.schema().get_field("string_bytes").unwrap();
+        let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
+        assert_eq!(values.len(), 2);
+        let values_bytes: Vec<&[u8]> = values
+            .into_iter()
+            .flat_map(|value| value.bytes_value())
+            .collect();
+        assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
+        Ok(())
+    }
+
+    #[test]
+    fn test_non_stored_bytes() -> crate::Result<()> {
+        let searcher = create_index_for_test(INDEXED)?;
+        assert_eq!(searcher.num_docs(), 1);
+        let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
+        let field = searcher.schema().get_field("string_bytes").unwrap();
+        assert!(retrieved_doc.get_first(field).is_none());
+        Ok(())
+    }
+
+    #[test]
+    fn test_index_bytes() -> crate::Result<()> {
+        let searcher = create_index_for_test(INDEXED)?;
+        assert_eq!(searcher.num_docs(), 1);
+        let field = searcher.schema().get_field("string_bytes").unwrap();
+        let term = Term::from_field_bytes(field, b"lucene".as_ref());
+        let term_query = TermQuery::new(term, IndexRecordOption::Basic);
+        let term_weight = term_query.specialized_weight(&searcher, true)?;
+        let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
+        assert_eq!(term_scorer.doc(), 0u32);
+        Ok(())
+    }
+
+    #[test]
+    fn test_non_index_bytes() -> crate::Result<()> {
+        let searcher = create_index_for_test(STORED)?;
+        assert_eq!(searcher.num_docs(), 1);
+        let field = searcher.schema().get_field("string_bytes").unwrap();
+        let term = Term::from_field_bytes(field, b"lucene".as_ref());
+        let term_query = TermQuery::new(term, IndexRecordOption::Basic);
+        let term_weight_err = term_query.specialized_weight(&searcher, false);
+        assert!(matches!(
+            term_weight_err,
+            Err(crate::TantivyError::SchemaError(_))
+        ));
+        Ok(())
+    }
+
+    #[test]
+    fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
+        let searcher = create_index_for_test(FAST)?;
+        assert_eq!(searcher.num_docs(), 1);
+        let fast_fields = searcher.segment_reader(0u32).fast_fields();
+        let field = searcher.schema().get_field("string_bytes").unwrap();
+        let fast_field_reader = fast_fields.bytes(field).unwrap();
+        assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
+        Ok(())
     }
 }
@@ -1,6 +1,5 @@
-use owning_ref::OwningRef;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
 use crate::fastfield::FastFieldReader;
 use crate::DocId;
 
@@ -17,16 +16,16 @@ use crate::DocId;
 #[derive(Clone)]
 pub struct BytesFastFieldReader {
     idx_reader: FastFieldReader<u64>,
-    values: OwningRef<ReadOnlySource, [u8]>,
+    values: OwnedBytes,
 }
 
 impl BytesFastFieldReader {
     pub(crate) fn open(
         idx_reader: FastFieldReader<u64>,
-        values_source: ReadOnlySource,
-    ) -> BytesFastFieldReader {
-        let values = OwningRef::new(values_source).map(|source| &source[..]);
-        BytesFastFieldReader { idx_reader, values }
+        values_file: FileSlice,
+    ) -> crate::Result<BytesFastFieldReader> {
+        let values = values_file.read_bytes()?;
+        Ok(BytesFastFieldReader { idx_reader, values })
     }
 
     fn range(&self, doc: DocId) -> (usize, usize) {
@@ -38,7 +37,7 @@ impl BytesFastFieldReader {
     /// Returns the bytes associated to the given `doc`
     pub fn get_bytes(&self, doc: DocId) -> &[u8] {
         let (start, stop) = self.range(doc);
-        &self.values[start..stop]
+        &self.values.as_slice()[start..stop]
     }
 
     /// Returns the overall number of bytes in this bytes fast field.
@@ -49,16 +49,10 @@ impl BytesFastFieldWriter {
     /// matching field values present in the document.
     pub fn add_document(&mut self, doc: &Document) {
         self.next_doc();
-        for field_value in doc.field_values() {
-            if field_value.field() == self.field {
-                if let Value::Bytes(ref bytes) = *field_value.value() {
-                    self.vals.extend_from_slice(bytes);
-                } else {
-                    panic!(
-                        "Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
-                        self.field, field_value
-                    );
-                }
+        for field_value in doc.get_all(self.field) {
+            if let Value::Bytes(ref bytes) = field_value {
+                self.vals.extend_from_slice(bytes);
+                return;
             }
         }
     }
@@ -76,21 +70,18 @@ impl BytesFastFieldWriter {
 
     /// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
     pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
-        {
-            // writing the offset index
-            let mut doc_index_serializer =
-                serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
-            for &offset in &self.doc_index {
-                doc_index_serializer.add_val(offset)?;
-            }
-            doc_index_serializer.add_val(self.vals.len() as u64)?;
-            doc_index_serializer.close_field()?;
-        }
-        {
-            // writing the values themselves
-            let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
-            value_serializer.write_all(&self.vals)?;
-        }
+        // writing the offset index
+        let mut doc_index_serializer =
+            serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
+        for &offset in &self.doc_index {
+            doc_index_serializer.add_val(offset)?;
+        }
+        doc_index_serializer.add_val(self.vals.len() as u64)?;
+        doc_index_serializer.close_field()?;
+        // writing the values themselves
+        serializer
+            .new_bytes_fast_field_with_idx(self.field, 1)?
+            .write_all(&self.vals)?;
         Ok(())
     }
 }
@@ -1,5 +1,6 @@
 use crate::common::{BitSet, HasLen};
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
 use crate::directory::WritePtr;
 use crate::space_usage::ByteCount;
 use crate::DocId;
@@ -9,6 +10,8 @@ use std::io::Write;
 /// Write a delete `BitSet`
 ///
 /// where `delete_bitset` is the set of deleted `DocId`.
+/// Warning: this function does not call terminate. The caller is in charge of
+/// closing the writer properly.
 pub fn write_delete_bitset(
     delete_bitset: &BitSet,
     max_doc: u32,
@@ -37,22 +40,41 @@ pub fn write_delete_bitset(
 /// Set of deleted `DocId`s.
 #[derive(Clone)]
 pub struct DeleteBitSet {
-    data: ReadOnlySource,
+    data: OwnedBytes,
     len: usize,
 }
 
 impl DeleteBitSet {
-    /// Opens a delete bitset given its data source.
-    pub fn open(data: ReadOnlySource) -> DeleteBitSet {
-        let num_deleted: usize = data
+    #[cfg(test)]
+    pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
+        use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
+        use std::path::Path;
+        assert!(docs.iter().all(|&doc| doc < max_doc));
+        let mut bitset = BitSet::with_max_value(max_doc);
+        for &doc in docs {
+            bitset.insert(doc);
+        }
+        let directory = RAMDirectory::create();
+        let path = Path::new("dummydeletebitset");
+        let mut wrt = directory.open_write(path).unwrap();
+        write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
+        wrt.terminate().unwrap();
+        let file = directory.open_read(path).unwrap();
+        Self::open(file).unwrap()
+    }
+
+    /// Opens a delete bitset given its file.
+    pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> {
+        let bytes = file.read_bytes()?;
+        let num_deleted: usize = bytes
             .as_slice()
             .iter()
             .map(|b| b.count_ones() as usize)
             .sum();
-        DeleteBitSet {
-            data,
+        Ok(DeleteBitSet {
+            data: bytes,
             len: num_deleted,
-        }
+        })
     }
 
     /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
@@ -64,7 +86,7 @@ impl DeleteBitSet {
     #[inline(always)]
     pub fn is_deleted(&self, doc: DocId) -> bool {
         let byte_offset = doc / 8u32;
-        let b: u8 = (*self.data)[byte_offset as usize];
+        let b: u8 = self.data.as_slice()[byte_offset as usize];
         let shift = (doc & 7u32) as u8;
         b & (1u8 << shift) != 0
     }
@@ -83,42 +105,35 @@ impl HasLen for DeleteBitSet {
 
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use crate::directory::*;
-    use std::path::PathBuf;
+    use super::DeleteBitSet;
+    use crate::common::HasLen;
 
-    fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
-        let test_path = PathBuf::from("test");
-        let mut directory = RAMDirectory::create();
-        {
-            let mut writer = directory.open_write(&*test_path).unwrap();
-            write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
-            writer.terminate().unwrap();
-        }
-        let source = directory.open_read(&test_path).unwrap();
-        let delete_bitset = DeleteBitSet::open(source);
-        for doc in 0..max_doc {
-            assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
-        }
-        assert_eq!(delete_bitset.len(), bitset.len());
+    #[test]
+    fn test_delete_bitset_empty() {
+        let delete_bitset = DeleteBitSet::for_test(&[], 10);
+        for doc in 0..10 {
+            assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
+        }
+        assert_eq!(delete_bitset.len(), 0);
     }
 
     #[test]
     fn test_delete_bitset() {
-        {
-            let mut bitset = BitSet::with_max_value(10);
-            bitset.insert(1);
-            bitset.insert(9);
-            test_delete_bitset_helper(&bitset, 10);
-        }
-        {
-            let mut bitset = BitSet::with_max_value(8);
-            bitset.insert(1);
-            bitset.insert(2);
-            bitset.insert(3);
-            bitset.insert(5);
-            bitset.insert(7);
-            test_delete_bitset_helper(&bitset, 8);
-        }
+        let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
+        assert!(delete_bitset.is_alive(0));
+        assert!(delete_bitset.is_deleted(1));
+        assert!(delete_bitset.is_alive(2));
+        assert!(delete_bitset.is_alive(3));
+        assert!(delete_bitset.is_alive(4));
+        assert!(delete_bitset.is_alive(5));
+        assert!(delete_bitset.is_alive(6));
+        assert!(delete_bitset.is_alive(6));
+        assert!(delete_bitset.is_alive(7));
+        assert!(delete_bitset.is_alive(8));
+        assert!(delete_bitset.is_deleted(9));
+        for doc in 0..10 {
+            assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
+        }
+        assert_eq!(delete_bitset.len(), 2);
     }
 }
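`is_deleted` above is a plain bit lookup: byte `doc / 8`, bit `doc & 7`. A standalone sketch of that arithmetic over a raw byte buffer, assuming the same one-bit-per-doc, least-significant-bit-first layout as the diff (no tantivy types involved):

```rust
/// Returns true if `doc` is marked deleted in `bitset_bytes`.
fn is_deleted(bitset_bytes: &[u8], doc: u32) -> bool {
    let byte_offset = (doc / 8) as usize;
    let shift = (doc & 7) as u8;
    bitset_bytes[byte_offset] & (1u8 << shift) != 0
}

fn main() {
    // Docs 1 and 9 deleted: bit 1 of byte 0, bit 1 of byte 1.
    let bytes = [0b0000_0010u8, 0b0000_0010u8];
    assert!(is_deleted(&bytes, 1));
    assert!(is_deleted(&bytes, 9));
    assert!(!is_deleted(&bytes, 0));
    assert!(!is_deleted(&bytes, 8));
}
```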
@@ -4,8 +4,8 @@ use std::result;
 /// `FastFieldNotAvailableError` is returned when the
 /// user requested for a fast field reader, and the field was not
 /// defined in the schema as a fast field.
-#[derive(Debug, Fail)]
-#[fail(display = "Fast field not available: '{:?}'", field_name)]
+#[derive(Debug, Error)]
+#[error("Fast field not available: '{field_name:?}'")]
 pub struct FastFieldNotAvailableError {
     field_name: String,
 }
@@ -1,4 +1,5 @@
-use super::MultiValueIntFastFieldReader;
+use super::MultiValuedFastFieldReader;
+use crate::error::DataCorruption;
 use crate::schema::Facet;
 use crate::termdict::TermDictionary;
 use crate::termdict::TermOrdinal;
@@ -19,7 +20,7 @@ use std::str;
 /// list of facets. This ordinal is segment local and
 /// only makes sense for a given segment.
 pub struct FacetReader {
-    term_ords: MultiValueIntFastFieldReader<u64>,
+    term_ords: MultiValuedFastFieldReader<u64>,
     term_dict: TermDictionary,
     buffer: Vec<u8>,
 }
@@ -28,12 +29,12 @@ impl FacetReader {
     /// Creates a new `FacetReader`.
     ///
     /// A facet reader just wraps :
-    /// - a `MultiValueIntFastFieldReader` that makes it possible to
+    /// - a `MultiValuedFastFieldReader` that makes it possible to
     ///   access the list of facet ords for a given document.
     /// - a `TermDictionary` that helps associating a facet to
     ///   an ordinal and vice versa.
     pub fn new(
-        term_ords: MultiValueIntFastFieldReader<u64>,
+        term_ords: MultiValuedFastFieldReader<u64>,
         term_dict: TermDictionary,
     ) -> FacetReader {
         FacetReader {
@@ -62,18 +63,73 @@ impl FacetReader {
         &mut self,
         facet_ord: TermOrdinal,
         output: &mut Facet,
-    ) -> Result<(), str::Utf8Error> {
+    ) -> crate::Result<()> {
         let found_term = self
             .term_dict
-            .ord_to_term(facet_ord as u64, &mut self.buffer);
+            .ord_to_term(facet_ord as u64, &mut self.buffer)?;
         assert!(found_term, "Term ordinal {} no found.", facet_ord);
-        let facet_str = str::from_utf8(&self.buffer[..])?;
+        let facet_str = str::from_utf8(&self.buffer[..])
+            .map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
         output.set_facet_str(facet_str);
         Ok(())
     }
 
     /// Return the list of facet ordinals associated to a document.
-    pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
+    pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
         self.term_ords.get_vals(doc, output);
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::Index;
+    use crate::{
+        schema::{Facet, SchemaBuilder},
+        Document,
+    };
+
+    #[test]
+    fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
+        let mut schema_builder = SchemaBuilder::default();
+        let facet_field = schema_builder.add_facet_field("facet");
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
+        index_writer.add_document(Document::default());
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
+        let facet_reader = searcher
+            .segment_reader(0u32)
+            .facet_reader(facet_field)
+            .unwrap();
+        let mut facet_ords = Vec::new();
+        facet_reader.facet_ords(0u32, &mut facet_ords);
+        assert_eq!(&facet_ords, &[2u64]);
+        facet_reader.facet_ords(1u32, &mut facet_ords);
+        assert!(facet_ords.is_empty());
+        Ok(())
+    }
+
+    #[test]
+    fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
+        let mut schema_builder = SchemaBuilder::default();
+        let facet_field = schema_builder.add_facet_field("facet");
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(Document::default());
+        index_writer.add_document(Document::default());
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
+        let facet_reader = searcher
+            .segment_reader(0u32)
+            .facet_reader(facet_field)
+            .unwrap();
+        let mut facet_ords = Vec::new();
+        facet_reader.facet_ords(0u32, &mut facet_ords);
+        assert!(facet_ords.is_empty());
+        facet_reader.facet_ords(1u32, &mut facet_ords);
+        assert!(facet_ords.is_empty());
+        Ok(())
+    }
+}
@@ -28,16 +28,19 @@ pub use self::delete::write_delete_bitset;
 pub use self::delete::DeleteBitSet;
 pub use self::error::{FastFieldNotAvailableError, Result};
 pub use self::facet_reader::FacetReader;
-pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter};
+pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
 pub use self::reader::FastFieldReader;
 pub use self::readers::FastFieldReaders;
 pub use self::serializer::FastFieldSerializer;
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
-use crate::chrono::{NaiveDateTime, Utc};
 use crate::common;
 use crate::schema::Cardinality;
 use crate::schema::FieldType;
 use crate::schema::Value;
+use crate::{
+    chrono::{NaiveDateTime, Utc},
+    schema::Type,
+};
 
 mod bytes;
 mod delete;
@@ -76,6 +79,9 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
     fn make_zero() -> Self {
         Self::from_u64(0i64.to_u64())
     }
+
+    /// Returns the `schema::Type` for this FastValue.
+    fn to_type() -> Type;
 }
 
 impl FastValue for u64 {
@@ -98,6 +104,10 @@ impl FastValue for u64 {
     fn as_u64(&self) -> u64 {
         *self
     }
+
+    fn to_type() -> Type {
+        Type::U64
+    }
 }
 
 impl FastValue for i64 {
@@ -119,6 +129,10 @@ impl FastValue for i64 {
     fn as_u64(&self) -> u64 {
         *self as u64
     }
+
+    fn to_type() -> Type {
+        Type::I64
+    }
 }
 
 impl FastValue for f64 {
@@ -140,6 +154,10 @@ impl FastValue for f64 {
     fn as_u64(&self) -> u64 {
         self.to_bits()
     }
+
+    fn to_type() -> Type {
+        Type::F64
+    }
 }
 
 impl FastValue for crate::DateTime {
@@ -162,6 +180,10 @@ impl FastValue for crate::DateTime {
     fn as_u64(&self) -> u64 {
         self.timestamp().as_u64()
     }
+
+    fn to_type() -> Type {
+        Type::Date
+    }
 }
 
 fn value_to_u64(value: &Value) -> u64 {
@@ -187,6 +209,7 @@ mod tests {
     use crate::schema::FAST;
     use crate::schema::{Document, IntOptions};
     use crate::{Index, SegmentId, SegmentReader};
+    use common::HasLen;
     use once_cell::sync::Lazy;
     use rand::prelude::SliceRandom;
     use rand::rngs::StdRng;
@@ -217,9 +240,9 @@ mod tests {
     }
 
     #[test]
-    fn test_intfastfield_small() {
+    fn test_intfastfield_small() -> crate::Result<()> {
         let path = Path::new("test");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
         {
             let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
             let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -232,27 +255,24 @@ mod tests {
                 .unwrap();
             serializer.close().unwrap();
         }
-        let source = directory.open_read(&path).unwrap();
-        {
-            assert_eq!(source.len(), 36 as usize);
-        }
-        {
-            let composite_file = CompositeFile::open(&source).unwrap();
-            let field_source = composite_file.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(field_source);
-            assert_eq!(fast_field_reader.get(0), 13u64);
-            assert_eq!(fast_field_reader.get(1), 14u64);
-            assert_eq!(fast_field_reader.get(2), 2u64);
-        }
+        let file = directory.open_read(&path).unwrap();
+        assert_eq!(file.len(), 36 as usize);
+        let composite_file = CompositeFile::open(&file)?;
+        let file = composite_file.open_read(*FIELD).unwrap();
+        let fast_field_reader = FastFieldReader::<u64>::open(file)?;
+        assert_eq!(fast_field_reader.get(0), 13u64);
+        assert_eq!(fast_field_reader.get(1), 14u64);
+        assert_eq!(fast_field_reader.get(2), 2u64);
+        Ok(())
     }
 
     #[test]
-    fn test_intfastfield_large() {
+    fn test_intfastfield_large() -> crate::Result<()> {
         let path = Path::new("test");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
         {
-            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
-            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
+            let write: WritePtr = directory.open_write(Path::new("test"))?;
+            let mut serializer = FastFieldSerializer::from_write(write)?;
             let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
             fast_field_writers.add_document(&doc!(*FIELD=>4u64));
             fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
@@ -263,19 +283,15 @@ mod tests {
             fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
             fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
             fast_field_writers.add_document(&doc!(*FIELD=>215u64));
-            fast_field_writers
-                .serialize(&mut serializer, &HashMap::new())
-                .unwrap();
-            serializer.close().unwrap();
+            fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
+            serializer.close()?;
         }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path)?;
+        assert_eq!(file.len(), 61 as usize);
         {
-            assert_eq!(source.len(), 61 as usize);
-        }
-        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file)?;
             let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data);
+            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
             assert_eq!(fast_field_reader.get(0), 4u64);
             assert_eq!(fast_field_reader.get(1), 14_082_001u64);
             assert_eq!(fast_field_reader.get(2), 3_052u64);
@@ -286,12 +302,13 @@ mod tests {
             assert_eq!(fast_field_reader.get(7), 1_501u64);
             assert_eq!(fast_field_reader.get(8), 215u64);
         }
+        Ok(())
     }
 
     #[test]
-    fn test_intfastfield_null_amplitude() {
+    fn test_intfastfield_null_amplitude() -> crate::Result<()> {
         let path = Path::new("test");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
 
         {
             let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
@@ -305,24 +322,23 @@ mod tests {
                 .unwrap();
             serializer.close().unwrap();
         }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path).unwrap();
+        assert_eq!(file.len(), 34 as usize);
         {
-            assert_eq!(source.len(), 34 as usize);
-        }
-        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
             let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data);
+            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
             for doc in 0..10_000 {
                 assert_eq!(fast_field_reader.get(doc), 100_000u64);
             }
         }
+        Ok(())
    }
 
    #[test]
-    fn test_intfastfield_large_numbers() {
+    fn test_intfastfield_large_numbers() -> crate::Result<()> {
        let path = Path::new("test");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
 
        {
            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
@@ -338,14 +354,12 @@ mod tests {
                .unwrap();
            serializer.close().unwrap();
        }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path).unwrap();
+        assert_eq!(file.len(), 80042 as usize);
        {
-            assert_eq!(source.len(), 80042 as usize);
-        }
-        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file)?;
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data);
+            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
            assert_eq!(fast_field_reader.get(0), 0u64);
            for doc in 1..10_001 {
                assert_eq!(
@@ -354,12 +368,13 @@ mod tests {
                );
            }
        }
+        Ok(())
    }
 
    #[test]
-    fn test_signed_intfastfield() {
+    fn test_signed_intfastfield() -> crate::Result<()> {
        let path = Path::new("test");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
        let mut schema_builder = Schema::builder();
 
        let i64_field = schema_builder.add_i64_field("field", FAST);
@@ -378,14 +393,12 @@ mod tests {
            .unwrap();
            serializer.close().unwrap();
        }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path).unwrap();
+        assert_eq!(file.len(), 17709 as usize);
        {
-            assert_eq!(source.len(), 17709 as usize);
-        }
-        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file)?;
            let data = fast_fields_composite.open_read(i64_field).unwrap();
-            let fast_field_reader = FastFieldReader::<i64>::open(data);
+            let fast_field_reader = FastFieldReader::<i64>::open(data)?;
 
            assert_eq!(fast_field_reader.min_value(), -100i64);
            assert_eq!(fast_field_reader.max_value(), 9_999i64);
@@ -398,12 +411,13 @@ mod tests {
                assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
            }
        }
+        Ok(())
    }
 
    #[test]
-    fn test_signed_intfastfield_default_val() {
+    fn test_signed_intfastfield_default_val() -> crate::Result<()> {
        let path = Path::new("test");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
        let mut schema_builder = Schema::builder();
        let i64_field = schema_builder.add_i64_field("field", FAST);
        let schema = schema_builder.build();
@@ -420,13 +434,14 @@ mod tests {
            serializer.close().unwrap();
        }
 
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path).unwrap();
        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
            let data = fast_fields_composite.open_read(i64_field).unwrap();
-            let fast_field_reader = FastFieldReader::<i64>::open(data);
+            let fast_field_reader = FastFieldReader::<i64>::open(data)?;
            assert_eq!(fast_field_reader.get(0u32), 0i64);
        }
+        Ok(())
    }
 
    // Warning: this generates the same permutation at each call
@@ -437,28 +452,26 @@ mod tests {
    }
 
    #[test]
-    fn test_intfastfield_permutation() {
+    fn test_intfastfield_permutation() -> crate::Result<()> {
        let path = Path::new("test");
        let permutation = generate_permutation();
        let n = permutation.len();
-        let mut directory = RAMDirectory::create();
+        let directory = RAMDirectory::create();
        {
-            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
-            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
+            let write: WritePtr = directory.open_write(Path::new("test"))?;
+            let mut serializer = FastFieldSerializer::from_write(write)?;
            let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
            for &x in &permutation {
                fast_field_writers.add_document(&doc!(*FIELD=>x));
            }
-            fast_field_writers
-                .serialize(&mut serializer, &HashMap::new())
-                .unwrap();
-            serializer.close().unwrap();
+            fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
+            serializer.close()?;
        }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path)?;
        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file)?;
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data);
+            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
 
            let mut a = 0u64;
            for _ in 0..n {
@@ -466,6 +479,7 @@ mod tests {
                a = fast_field_reader.get(a as u32);
            }
        }
+        Ok(())
    }
 
    #[test]
@@ -474,7 +488,7 @@ mod tests {
        let date_field = schema_builder.add_date_field("date", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
        index_writer.commit().unwrap();
@@ -511,7 +525,7 @@ mod tests {
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(
            date_field => crate::DateTime::from_u64(1i64.to_u64()),
@@ -598,7 +612,7 @@ mod bench {
    fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
        let path = Path::new("test");
        let permutation = generate_permutation();
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
        {
            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -611,11 +625,11 @@ mod bench {
            .unwrap();
            serializer.close().unwrap();
        }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path).unwrap();
        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data);
+            let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
 
            b.iter(|| {
                let n = test::black_box(7000u32);
@@ -632,7 +646,7 @@ mod bench {
    fn bench_intfastfield_fflookup(b: &mut Bencher) {
        let path = Path::new("test");
        let permutation = generate_permutation();
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
        {
            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -645,11 +659,11 @@ mod bench {
            .unwrap();
            serializer.close().unwrap();
        }
-        let source = directory.open_read(&path).unwrap();
+        let file = directory.open_read(&path).unwrap();
        {
-            let fast_fields_composite = CompositeFile::open(&source).unwrap();
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data);
+            let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
 
            b.iter(|| {
                let n = test::black_box(1000u32);
@@ -1,8 +1,8 @@
|
|||||||
mod reader;
|
mod reader;
|
||||||
mod writer;
|
mod writer;
|
||||||
|
|
||||||
pub use self::reader::MultiValueIntFastFieldReader;
|
pub use self::reader::MultiValuedFastFieldReader;
|
||||||
pub use self::writer::MultiValueIntFastFieldWriter;
|
pub use self::writer::MultiValuedFastFieldWriter;
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
@@ -25,7 +25,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
||||||
index_writer.add_document(doc!());
|
index_writer.add_document(doc!());
|
||||||
index_writer.add_document(doc!(field=>4u64));
|
index_writer.add_document(doc!(field=>4u64));
|
||||||
@@ -64,7 +64,7 @@ mod tests {
|
|||||||
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
let first_time_stamp = chrono::Utc::now();
|
let first_time_stamp = chrono::Utc::now();
|
||||||
index_writer.add_document(
|
index_writer.add_document(
|
||||||
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
|
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
|
||||||
@@ -100,6 +100,7 @@ mod tests {
|
|||||||
.get_first(date_field)
|
.get_first(date_field)
|
||||||
.expect("cannot find value")
|
.expect("cannot find value")
|
||||||
.date_value()
|
.date_value()
|
||||||
|
.unwrap()
|
||||||
.timestamp(),
|
.timestamp(),
|
||||||
first_time_stamp.timestamp()
|
first_time_stamp.timestamp()
|
||||||
);
|
);
|
||||||
@@ -108,7 +109,7 @@ mod tests {
|
|||||||
.get_first(time_i)
|
.get_first(time_i)
|
||||||
.expect("cannot find value")
|
.expect("cannot find value")
|
||||||
.i64_value(),
|
.i64_value(),
|
||||||
1i64
|
Some(1i64)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -131,6 +132,7 @@ mod tests {
                 .get_first(date_field)
                 .expect("cannot find value")
                 .date_value()
+                .unwrap()
                 .timestamp(),
             two_secs_ahead.timestamp()
         );
@@ -139,7 +141,7 @@ mod tests {
                 .get_first(time_i)
                 .expect("cannot find value")
                 .i64_value(),
-            3i64
+            Some(3i64)
         );
     }
 }
@@ -186,7 +188,7 @@ mod tests {
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(field=> 1i64, field => 3i64));
         index_writer.add_document(doc!());
         index_writer.add_document(doc!(field=> -4i64));
@@ -197,22 +199,14 @@ mod tests {
         let segment_reader = searcher.segment_reader(0);
         let mut vals = Vec::new();
         let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
-        {
-            multi_value_reader.get_vals(2, &mut vals);
-            assert_eq!(&vals, &[-4i64]);
-        }
-        {
-            multi_value_reader.get_vals(0, &mut vals);
-            assert_eq!(&vals, &[1i64, 3i64]);
-        }
-        {
-            multi_value_reader.get_vals(1, &mut vals);
-            assert!(vals.is_empty());
-        }
-        {
-            multi_value_reader.get_vals(3, &mut vals);
-            assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
-        }
+        multi_value_reader.get_vals(2, &mut vals);
+        assert_eq!(&vals, &[-4i64]);
+        multi_value_reader.get_vals(0, &mut vals);
+        assert_eq!(&vals, &[1i64, 3i64]);
+        multi_value_reader.get_vals(1, &mut vals);
+        assert!(vals.is_empty());
+        multi_value_reader.get_vals(3, &mut vals);
+        assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
     }
     #[test]
     #[ignore]
@@ -221,7 +215,7 @@ mod tests {
         let field = schema_builder.add_facet_field("facetfield");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         for i in 0..100_000 {
             index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
         }
@@ -10,29 +10,22 @@ use crate::DocId;
 /// The `idx_reader` associated, for each document, the index of its first value.
 ///
 #[derive(Clone)]
-pub struct MultiValueIntFastFieldReader<Item: FastValue> {
+pub struct MultiValuedFastFieldReader<Item: FastValue> {
     idx_reader: FastFieldReader<u64>,
     vals_reader: FastFieldReader<Item>,
 }

-impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
+impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
     pub(crate) fn open(
         idx_reader: FastFieldReader<u64>,
         vals_reader: FastFieldReader<Item>,
-    ) -> MultiValueIntFastFieldReader<Item> {
-        MultiValueIntFastFieldReader {
+    ) -> MultiValuedFastFieldReader<Item> {
+        MultiValuedFastFieldReader {
             idx_reader,
             vals_reader,
         }
     }

-    pub(crate) fn into_u64s_reader(self) -> MultiValueIntFastFieldReader<u64> {
-        MultiValueIntFastFieldReader {
-            idx_reader: self.idx_reader,
-            vals_reader: self.vals_reader.into_u64_reader(),
-        }
-    }
-
     /// Returns `(start, stop)`, such that the values associated
     /// to the given document are `start..stop`.
     fn range(&self, doc: DocId) -> (u64, u64) {
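Not part of the diff: a minimal sketch of how the renamed MultiValuedFastFieldReader is obtained and queried after this change. The field name, options, and memory budget below are assumptions chosen to mirror the tests in this patch.

    use tantivy::schema::{Cardinality, IntOptions, Schema};
    use tantivy::{doc, Index};

    fn read_back_multivalued() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        // Hypothetical multi-valued fast field.
        let field = schema_builder.add_i64_field(
            "vals",
            IntOptions::default().set_fast(Cardinality::MultiValues),
        );
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
        writer.add_document(doc!(field => 1i64, field => 3i64));
        writer.commit()?;
        let searcher = index.reader()?.searcher();
        // `i64s` hands back a MultiValuedFastFieldReader<i64>.
        let ff_reader = searcher.segment_reader(0).fast_fields().i64s(field)?;
        let mut vals = Vec::new();
        ff_reader.get_vals(0u32, &mut vals); // values of doc 0 -> [1, 3]
        Ok(())
    }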
@@ -74,7 +67,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index
-            .writer_with_num_threads(1, 30_000_000)
+            .writer_for_tests()
             .expect("Failed to create index writer.");
         index_writer.add_document(doc!(
             facet_field => Facet::from("/category/cat2"),
@@ -18,7 +18,7 @@ use std::io;
 /// in your schema
 /// - add your document simply by calling `.add_document(...)`.
 ///
-/// The `MultiValueIntFastFieldWriter` can be acquired from the
+/// The `MultiValuedFastFieldWriter` can be acquired from the
 /// fastfield writer, by calling [`.get_multivalue_writer(...)`](./struct.FastFieldsWriter.html#method.get_multivalue_writer).
 ///
 /// Once acquired, writing is done by calling calls to
@@ -29,17 +29,17 @@ use std::io;
 /// This makes it possible to push unordered term ids,
 /// during indexing and remap them to their respective
 /// term ids when the segment is getting serialized.
-pub struct MultiValueIntFastFieldWriter {
+pub struct MultiValuedFastFieldWriter {
     field: Field,
     vals: Vec<UnorderedTermId>,
     doc_index: Vec<u64>,
     is_facet: bool,
 }

-impl MultiValueIntFastFieldWriter {
+impl MultiValuedFastFieldWriter {
     /// Creates a new `IntFastFieldWriter`
     pub(crate) fn new(field: Field, is_facet: bool) -> Self {
-        MultiValueIntFastFieldWriter {
+        MultiValuedFastFieldWriter {
             field,
             vals: Vec::new(),
             doc_index: Vec::new(),
@@ -47,7 +47,7 @@ impl MultiValueIntFastFieldWriter {
         }
     }

-    /// Access the field associated to the `MultiValueIntFastFieldWriter`
+    /// Access the field associated to the `MultiValuedFastFieldWriter`
     pub fn field(&self) -> Field {
         self.field
     }
@@ -143,7 +143,7 @@ impl MultiValueIntFastFieldWriter {
             .iter()
             .map(|val| *mapping.get(val).expect("Missing term ordinal"));
         doc_vals.extend(remapped_vals);
-        doc_vals.sort();
+        doc_vals.sort_unstable();
         for &val in &doc_vals {
             value_serializer.add_val(val)?;
         }
@@ -3,13 +3,12 @@ use crate::common::bitpacker::BitUnpacker;
 use crate::common::compute_num_bits;
 use crate::common::BinarySerializable;
 use crate::common::CompositeFile;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
 use crate::directory::{Directory, RAMDirectory, WritePtr};
 use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
 use crate::schema::Schema;
 use crate::schema::FAST;
 use crate::DocId;
-use owning_ref::OwningRef;
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::path::Path;
@@ -20,43 +19,27 @@ use std::path::Path;
 /// fast field is required.
 #[derive(Clone)]
 pub struct FastFieldReader<Item: FastValue> {
-    bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
+    bit_unpacker: BitUnpacker,
     min_value_u64: u64,
     max_value_u64: u64,
     _phantom: PhantomData<Item>,
 }

 impl<Item: FastValue> FastFieldReader<Item> {
-    /// Opens a fast field given a source.
-    pub fn open(data: ReadOnlySource) -> Self {
-        let min_value: u64;
-        let amplitude: u64;
-        {
-            let mut cursor = data.as_slice();
-            min_value =
-                u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
-            amplitude =
-                u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
-        }
+    /// Opens a fast field given a file.
+    pub fn open(file: FileSlice) -> crate::Result<Self> {
+        let mut bytes = file.read_bytes()?;
+        let min_value = u64::deserialize(&mut bytes)?;
+        let amplitude = u64::deserialize(&mut bytes)?;
         let max_value = min_value + amplitude;
         let num_bits = compute_num_bits(amplitude);
-        let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
-        let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
-        FastFieldReader {
+        let bit_unpacker = BitUnpacker::new(bytes, num_bits);
+        Ok(FastFieldReader {
             min_value_u64: min_value,
             max_value_u64: max_value,
             bit_unpacker,
             _phantom: PhantomData,
-        }
-    }
-
-    pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
-        FastFieldReader {
-            bit_unpacker: self.bit_unpacker,
-            min_value_u64: self.min_value_u64,
-            max_value_u64: self.max_value_u64,
-            _phantom: PhantomData,
-        }
+        })
     }

     /// Return the value associated to the given document.
@@ -135,7 +118,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
         let field = schema_builder.add_u64_field("field", FAST);
         let schema = schema_builder.build();
         let path = Path::new("__dummy__");
-        let mut directory: RAMDirectory = RAMDirectory::create();
+        let directory: RAMDirectory = RAMDirectory::create();
         {
             let write: WritePtr = directory
                 .open_write(path)
@@ -157,12 +140,11 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
             serializer.close().unwrap();
         }

-        let source = directory.open_read(path).expect("Failed to open the file");
-        let composite_file =
-            CompositeFile::open(&source).expect("Failed to read the composite file");
-        let field_source = composite_file
+        let file = directory.open_read(path).expect("Failed to open the file");
+        let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file");
+        let field_file = composite_file
             .open_read(field)
             .expect("File component not found");
-        FastFieldReader::open(field_source)
+        FastFieldReader::open(field_file).unwrap()
     }
 }
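Not part of the diff: a minimal in-crate sketch of the fallible open path introduced above. It is a fragment, not a full program; `directory`, `path` and `field` are assumed to be set up exactly as in the test of this hunk.

    // Sketch only: mirrors the test above with the new FileSlice-based API.
    let file = directory.open_read(path)?;            // FileSlice instead of ReadOnlySource
    let composite_file = CompositeFile::open(&file)?; // header parsing can now fail cleanly
    let field_file = composite_file
        .open_read(field)
        .expect("File component not found");
    let ff_reader = FastFieldReader::<u64>::open(field_file)?; // crate::Result<Self>
    let first_value = ff_reader.get(0u32);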
@@ -1,28 +1,22 @@
 use crate::common::CompositeFile;
-use crate::fastfield::BytesFastFieldReader;
-use crate::fastfield::MultiValueIntFastFieldReader;
+use crate::directory::FileSlice;
+use crate::fastfield::MultiValuedFastFieldReader;
+use crate::fastfield::{BytesFastFieldReader, FastValue};
 use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
 use crate::schema::{Cardinality, Field, FieldType, Schema};
 use crate::space_usage::PerFieldSpaceUsage;
-use std::collections::HashMap;
+use crate::TantivyError;

 /// Provides access to all of the FastFieldReader.
 ///
 /// Internally, `FastFieldReaders` have preloaded fast field readers,
 /// and just wraps several `HashMap`.
+#[derive(Clone)]
 pub struct FastFieldReaders {
-    fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
-    fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
-    fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
-    fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
-    fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
-    fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
-    fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
-    fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
-    fast_bytes: HashMap<Field, BytesFastFieldReader>,
+    schema: Schema,
     fast_fields_composite: CompositeFile,
 }

+#[derive(Eq, PartialEq, Debug)]
 enum FastType {
     I64,
     U64,
@@ -50,221 +44,167 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
 }

 impl FastFieldReaders {
-    pub(crate) fn load_all(
-        schema: &Schema,
-        fast_fields_composite: &CompositeFile,
+    pub(crate) fn new(
+        schema: Schema,
+        fast_fields_composite: CompositeFile,
     ) -> crate::Result<FastFieldReaders> {
-        let mut fast_field_readers = FastFieldReaders {
-            fast_field_i64: Default::default(),
-            fast_field_u64: Default::default(),
-            fast_field_f64: Default::default(),
-            fast_field_date: Default::default(),
-            fast_field_i64s: Default::default(),
-            fast_field_u64s: Default::default(),
-            fast_field_f64s: Default::default(),
-            fast_field_dates: Default::default(),
-            fast_bytes: Default::default(),
-            fast_fields_composite: fast_fields_composite.clone(),
-        };
-        for (field, field_entry) in schema.fields() {
-            let field_type = field_entry.field_type();
-            if field_type == &FieldType::Bytes {
-                let idx_reader = fast_fields_composite
-                    .open_read_with_idx(field, 0)
-                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
-                    .map(FastFieldReader::open)?;
-                let data = fast_fields_composite
-                    .open_read_with_idx(field, 1)
-                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
-                fast_field_readers
-                    .fast_bytes
-                    .insert(field, BytesFastFieldReader::open(idx_reader, data));
-            } else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
-                match cardinality {
-                    Cardinality::SingleValue => {
-                        if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
-                            match fast_type {
-                                FastType::U64 => {
-                                    let fast_field_reader = FastFieldReader::open(fast_field_data);
-                                    fast_field_readers
-                                        .fast_field_u64
-                                        .insert(field, fast_field_reader);
-                                }
-                                FastType::I64 => {
-                                    fast_field_readers.fast_field_i64.insert(
-                                        field,
-                                        FastFieldReader::open(fast_field_data.clone()),
-                                    );
-                                }
-                                FastType::F64 => {
-                                    fast_field_readers.fast_field_f64.insert(
-                                        field,
-                                        FastFieldReader::open(fast_field_data.clone()),
-                                    );
-                                }
-                                FastType::Date => {
-                                    fast_field_readers.fast_field_date.insert(
-                                        field,
-                                        FastFieldReader::open(fast_field_data.clone()),
-                                    );
-                                }
-                            }
-                        } else {
-                            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
-                        }
-                    }
-                    Cardinality::MultiValues => {
-                        let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
-                        let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
-                        if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
-                            let idx_reader = FastFieldReader::open(fast_field_idx);
-                            match fast_type {
-                                FastType::I64 => {
-                                    let vals_reader = FastFieldReader::open(fast_field_data);
-                                    let multivalued_int_fast_field =
-                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-                                    fast_field_readers
-                                        .fast_field_i64s
-                                        .insert(field, multivalued_int_fast_field);
-                                }
-                                FastType::U64 => {
-                                    let vals_reader = FastFieldReader::open(fast_field_data);
-                                    let multivalued_int_fast_field =
-                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-                                    fast_field_readers
-                                        .fast_field_u64s
-                                        .insert(field, multivalued_int_fast_field);
-                                }
-                                FastType::F64 => {
-                                    let vals_reader = FastFieldReader::open(fast_field_data);
-                                    let multivalued_int_fast_field =
-                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-                                    fast_field_readers
-                                        .fast_field_f64s
-                                        .insert(field, multivalued_int_fast_field);
-                                }
-                                FastType::Date => {
-                                    let vals_reader = FastFieldReader::open(fast_field_data);
-                                    let multivalued_int_fast_field =
-                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-                                    fast_field_readers
-                                        .fast_field_dates
-                                        .insert(field, multivalued_int_fast_field);
-                                }
-                            }
-                        } else {
-                            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
-                        }
-                    }
-                }
-            }
-        }
-        Ok(fast_field_readers)
+        Ok(FastFieldReaders {
+            fast_fields_composite,
+            schema,
+        })
     }

     pub(crate) fn space_usage(&self) -> PerFieldSpaceUsage {
         self.fast_fields_composite.space_usage()
     }

+    fn fast_field_data(&self, field: Field, idx: usize) -> crate::Result<FileSlice> {
+        self.fast_fields_composite
+            .open_read_with_idx(field, idx)
+            .ok_or_else(|| {
+                let field_name = self.schema.get_field_entry(field).name();
+                TantivyError::SchemaError(format!("Field({}) data was not found", field_name))
+            })
+    }
+
+    fn check_type(
+        &self,
+        field: Field,
+        expected_fast_type: FastType,
+        expected_cardinality: Cardinality,
+    ) -> crate::Result<()> {
+        let field_entry = self.schema.get_field_entry(field);
+        let (fast_type, cardinality) =
+            type_and_cardinality(field_entry.field_type()).ok_or_else(|| {
+                crate::TantivyError::SchemaError(format!(
+                    "Field {:?} is not a fast field.",
+                    field_entry.name()
+                ))
+            })?;
+        if fast_type != expected_fast_type {
+            return Err(crate::TantivyError::SchemaError(format!(
+                "Field {:?} is of type {:?}, expected {:?}.",
+                field_entry.name(),
+                fast_type,
+                expected_fast_type
+            )));
+        }
+        if cardinality != expected_cardinality {
+            return Err(crate::TantivyError::SchemaError(format!(
+                "Field {:?} is of cardinality {:?}, expected {:?}.",
+                field_entry.name(),
+                cardinality,
+                expected_cardinality
+            )));
+        }
+        Ok(())
+    }
+
+    pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
+        &self,
+        field: Field,
+    ) -> crate::Result<FastFieldReader<TFastValue>> {
+        let fast_field_slice = self.fast_field_data(field, 0)?;
+        FastFieldReader::open(fast_field_slice)
+    }
+
+    pub(crate) fn typed_fast_field_multi_reader<TFastValue: FastValue>(
+        &self,
+        field: Field,
+    ) -> crate::Result<MultiValuedFastFieldReader<TFastValue>> {
+        let fast_field_slice_idx = self.fast_field_data(field, 0)?;
+        let fast_field_slice_vals = self.fast_field_data(field, 1)?;
+        let idx_reader = FastFieldReader::open(fast_field_slice_idx)?;
+        let vals_reader: FastFieldReader<TFastValue> =
+            FastFieldReader::open(fast_field_slice_vals)?;
+        Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
+    }
+
     /// Returns the `u64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a u64 fast field, this method returns `None`.
-    pub fn u64(&self, field: Field) -> Option<FastFieldReader<u64>> {
-        self.fast_field_u64.get(&field).cloned()
-    }
-
-    /// If the field is a u64-fast field return the associated reader.
-    /// If the field is a i64-fast field, return the associated u64 reader. Values are
-    /// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping. ///
-    ///
-    /// This method is useful when merging segment reader.
-    pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
-        if let Some(u64_ff_reader) = self.u64(field) {
-            return Some(u64_ff_reader);
-        }
-        if let Some(i64_ff_reader) = self.i64(field) {
-            return Some(i64_ff_reader.into_u64_reader());
-        }
-        if let Some(f64_ff_reader) = self.f64(field) {
-            return Some(f64_ff_reader.into_u64_reader());
-        }
-        if let Some(date_ff_reader) = self.date(field) {
-            return Some(date_ff_reader.into_u64_reader());
-        }
-        None
+    pub fn u64(&self, field: Field) -> crate::Result<FastFieldReader<u64>> {
+        self.check_type(field, FastType::U64, Cardinality::SingleValue)?;
+        self.typed_fast_field_reader(field)
     }

     /// Returns the `i64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a i64 fast field, this method returns `None`.
-    pub fn i64(&self, field: Field) -> Option<FastFieldReader<i64>> {
-        self.fast_field_i64.get(&field).cloned()
+    pub fn i64(&self, field: Field) -> crate::Result<FastFieldReader<i64>> {
+        self.check_type(field, FastType::I64, Cardinality::SingleValue)?;
+        self.typed_fast_field_reader(field)
     }

     /// Returns the `i64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a i64 fast field, this method returns `None`.
-    pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
-        self.fast_field_date.get(&field).cloned()
+    pub fn date(&self, field: Field) -> crate::Result<FastFieldReader<crate::DateTime>> {
+        self.check_type(field, FastType::Date, Cardinality::SingleValue)?;
+        self.typed_fast_field_reader(field)
     }

     /// Returns the `f64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a f64 fast field, this method returns `None`.
-    pub fn f64(&self, field: Field) -> Option<FastFieldReader<f64>> {
-        self.fast_field_f64.get(&field).cloned()
+    pub fn f64(&self, field: Field) -> crate::Result<FastFieldReader<f64>> {
+        self.check_type(field, FastType::F64, Cardinality::SingleValue)?;
+        self.typed_fast_field_reader(field)
     }

     /// Returns a `u64s` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a u64 multi-valued fast field, this method returns `None`.
-    pub fn u64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
-        self.fast_field_u64s.get(&field).cloned()
-    }
-
-    /// If the field is a u64s-fast field return the associated reader.
-    /// If the field is a i64s-fast field, return the associated u64s reader. Values are
-    /// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping.
-    ///
-    /// This method is useful when merging segment reader.
-    pub(crate) fn u64s_lenient(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
-        if let Some(u64s_ff_reader) = self.u64s(field) {
-            return Some(u64s_ff_reader);
-        }
-        if let Some(i64s_ff_reader) = self.i64s(field) {
-            return Some(i64s_ff_reader.into_u64s_reader());
-        }
-        if let Some(f64s_ff_reader) = self.f64s(field) {
-            return Some(f64s_ff_reader.into_u64s_reader());
-        }
-        None
+    pub fn u64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
+        self.check_type(field, FastType::U64, Cardinality::MultiValues)?;
+        self.typed_fast_field_multi_reader(field)
     }

     /// Returns a `i64s` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a i64 multi-valued fast field, this method returns `None`.
-    pub fn i64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<i64>> {
-        self.fast_field_i64s.get(&field).cloned()
+    pub fn i64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<i64>> {
+        self.check_type(field, FastType::I64, Cardinality::MultiValues)?;
+        self.typed_fast_field_multi_reader(field)
     }

     /// Returns a `f64s` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a f64 multi-valued fast field, this method returns `None`.
-    pub fn f64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<f64>> {
-        self.fast_field_f64s.get(&field).cloned()
+    pub fn f64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<f64>> {
        self.check_type(field, FastType::F64, Cardinality::MultiValues)?;
+        self.typed_fast_field_multi_reader(field)
     }

     /// Returns a `crate::DateTime` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
-    pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
-        self.fast_field_dates.get(&field).cloned()
+    pub fn dates(
+        &self,
+        field: Field,
+    ) -> crate::Result<MultiValuedFastFieldReader<crate::DateTime>> {
+        self.check_type(field, FastType::Date, Cardinality::MultiValues)?;
+        self.typed_fast_field_multi_reader(field)
     }

     /// Returns the `bytes` fast field reader associated to `field`.
     ///
     /// If `field` is not a bytes fast field, returns `None`.
-    pub fn bytes(&self, field: Field) -> Option<BytesFastFieldReader> {
-        self.fast_bytes.get(&field).cloned()
+    pub fn bytes(&self, field: Field) -> crate::Result<BytesFastFieldReader> {
+        let field_entry = self.schema.get_field_entry(field);
+        if let FieldType::Bytes(bytes_option) = field_entry.field_type() {
+            if !bytes_option.is_fast() {
+                return Err(crate::TantivyError::SchemaError(format!(
+                    "Field {:?} is not a fast field.",
+                    field_entry.name()
+                )));
+            }
+            let fast_field_idx_file = self.fast_field_data(field, 0)?;
+            let idx_reader = FastFieldReader::open(fast_field_idx_file)?;
+            let data = self.fast_field_data(field, 1)?;
+            BytesFastFieldReader::open(idx_reader, data)
+        } else {
+            Err(FastFieldNotAvailableError::new(field_entry).into())
+        }
     }
 }
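Not part of the diff: with the accessors above now returning crate::Result instead of Option, call sites typically propagate the error. A small external sketch, with a hypothetical helper name and field:

    use tantivy::schema::Field;
    use tantivy::Searcher;

    // Hypothetical helper: reads a single-valued u64 fast field from segment 0.
    fn popularity_of_doc0(searcher: &Searcher, popularity: Field) -> tantivy::Result<u64> {
        let fast_fields = searcher.segment_reader(0).fast_fields();
        // Fails with a SchemaError if `popularity` is not a single-valued u64 fast field.
        let reader = fast_fields.u64(popularity)?;
        Ok(reader.get(0u32))
    }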
@@ -1,4 +1,4 @@
-use super::multivalued::MultiValueIntFastFieldWriter;
+use super::multivalued::MultiValuedFastFieldWriter;
 use crate::common;
 use crate::common::BinarySerializable;
 use crate::common::VInt;
@@ -13,7 +13,7 @@ use std::io;
 /// The fastfieldswriter regroup all of the fast field writers.
 pub struct FastFieldsWriter {
     single_value_writers: Vec<IntFastFieldWriter>,
-    multi_values_writers: Vec<MultiValueIntFastFieldWriter>,
+    multi_values_writers: Vec<MultiValuedFastFieldWriter>,
     bytes_value_writers: Vec<BytesFastFieldWriter>,
 }

@@ -33,7 +33,7 @@ impl FastFieldsWriter {
         let mut bytes_value_writers = Vec::new();

         for (field, field_entry) in schema.fields() {
-            match *field_entry.field_type() {
+            match field_entry.field_type() {
                 FieldType::I64(ref int_options)
                 | FieldType::U64(ref int_options)
                 | FieldType::F64(ref int_options)
@@ -46,19 +46,21 @@ impl FastFieldsWriter {
                         single_value_writers.push(fast_field_writer);
                     }
                     Some(Cardinality::MultiValues) => {
-                        let fast_field_writer = MultiValueIntFastFieldWriter::new(field, false);
+                        let fast_field_writer = MultiValuedFastFieldWriter::new(field, false);
                         multi_values_writers.push(fast_field_writer);
                     }
                     None => {}
                 }
                 }
                 FieldType::HierarchicalFacet => {
-                    let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
+                    let fast_field_writer = MultiValuedFastFieldWriter::new(field, true);
                     multi_values_writers.push(fast_field_writer);
                 }
-                FieldType::Bytes => {
-                    let fast_field_writer = BytesFastFieldWriter::new(field);
-                    bytes_value_writers.push(fast_field_writer);
+                FieldType::Bytes(bytes_option) => {
+                    if bytes_option.is_fast() {
+                        let fast_field_writer = BytesFastFieldWriter::new(field);
+                        bytes_value_writers.push(fast_field_writer);
+                    }
                 }
                 _ => {}
             }
@@ -85,7 +87,7 @@ impl FastFieldsWriter {
     pub fn get_multivalue_writer(
         &mut self,
         field: Field,
-    ) -> Option<&mut MultiValueIntFastFieldWriter> {
+    ) -> Option<&mut MultiValuedFastFieldWriter> {
         // TODO optimize
         self.multi_values_writers
             .iter_mut()
@@ -126,6 +128,7 @@ impl FastFieldsWriter {
         for field_writer in &self.single_value_writers {
             field_writer.serialize(serializer)?;
         }

         for field_writer in &self.multi_values_writers {
             let field = field_writer.field();
             field_writer.serialize(serializer, mapping.get(&field))?;
@@ -21,7 +21,7 @@ mod reader;
 mod serializer;
 mod writer;

-pub use self::reader::FieldNormReader;
+pub use self::reader::{FieldNormReader, FieldNormReaders};
 pub use self::serializer::FieldNormsSerializer;
 pub use self::writer::FieldNormsWriter;

@@ -1,6 +1,47 @@
 use super::{fieldnorm_to_id, id_to_fieldnorm};
-use crate::directory::ReadOnlySource;
+use crate::common::CompositeFile;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
+use crate::schema::Field;
+use crate::space_usage::PerFieldSpaceUsage;
 use crate::DocId;
+use std::sync::Arc;
+
+/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
+/// field) of all indexed fields in the index.
+///
+/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
+/// `fieldnorm_id`.
+/// The mapping from `fieldnorm` to `fieldnorm_id` is given by monotonic.
+#[derive(Clone)]
+pub struct FieldNormReaders {
+    data: Arc<CompositeFile>,
+}
+
+impl FieldNormReaders {
+    /// Creates a field norm reader.
+    pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> {
+        let data = CompositeFile::open(&file)?;
+        Ok(FieldNormReaders {
+            data: Arc::new(data),
+        })
+    }
+
+    /// Returns the FieldNormReader for a specific field.
+    pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> {
+        if let Some(file) = self.data.open_read(field) {
+            let fieldnorm_reader = FieldNormReader::open(file)?;
+            Ok(Some(fieldnorm_reader))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Return a break down of the space usage per field.
+    pub fn space_usage(&self) -> PerFieldSpaceUsage {
+        self.data.space_usage()
+    }
+}

 /// Reads the fieldnorm associated to a document.
 /// The fieldnorm represents the length associated to
@@ -19,14 +60,57 @@ use crate::DocId;
 /// Apart from compression, this scale also makes it possible to
 /// precompute computationally expensive functions of the fieldnorm
 /// in a very short array.
-pub struct FieldNormReader {
-    data: ReadOnlySource,
+#[derive(Clone)]
+pub struct FieldNormReader(ReaderImplEnum);
+
+impl From<ReaderImplEnum> for FieldNormReader {
+    fn from(reader_enum: ReaderImplEnum) -> FieldNormReader {
+        FieldNormReader(reader_enum)
+    }
+}
+
+#[derive(Clone)]
+enum ReaderImplEnum {
+    FromData(OwnedBytes),
+    Const {
+        num_docs: u32,
+        fieldnorm_id: u8,
+        fieldnorm: u32,
+    },
 }

 impl FieldNormReader {
-    /// Opens a field norm reader given its data source.
-    pub fn open(data: ReadOnlySource) -> Self {
-        FieldNormReader { data }
+    /// Creates a `FieldNormReader` with a constant fieldnorm.
+    ///
+    /// The fieldnorm will be subjected to compression as if it was coming
+    /// from an array-backed fieldnorm reader.
+    pub fn constant(num_docs: u32, fieldnorm: u32) -> FieldNormReader {
+        let fieldnorm_id = fieldnorm_to_id(fieldnorm);
+        let fieldnorm = id_to_fieldnorm(fieldnorm_id);
+        ReaderImplEnum::Const {
+            num_docs,
+            fieldnorm_id,
+            fieldnorm,
+        }
+        .into()
+    }
+
+    /// Opens a field norm reader given its file.
+    pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
+        let data = fieldnorm_file.read_bytes()?;
+        Ok(FieldNormReader::new(data))
+    }
+
+    fn new(data: OwnedBytes) -> Self {
+        ReaderImplEnum::FromData(data).into()
+    }
+
+    /// Returns the number of documents in this segment.
+    pub fn num_docs(&self) -> u32 {
+        match &self.0 {
+            ReaderImplEnum::FromData(data) => data.len() as u32,
+            ReaderImplEnum::Const { num_docs, .. } => *num_docs,
+        }
     }

     /// Returns the `fieldnorm` associated to a doc id.
@@ -39,15 +123,25 @@ impl FieldNormReader {
     /// The fieldnorm is effectively decoded from the
     /// `fieldnorm_id` by doing a simple table lookup.
     pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
-        let fieldnorm_id = self.fieldnorm_id(doc_id);
-        id_to_fieldnorm(fieldnorm_id)
+        match &self.0 {
+            ReaderImplEnum::FromData(data) => {
+                let fieldnorm_id = data.as_slice()[doc_id as usize];
+                id_to_fieldnorm(fieldnorm_id)
+            }
+            ReaderImplEnum::Const { fieldnorm, .. } => *fieldnorm,
+        }
     }

     /// Returns the `fieldnorm_id` associated to a document.
     #[inline(always)]
     pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
-        let fielnorms_data = self.data.as_slice();
-        fielnorms_data[doc_id as usize]
+        match &self.0 {
+            ReaderImplEnum::FromData(data) => {
+                let fieldnorm_id = data.as_slice()[doc_id as usize];
+                fieldnorm_id
+            }
+            ReaderImplEnum::Const { fieldnorm_id, .. } => *fieldnorm_id,
+        }
     }

     /// Converts a `fieldnorm_id` into a fieldnorm.
@@ -62,18 +156,48 @@ impl FieldNormReader {
     pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
         fieldnorm_to_id(fieldnorm)
     }
+
+    #[cfg(test)]
+    pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
+        let field_norms_id = field_norms
+            .iter()
+            .cloned()
+            .map(FieldNormReader::fieldnorm_to_id)
+            .collect::<Vec<u8>>();
+        let field_norms_data = OwnedBytes::new(field_norms_id);
+        FieldNormReader::new(field_norms_data)
+    }
 }

 #[cfg(test)]
-impl From<Vec<u32>> for FieldNormReader {
-    fn from(field_norms: Vec<u32>) -> FieldNormReader {
-        let field_norms_id = field_norms
-            .into_iter()
-            .map(FieldNormReader::fieldnorm_to_id)
-            .collect::<Vec<u8>>();
-        let field_norms_data = ReadOnlySource::from(field_norms_id);
-        FieldNormReader {
-            data: field_norms_data,
-        }
+mod tests {
+    use crate::fieldnorm::FieldNormReader;
+
+    #[test]
+    fn test_from_fieldnorms_array() {
+        let fieldnorms = &[1, 2, 3, 4, 1_000_000];
+        let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
+        assert_eq!(fieldnorm_reader.num_docs(), 5);
+        assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
+        assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
+        assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
+        assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
+        assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
+    }
+
+    #[test]
+    fn test_const_fieldnorm_reader_small_fieldnorm_id() {
+        let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 10u32);
+        assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
+        assert_eq!(fieldnorm_reader.fieldnorm(0u32), 10u32);
+        assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 10u8);
+    }
+
+    #[test]
+    fn test_const_fieldnorm_reader_large_fieldnorm_id() {
+        let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 300u32);
+        assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
+        assert_eq!(fieldnorm_reader.fieldnorm(0u32), 280u32);
+        assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 72u8);
     }
 }
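Not part of the diff: a short in-crate sketch of the two constructors introduced above -- opening per-field fieldnorms from a composite file, and the constant reader. The helper names are hypothetical.

    use crate::directory::FileSlice;
    use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
    use crate::schema::Field;

    // Hypothetical helper: fieldnorm of doc 0 for `field`, if the field has fieldnorms.
    fn fieldnorm_of_doc0(fieldnorm_file: FileSlice, field: Field) -> crate::Result<Option<u32>> {
        let readers = FieldNormReaders::open(fieldnorm_file)?;
        Ok(readers.get_field(field)?.map(|reader| reader.fieldnorm(0u32)))
    }

    // Constant reader: every document reports the same (compressed) fieldnorm.
    fn constant_reader(num_docs: u32) -> FieldNormReader {
        FieldNormReader::constant(num_docs, 10u32)
    }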
@@ -4,7 +4,7 @@ use super::fieldnorm_to_id;
 use super::FieldNormsSerializer;
 use crate::schema::Field;
 use crate::schema::Schema;
-use std::io;
+use std::{io, iter};

 /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
 /// of each document for each field with field norms.
@@ -44,7 +44,9 @@ impl FieldNormsWriter {
             .unwrap_or(0);
         FieldNormsWriter {
             fields,
-            fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
+            fieldnorms_buffer: iter::repeat_with(Vec::new)
+                .take(max_field)
+                .collect::<Vec<_>>(),
         }
     }

@@ -78,11 +80,12 @@ impl FieldNormsWriter {
     }

     /// Serialize the seen fieldnorm values to the serializer for all fields.
-    pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
+    pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
         for &field in self.fields.iter() {
             let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
             fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
         }
+        fieldnorms_serializer.close()?;
         Ok(())
     }
 }
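Not part of the diff: the `iter::repeat_with` change above is plain std; it builds one fresh buffer per slot rather than cloning a template, and is equivalent to the removed `(0..max_field).map(|_| Vec::new())` form. A minimal sketch:

    use std::iter;

    fn fresh_buffers(max_field: usize) -> Vec<Vec<u8>> {
        // One independently allocated Vec per field slot.
        iter::repeat_with(Vec::new).take(max_field).collect()
    }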
@@ -1,45 +1,93 @@
-use rand::thread_rng;
-use std::collections::HashSet;
-
-use crate::schema::*;
 use crate::Index;
 use crate::Searcher;
+use crate::{doc, schema::*};
+use rand::thread_rng;
 use rand::Rng;
+use std::collections::HashSet;

-fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
+fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
     assert!(searcher.segment_readers().len() < 20);
     assert_eq!(searcher.num_docs() as usize, vals.len());
+    for segment_reader in searcher.segment_readers() {
+        let store_reader = segment_reader.get_store_reader()?;
+        for doc_id in 0..segment_reader.max_doc() {
+            let _doc = store_reader.get(doc_id)?;
+        }
+    }
+    Ok(())
 }

 #[test]
 #[ignore]
-fn test_indexing() {
+fn test_functional_store() -> crate::Result<()> {
+    let mut schema_builder = Schema::builder();
+
+    let id_field = schema_builder.add_u64_field("id", INDEXED | STORED);
+    let schema = schema_builder.build();
+
+    let index = Index::create_in_ram(schema);
+    let reader = index.reader()?;
+
+    let mut rng = thread_rng();
+
+    let mut index_writer = index.writer_with_num_threads(3, 12_000_000)?;
+
+    let mut doc_set: Vec<u64> = Vec::new();
+
+    let mut doc_id = 0u64;
+    for iteration in 0..500 {
+        dbg!(iteration);
+        let num_docs: usize = rng.gen_range(0..4);
+        if doc_set.len() >= 1 {
+            let doc_to_remove_id = rng.gen_range(0..doc_set.len());
+            let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
+            index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
+        }
+        for _ in 0..num_docs {
+            doc_set.push(doc_id);
+            index_writer.add_document(doc!(id_field=>doc_id));
+            doc_id += 1;
+        }
+        index_writer.commit()?;
+        reader.reload()?;
+        let searcher = reader.searcher();
+        check_index_content(&searcher, &doc_set)?;
+    }
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn test_functional_indexing() -> crate::Result<()> {
     let mut schema_builder = Schema::builder();

     let id_field = schema_builder.add_u64_field("id", INDEXED);
     let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
     let schema = schema_builder.build();

-    let index = Index::create_from_tempdir(schema).unwrap();
-    let reader = index.reader().unwrap();
+    let index = Index::create_from_tempdir(schema)?;
+    let reader = index.reader()?;

     let mut rng = thread_rng();

-    let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
+    let mut index_writer = index.writer_with_num_threads(3, 120_000_000)?;

     let mut committed_docs: HashSet<u64> = HashSet::new();
     let mut uncommitted_docs: HashSet<u64> = HashSet::new();

     for _ in 0..200 {
-        let random_val = rng.gen_range(0, 20);
+        let random_val = rng.gen_range(0..20);
         if random_val == 0 {
-            index_writer.commit().expect("Commit failed");
+            index_writer.commit()?;
             committed_docs.extend(&uncommitted_docs);
             uncommitted_docs.clear();
-            reader.reload().unwrap();
+            reader.reload()?;
             let searcher = reader.searcher();
             // check that everything is correct.
-            check_index_content(&searcher, &committed_docs);
+            check_index_content(
+                &searcher,
+                &committed_docs.iter().cloned().collect::<Vec<u64>>(),
+            )?;
         } else {
             if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
                 let doc_id_term = Term::from_field_u64(id_field, random_val);
@@ -55,4 +103,5 @@ fn test_indexing() {
             }
         }
     }
+    Ok(())
 }
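Not part of the diff: the tests now pass a range to `gen_range`, matching the range-based signature rand adopted around 0.8 (the exact version attribution is an assumption; the removed lines show the older two-argument form). A minimal sketch:

    use rand::{thread_rng, Rng};

    fn pick_small_value() -> u64 {
        let mut rng = thread_rng();
        // Half-open range: draws the same values as the old gen_range(0, 20).
        rng.gen_range(0..20)
    }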
@@ -53,7 +53,7 @@ impl DeleteQueue {
             return block;
         }
         let block = Arc::new(Block {
-            operations: Arc::default(),
+            operations: Arc::new([]),
             next: NextBlock::from(self.clone()),
         });
         wlock.last_block = Arc::downgrade(&block);
@@ -108,7 +108,7 @@ impl DeleteQueue {
         let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);

         let new_block = Arc::new(Block {
-            operations: Arc::new(delete_operations.into_boxed_slice()),
+            operations: Arc::from(delete_operations.into_boxed_slice()),
             next: NextBlock::from(self.clone()),
         });

@@ -167,7 +167,7 @@ impl NextBlock {
 }

 struct Block {
-    operations: Arc<Box<[DeleteOperation]>>,
+    operations: Arc<[DeleteOperation]>,
     next: NextBlock,
 }

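Not part of the diff: storing the operations as `Arc<[T]>` drops one level of indirection compared to `Arc<Box<[T]>>`; both constructions used above are plain std, sketched here on a toy element type.

    use std::sync::Arc;

    fn freeze(ops: Vec<u64>) -> Arc<[u64]> {
        // The route the diff takes: Box<[T]> -> Arc<[T]>.
        Arc::from(ops.into_boxed_slice())
    }

    fn empty_block() -> Arc<[u64]> {
        // Counterpart of `operations: Arc::new([])` (unsized coercion).
        Arc::new([])
    }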
@@ -108,9 +108,9 @@ fn compute_deleted_bitset(
         // Limit doc helps identify the first document
         // that may be affected by the delete operation.
         let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
-        let inverted_index = segment_reader.inverted_index(delete_op.term.field());
+        let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
         if let Some(mut docset) =
-            inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
+            inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
         {
             let mut deleted_doc = docset.doc();
             while deleted_doc != TERMINATED {
@@ -449,7 +449,7 @@ impl IndexWriter {
     }

     /// Accessor to the merge policy.
-    pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
+    pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
         self.segment_updater.get_merge_policy()
     }

@@ -536,6 +536,7 @@ impl IndexWriter {
     /// when no documents are remaining.
     ///
     /// Returns the former segment_ready channel.
+    #[allow(unused_must_use)]
     fn recreate_document_channel(&mut self) -> OperationReceiver {
         let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
             channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
@@ -575,7 +576,7 @@ impl IndexWriter {
         //
         // This will drop the document queue, and the thread
         // should terminate.
-        mem::replace(self, new_index_writer);
+        *self = new_index_writer;

         // Drains the document receiver pipeline :
         // Workers don't need to index the pending documents.
@@ -799,7 +800,7 @@ mod tests {
         let mut schema_builder = schema::Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let index_writer = index.writer_for_tests().unwrap();
         let operations = vec![
             UserOperation::Add(doc!(text_field=>"a")),
             UserOperation::Add(doc!(text_field=>"b")),
@@ -814,7 +815,7 @@ mod tests {
|
|||||||
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
index_writer.add_document(doc!(text_field => "hello1"));
|
index_writer.add_document(doc!(text_field => "hello1"));
|
||||||
index_writer.add_document(doc!(text_field => "hello2"));
|
index_writer.add_document(doc!(text_field => "hello2"));
|
||||||
assert!(index_writer.commit().is_ok());
|
assert!(index_writer.commit().is_ok());
|
||||||
@@ -863,7 +864,7 @@ mod tests {
|
|||||||
.reload_policy(ReloadPolicy::Manual)
|
.reload_policy(ReloadPolicy::Manual)
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
let a_term = Term::from_field_text(text_field, "a");
|
let a_term = Term::from_field_text(text_field, "a");
|
||||||
let b_term = Term::from_field_text(text_field, "b");
|
let b_term = Term::from_field_text(text_field, "b");
|
||||||
let operations = vec![
|
let operations = vec![
|
||||||
@@ -925,8 +926,8 @@ mod tests {
|
|||||||
fn test_lockfile_already_exists_error_msg() {
|
fn test_lockfile_already_exists_error_msg() {
|
||||||
let schema_builder = schema::Schema::builder();
|
let schema_builder = schema::Schema::builder();
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let _index_writer = index.writer_for_tests().unwrap();
|
||||||
match index.writer_with_num_threads(1, 3_000_000) {
|
match index.writer_for_tests() {
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
let err_msg = err.to_string();
|
let err_msg = err.to_string();
|
||||||
assert!(err_msg.contains("already an `IndexWriter`"));
|
assert!(err_msg.contains("already an `IndexWriter`"));
|
||||||
@@ -978,7 +979,7 @@ mod tests {
|
|||||||
let num_docs_containing = |s: &str| {
|
let num_docs_containing = |s: &str| {
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_text(text_field, s);
|
let term = Term::from_field_text(text_field, s);
|
||||||
searcher.doc_freq(&term)
|
searcher.doc_freq(&term).unwrap()
|
||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
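Editor's note: the `.unwrap()` added above reflects `Searcher::doc_freq` now returning a `Result` instead of a bare count. Outside of tests, the natural form is to propagate the error with `?`. A short end-to-end sketch, assuming the 0.14-era API used elsewhere in this diff (`writer_with_num_threads`, `reader`, `doc_freq`):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    index_writer.add_document(doc!(text_field => "a b c"));
    index_writer.commit()?;

    // `doc_freq` is now fallible, so the error is propagated with `?`
    // rather than the count being returned directly.
    let reader = index.reader()?;
    let term_a = Term::from_field_text(text_field, "a");
    let freq = reader.searcher().doc_freq(&term_a)?;
    assert_eq!(freq, 1);
    Ok(())
}
```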
@@ -1014,7 +1015,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
let num_docs_containing = |s: &str| {
|
let num_docs_containing = |s: &str| {
|
||||||
let term_a = Term::from_field_text(text_field, s);
|
let term_a = Term::from_field_text(text_field, s);
|
||||||
reader.searcher().doc_freq(&term_a)
|
reader.searcher().doc_freq(&term_a).unwrap()
|
||||||
};
|
};
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
@@ -1109,6 +1110,7 @@ mod tests {
|
|||||||
.unwrap()
|
.unwrap()
|
||||||
.searcher()
|
.searcher()
|
||||||
.doc_freq(&term_a)
|
.doc_freq(&term_a)
|
||||||
|
.unwrap()
|
||||||
};
|
};
|
||||||
assert_eq!(num_docs_containing("a"), 0);
|
assert_eq!(num_docs_containing("a"), 0);
|
||||||
assert_eq!(num_docs_containing("b"), 100);
|
assert_eq!(num_docs_containing("b"), 100);
|
||||||
@@ -1128,7 +1130,7 @@ mod tests {
|
|||||||
reader.reload().unwrap();
|
reader.reload().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_text(text_field, s);
|
let term = Term::from_field_text(text_field, s);
|
||||||
searcher.doc_freq(&term)
|
searcher.doc_freq(&term).unwrap()
|
||||||
};
|
};
|
||||||
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
|
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
|
||||||
|
|
||||||
@@ -1179,7 +1181,15 @@ mod tests {
|
|||||||
|
|
||||||
// working with an empty index == no documents
|
// working with an empty index == no documents
|
||||||
let term_b = Term::from_field_text(text_field, "b");
|
let term_b = Term::from_field_text(text_field, "b");
|
||||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
|
assert_eq!(
|
||||||
|
index
|
||||||
|
.reader()
|
||||||
|
.unwrap()
|
||||||
|
.searcher()
|
||||||
|
.doc_freq(&term_b)
|
||||||
|
.unwrap(),
|
||||||
|
0
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1199,7 +1209,15 @@ mod tests {
|
|||||||
|
|
||||||
let term_a = Term::from_field_text(text_field, "a");
|
let term_a = Term::from_field_text(text_field, "a");
|
||||||
// expect the document with that term to be in the index
|
// expect the document with that term to be in the index
|
||||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
assert_eq!(
|
||||||
|
index
|
||||||
|
.reader()
|
||||||
|
.unwrap()
|
||||||
|
.searcher()
|
||||||
|
.doc_freq(&term_a)
|
||||||
|
.unwrap(),
|
||||||
|
1
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1225,7 +1243,15 @@ mod tests {
|
|||||||
// Find original docs in the index
|
// Find original docs in the index
|
||||||
let term_a = Term::from_field_text(text_field, "a");
|
let term_a = Term::from_field_text(text_field, "a");
|
||||||
// expect the document with that term to be in the index
|
// expect the document with that term to be in the index
|
||||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
assert_eq!(
|
||||||
|
index
|
||||||
|
.reader()
|
||||||
|
.unwrap()
|
||||||
|
.searcher()
|
||||||
|
.doc_freq(&term_a)
|
||||||
|
.unwrap(),
|
||||||
|
1
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1260,7 +1286,7 @@ mod tests {
|
|||||||
let idfield = schema_builder.add_text_field("id", STRING);
|
let idfield = schema_builder.add_text_field("id", STRING);
|
||||||
schema_builder.add_text_field("optfield", STRING);
|
schema_builder.add_text_field("optfield", STRING);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
index_writer.add_document(doc!(idfield=>"myid"));
|
index_writer.add_document(doc!(idfield=>"myid"));
|
||||||
let commit = index_writer.commit();
|
let commit = index_writer.commit();
|
||||||
assert!(commit.is_ok());
|
assert!(commit.is_ok());
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
|
|||||||
const DEFAULT_MIN_MERGE_SIZE: usize = 8;
|
const DEFAULT_MIN_MERGE_SIZE: usize = 8;
|
||||||
const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;
|
const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;
|
||||||
|
|
||||||
/// `LogMergePolicy` tries tries to merge segments that have a similar number of
|
/// `LogMergePolicy` tries to merge segments that have a similar number of
|
||||||
/// documents.
|
/// documents.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct LogMergePolicy {
|
pub struct LogMergePolicy {
|
||||||
|
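Editor's note on the `LogMergePolicy` doc-comment fix above: this is the default policy (`DefaultMergePolicy` is an alias for it), grouping segments with a similar document count into merge candidates. A short sketch of installing it explicitly on a writer, assuming the `tantivy::merge_policy` module path and the `set_merge_policy` signature shown later in this diff:

```rust
use tantivy::merge_policy::LogMergePolicy;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    // LogMergePolicy is already the default; setting it explicitly is shown
    // here only to illustrate the API. Segments whose document counts land
    // in the same logarithmic bucket become merge candidates together.
    index_writer.set_merge_policy(Box::new(LogMergePolicy::default()));
    Ok(())
}
```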
|||||||
File diff suppressed because it is too large
@@ -29,8 +29,9 @@ pub use self::segment_writer::SegmentWriter;
|
|||||||
/// Alias for the default merge policy, which is the `LogMergePolicy`.
|
/// Alias for the default merge policy, which is the `LogMergePolicy`.
|
||||||
pub type DefaultMergePolicy = LogMergePolicy;
|
pub type DefaultMergePolicy = LogMergePolicy;
|
||||||
|
|
||||||
|
#[cfg(feature = "mmap")]
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests_mmap {
|
||||||
use crate::schema::{self, Schema};
|
use crate::schema::{self, Schema};
|
||||||
use crate::{Index, Term};
|
use crate::{Index, Term};
|
||||||
|
|
||||||
@@ -39,7 +40,7 @@ mod tests {
|
|||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||||
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
|
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
// there must be one deleted document in the segment
|
// there must be one deleted document in the segment
|
||||||
index_writer.add_document(doc!(text_field=>"b"));
|
index_writer.add_document(doc!(text_field=>"b"));
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "b"));
|
index_writer.delete_term(Term::from_field_text(text_field, "b"));
|
||||||
|
|||||||
@@ -9,6 +9,15 @@ pub struct DeleteOperation {
|
|||||||
pub term: Term,
|
pub term: Term,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for DeleteOperation {
|
||||||
|
fn default() -> Self {
|
||||||
|
DeleteOperation {
|
||||||
|
opstamp: 0u64,
|
||||||
|
term: Term::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Timestamped Add operation.
|
/// Timestamped Add operation.
|
||||||
#[derive(Eq, PartialEq, Debug)]
|
#[derive(Eq, PartialEq, Debug)]
|
||||||
pub struct AddOperation {
|
pub struct AddOperation {
|
||||||
|
|||||||
@@ -8,15 +8,16 @@ use crate::store::StoreWriter;
|
|||||||
/// Segment serializer is in charge of laying out on disk
|
/// Segment serializer is in charge of laying out on disk
|
||||||
/// the data accumulated and sorted by the `SegmentWriter`.
|
/// the data accumulated and sorted by the `SegmentWriter`.
|
||||||
pub struct SegmentSerializer {
|
pub struct SegmentSerializer {
|
||||||
|
segment: Segment,
|
||||||
store_writer: StoreWriter,
|
store_writer: StoreWriter,
|
||||||
fast_field_serializer: FastFieldSerializer,
|
fast_field_serializer: FastFieldSerializer,
|
||||||
fieldnorms_serializer: FieldNormsSerializer,
|
fieldnorms_serializer: Option<FieldNormsSerializer>,
|
||||||
postings_serializer: InvertedIndexSerializer,
|
postings_serializer: InvertedIndexSerializer,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentSerializer {
|
impl SegmentSerializer {
|
||||||
/// Creates a new `SegmentSerializer`.
|
/// Creates a new `SegmentSerializer`.
|
||||||
pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
|
pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
|
||||||
let store_write = segment.open_write(SegmentComponent::STORE)?;
|
let store_write = segment.open_write(SegmentComponent::STORE)?;
|
||||||
|
|
||||||
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
|
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
|
||||||
@@ -25,15 +26,20 @@ impl SegmentSerializer {
|
|||||||
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
|
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
|
||||||
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
|
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
|
||||||
|
|
||||||
let postings_serializer = InvertedIndexSerializer::open(segment)?;
|
let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
|
||||||
Ok(SegmentSerializer {
|
Ok(SegmentSerializer {
|
||||||
|
segment,
|
||||||
store_writer: StoreWriter::new(store_write),
|
store_writer: StoreWriter::new(store_write),
|
||||||
fast_field_serializer,
|
fast_field_serializer,
|
||||||
fieldnorms_serializer,
|
fieldnorms_serializer: Some(fieldnorms_serializer),
|
||||||
postings_serializer,
|
postings_serializer,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn segment(&self) -> &Segment {
|
||||||
|
&self.segment
|
||||||
|
}
|
||||||
|
|
||||||
/// Accessor to the `PostingsSerializer`.
|
/// Accessor to the `PostingsSerializer`.
|
||||||
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
|
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
|
||||||
&mut self.postings_serializer
|
&mut self.postings_serializer
|
||||||
@@ -44,9 +50,11 @@ impl SegmentSerializer {
|
|||||||
&mut self.fast_field_serializer
|
&mut self.fast_field_serializer
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the field norm serializer.
|
/// Extract the field norm serializer.
|
||||||
pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer {
|
///
|
||||||
&mut self.fieldnorms_serializer
|
/// Note the fieldnorms serializer can only be extracted once.
|
||||||
|
pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
|
||||||
|
self.fieldnorms_serializer.take()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the `StoreWriter`.
|
/// Accessor to the `StoreWriter`.
|
||||||
@@ -55,11 +63,13 @@ impl SegmentSerializer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Finalize the segment serialization.
|
/// Finalize the segment serialization.
|
||||||
pub fn close(self) -> crate::Result<()> {
|
pub fn close(mut self) -> crate::Result<()> {
|
||||||
|
if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
|
||||||
|
fieldnorms_serializer.close()?;
|
||||||
|
}
|
||||||
self.fast_field_serializer.close()?;
|
self.fast_field_serializer.close()?;
|
||||||
self.postings_serializer.close()?;
|
self.postings_serializer.close()?;
|
||||||
self.store_writer.close()?;
|
self.store_writer.close()?;
|
||||||
self.fieldnorms_serializer.close()?;
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
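Editor's note on the `SegmentSerializer` hunks above: turning `fieldnorms_serializer` into an `Option` and exposing `extract_fieldnorms_serializer` is the usual `Option::take` pattern for a value that must be consumed exactly once from a struct that is otherwise only mutably borrowed. A self-contained sketch with toy types standing in for the real serializers:

```rust
/// Toy stand-in for `FieldNormsSerializer`: something that is consumed
/// exactly once when the segment is finalized.
struct NormsSerializer;

impl NormsSerializer {
    fn close(self) -> Result<(), String> {
        println!("fieldnorms flushed");
        Ok(())
    }
}

struct Serializer {
    // Wrapping the serializer in an Option lets it be moved out through a
    // mutable borrow, leaving `None` behind.
    fieldnorms_serializer: Option<NormsSerializer>,
}

impl Serializer {
    /// Mirrors `extract_fieldnorms_serializer`: the first call yields the
    /// serializer, every later call yields `None`.
    fn extract_fieldnorms_serializer(&mut self) -> Option<NormsSerializer> {
        self.fieldnorms_serializer.take()
    }

    fn close(mut self) -> Result<(), String> {
        // If nobody extracted it earlier, close it here.
        if let Some(norms) = self.extract_fieldnorms_serializer() {
            norms.close()?;
        }
        Ok(())
    }
}

fn main() -> Result<(), String> {
    let mut serializer = Serializer { fieldnorms_serializer: Some(NormsSerializer) };
    assert!(serializer.extract_fieldnorms_serializer().is_some());
    assert!(serializer.extract_fieldnorms_serializer().is_none()); // only once
    serializer.close()
}
```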
|||||||
@@ -43,7 +43,7 @@ const NUM_MERGE_THREADS: usize = 4;
|
|||||||
/// and flushed.
|
/// and flushed.
|
||||||
///
|
///
|
||||||
/// This method is not part of tantivy's public API
|
/// This method is not part of tantivy's public API
|
||||||
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
|
pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> {
|
||||||
save_metas(
|
save_metas(
|
||||||
&IndexMeta {
|
&IndexMeta {
|
||||||
segments: Vec::new(),
|
segments: Vec::new(),
|
||||||
@@ -64,7 +64,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
|
|||||||
/// and flushed.
|
/// and flushed.
|
||||||
///
|
///
|
||||||
/// This method is not part of tantivy's public API
|
/// This method is not part of tantivy's public API
|
||||||
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
|
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
|
||||||
info!("save metas");
|
info!("save metas");
|
||||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||||
// Just adding a new line at the end of the buffer.
|
// Just adding a new line at the end of the buffer.
|
||||||
@@ -112,7 +112,7 @@ fn merge(
|
|||||||
target_opstamp: Opstamp,
|
target_opstamp: Opstamp,
|
||||||
) -> crate::Result<SegmentEntry> {
|
) -> crate::Result<SegmentEntry> {
|
||||||
// first we need to apply deletes to our segment.
|
// first we need to apply deletes to our segment.
|
||||||
let mut merged_segment = index.new_segment();
|
let merged_segment = index.new_segment();
|
||||||
|
|
||||||
// First we apply all of the deletes to the merged segment, up to the target opstamp.
|
// First we apply all of the deletes to the merged segment, up to the target opstamp.
|
||||||
for segment_entry in &mut segment_entries {
|
for segment_entry in &mut segment_entries {
|
||||||
@@ -131,12 +131,13 @@ fn merge(
|
|||||||
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
|
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
|
||||||
|
|
||||||
// ... we just serialize this index merger in our new segment to merge the two segments.
|
// ... we just serialize this index merger in our new segment to merge the two segments.
|
||||||
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
|
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
|
||||||
|
|
||||||
let num_docs = merger.write(segment_serializer)?;
|
let num_docs = merger.write(segment_serializer)?;
|
||||||
|
|
||||||
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
|
let merged_segment_id = merged_segment.id();
|
||||||
|
|
||||||
|
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
|
||||||
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
|
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -153,7 +154,7 @@ pub(crate) struct InnerSegmentUpdater {
|
|||||||
|
|
||||||
index: Index,
|
index: Index,
|
||||||
segment_manager: SegmentManager,
|
segment_manager: SegmentManager,
|
||||||
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
|
merge_policy: RwLock<Arc<dyn MergePolicy>>,
|
||||||
killed: AtomicBool,
|
killed: AtomicBool,
|
||||||
stamper: Stamper,
|
stamper: Stamper,
|
||||||
merge_operations: MergeOperationInventory,
|
merge_operations: MergeOperationInventory,
|
||||||
@@ -192,19 +193,19 @@ impl SegmentUpdater {
|
|||||||
merge_thread_pool,
|
merge_thread_pool,
|
||||||
index,
|
index,
|
||||||
segment_manager,
|
segment_manager,
|
||||||
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
|
merge_policy: RwLock::new(Arc::new(DefaultMergePolicy::default())),
|
||||||
killed: AtomicBool::new(false),
|
killed: AtomicBool::new(false),
|
||||||
stamper,
|
stamper,
|
||||||
merge_operations: Default::default(),
|
merge_operations: Default::default(),
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
|
pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
|
||||||
self.merge_policy.read().unwrap().clone()
|
self.merge_policy.read().unwrap().clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
|
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
|
||||||
let arc_merge_policy = Arc::new(merge_policy);
|
let arc_merge_policy = Arc::from(merge_policy);
|
||||||
*self.merge_policy.write().unwrap() = arc_merge_policy;
|
*self.merge_policy.write().unwrap() = arc_merge_policy;
|
||||||
}
|
}
|
||||||
|
|
||||||
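Editor's note on the merge-policy hunks above: storing `Arc<dyn MergePolicy>` instead of `Arc<Box<dyn MergePolicy>>` removes one level of indirection, and `Arc::from(boxed)` is the conversion that makes `set_merge_policy(Box<dyn MergePolicy>)` keep working. A minimal sketch with a local trait standing in for `MergePolicy`:

```rust
use std::sync::Arc;

trait MergePolicy: Send + Sync {
    fn name(&self) -> &'static str;
}

struct LogMergePolicy;

impl MergePolicy for LogMergePolicy {
    fn name(&self) -> &'static str {
        "log"
    }
}

fn main() {
    let boxed: Box<dyn MergePolicy> = Box::new(LogMergePolicy);

    // `Arc::new(boxed)` would recreate the old double-indirect
    // `Arc<Box<dyn MergePolicy>>`. `Arc::from` converts the box into an
    // `Arc<dyn MergePolicy>` directly, leaving one heap indirection.
    let shared: Arc<dyn MergePolicy> = Arc::from(boxed);
    let clone = Arc::clone(&shared);
    assert_eq!(clone.name(), "log");
}
```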
@@ -449,9 +450,8 @@ impl SegmentUpdater {
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|merge_candidate: MergeCandidate| {
|
.map(|merge_candidate: MergeCandidate| {
|
||||||
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
|
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
|
||||||
})
|
});
|
||||||
.collect::<Vec<_>>();
|
merge_candidates.extend(committed_merge_candidates);
|
||||||
merge_candidates.extend(committed_merge_candidates.into_iter());
|
|
||||||
|
|
||||||
for merge_operation in merge_candidates {
|
for merge_operation in merge_candidates {
|
||||||
if let Err(err) = self.start_merge(merge_operation) {
|
if let Err(err) = self.start_merge(merge_operation) {
|
||||||
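Editor's note on the hunk above: `Vec::extend` accepts any `IntoIterator`, so the mapped candidates can be fed to it lazily, with no intermediate `collect` and no explicit `.into_iter()`. A small illustration of the same idea (the struct and values are hypothetical):

```rust
#[derive(Debug, PartialEq)]
struct MergeOperation {
    segment_ids: Vec<u32>,
}

fn main() {
    let mut merge_candidates = vec![MergeOperation { segment_ids: vec![0, 1] }];

    // Candidates coming from the committed segments: mapping lazily and
    // feeding the iterator straight into `extend` avoids building an
    // intermediate Vec only to move out of it again.
    let committed_candidates = vec![vec![2, 3], vec![4]];
    merge_candidates.extend(
        committed_candidates
            .into_iter()
            .map(|segment_ids| MergeOperation { segment_ids }),
    );

    assert_eq!(merge_candidates.len(), 3);
    println!("{:?}", merge_candidates);
}
```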
@@ -521,7 +521,7 @@ impl SegmentUpdater {
|
|||||||
///
|
///
|
||||||
/// Upon termination of the current merging threads,
|
/// Upon termination of the current merging threads,
|
||||||
/// merge opportunity may appear.
|
/// merge opportunity may appear.
|
||||||
//
|
///
|
||||||
/// We keep waiting until the merge policy judges that
|
/// We keep waiting until the merge policy judges that
|
||||||
/// no opportunity is available.
|
/// no opportunity is available.
|
||||||
///
|
///
|
||||||
@@ -554,7 +554,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
|
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -607,7 +607,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
|
||||||
{
|
{
|
||||||
for _ in 0..100 {
|
for _ in 0..100 {
|
||||||
@@ -678,7 +678,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
|
||||||
{
|
{
|
||||||
for _ in 0..100 {
|
for _ in 0..100 {
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use super::operation::AddOperation;
|
|||||||
use crate::core::Segment;
|
use crate::core::Segment;
|
||||||
use crate::core::SerializableSegment;
|
use crate::core::SerializableSegment;
|
||||||
use crate::fastfield::FastFieldsWriter;
|
use crate::fastfield::FastFieldsWriter;
|
||||||
use crate::fieldnorm::FieldNormsWriter;
|
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
|
||||||
use crate::indexer::segment_serializer::SegmentSerializer;
|
use crate::indexer::segment_serializer::SegmentSerializer;
|
||||||
use crate::postings::compute_table_size;
|
use crate::postings::compute_table_size;
|
||||||
use crate::postings::MultiFieldPostingsWriter;
|
use crate::postings::MultiFieldPostingsWriter;
|
||||||
@@ -14,10 +14,8 @@ use crate::schema::{Field, FieldEntry};
|
|||||||
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
|
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
|
||||||
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
|
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
|
||||||
use crate::tokenizer::{TokenStreamChain, Tokenizer};
|
use crate::tokenizer::{TokenStreamChain, Tokenizer};
|
||||||
use crate::DocId;
|
|
||||||
use crate::Opstamp;
|
use crate::Opstamp;
|
||||||
use std::io;
|
use crate::{DocId, SegmentComponent};
|
||||||
use std::str;
|
|
||||||
|
|
||||||
/// Computes the initial size of the hash table.
|
/// Computes the initial size of the hash table.
|
||||||
///
|
///
|
||||||
@@ -48,6 +46,7 @@ pub struct SegmentWriter {
|
|||||||
fieldnorms_writer: FieldNormsWriter,
|
fieldnorms_writer: FieldNormsWriter,
|
||||||
doc_opstamps: Vec<Opstamp>,
|
doc_opstamps: Vec<Opstamp>,
|
||||||
tokenizers: Vec<Option<TextAnalyzer>>,
|
tokenizers: Vec<Option<TextAnalyzer>>,
|
||||||
|
term_buffer: Term,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentWriter {
|
impl SegmentWriter {
|
||||||
@@ -62,11 +61,12 @@ impl SegmentWriter {
|
|||||||
/// - schema
|
/// - schema
|
||||||
pub fn for_segment(
|
pub fn for_segment(
|
||||||
memory_budget: usize,
|
memory_budget: usize,
|
||||||
mut segment: Segment,
|
segment: Segment,
|
||||||
schema: &Schema,
|
schema: &Schema,
|
||||||
) -> crate::Result<SegmentWriter> {
|
) -> crate::Result<SegmentWriter> {
|
||||||
|
let tokenizer_manager = segment.index().tokenizers().clone();
|
||||||
let table_num_bits = initial_table_size(memory_budget)?;
|
let table_num_bits = initial_table_size(memory_budget)?;
|
||||||
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
|
let segment_serializer = SegmentSerializer::for_segment(segment)?;
|
||||||
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
|
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
|
||||||
let tokenizers = schema
|
let tokenizers = schema
|
||||||
.fields()
|
.fields()
|
||||||
@@ -76,7 +76,7 @@ impl SegmentWriter {
|
|||||||
.get_indexing_options()
|
.get_indexing_options()
|
||||||
.and_then(|text_index_option| {
|
.and_then(|text_index_option| {
|
||||||
let tokenizer_name = &text_index_option.tokenizer();
|
let tokenizer_name = &text_index_option.tokenizer();
|
||||||
segment.index().tokenizers().get(tokenizer_name)
|
tokenizer_manager.get(tokenizer_name)
|
||||||
}),
|
}),
|
||||||
_ => None,
|
_ => None,
|
||||||
},
|
},
|
||||||
@@ -90,6 +90,7 @@ impl SegmentWriter {
|
|||||||
fast_field_writers: FastFieldsWriter::from_schema(schema),
|
fast_field_writers: FastFieldsWriter::from_schema(schema),
|
||||||
doc_opstamps: Vec::with_capacity(1_000),
|
doc_opstamps: Vec::with_capacity(1_000),
|
||||||
tokenizers,
|
tokenizers,
|
||||||
|
term_buffer: Term::new(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -115,7 +116,11 @@ impl SegmentWriter {
|
|||||||
/// Indexes a new document
|
/// Indexes a new document
|
||||||
///
|
///
|
||||||
/// As a user, you should rather use `IndexWriter`'s add_document.
|
/// As a user, you should rather use `IndexWriter`'s add_document.
|
||||||
pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
|
pub fn add_document(
|
||||||
|
&mut self,
|
||||||
|
add_operation: AddOperation,
|
||||||
|
schema: &Schema,
|
||||||
|
) -> crate::Result<()> {
|
||||||
let doc_id = self.max_doc;
|
let doc_id = self.max_doc;
|
||||||
let mut doc = add_operation.document;
|
let mut doc = add_operation.document;
|
||||||
self.doc_opstamps.push(add_operation.opstamp);
|
self.doc_opstamps.push(add_operation.opstamp);
|
||||||
@@ -123,34 +128,45 @@ impl SegmentWriter {
|
|||||||
self.fast_field_writers.add_document(&doc);
|
self.fast_field_writers.add_document(&doc);
|
||||||
|
|
||||||
for (field, field_values) in doc.get_sorted_field_values() {
|
for (field, field_values) in doc.get_sorted_field_values() {
|
||||||
let field_options = schema.get_field_entry(field);
|
let field_entry = schema.get_field_entry(field);
|
||||||
if !field_options.is_indexed() {
|
let make_schema_error = || {
|
||||||
|
crate::TantivyError::SchemaError(format!(
|
||||||
|
"Expected a {:?} for field {:?}",
|
||||||
|
field_entry.field_type().value_type(),
|
||||||
|
field_entry.name()
|
||||||
|
))
|
||||||
|
};
|
||||||
|
if !field_entry.is_indexed() {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
match *field_options.field_type() {
|
let (term_buffer, multifield_postings) =
|
||||||
|
(&mut self.term_buffer, &mut self.multifield_postings);
|
||||||
|
match *field_entry.field_type() {
|
||||||
FieldType::HierarchicalFacet => {
|
FieldType::HierarchicalFacet => {
|
||||||
let facets: Vec<&str> = field_values
|
term_buffer.set_field(field);
|
||||||
.iter()
|
let facets =
|
||||||
.flat_map(|field_value| match *field_value.value() {
|
field_values
|
||||||
Value::Facet(ref facet) => Some(facet.encoded_str()),
|
.iter()
|
||||||
_ => {
|
.flat_map(|field_value| match *field_value.value() {
|
||||||
panic!("Expected hierarchical facet");
|
Value::Facet(ref facet) => Some(facet.encoded_str()),
|
||||||
}
|
_ => {
|
||||||
})
|
panic!("Expected hierarchical facet");
|
||||||
.collect();
|
}
|
||||||
let mut term = Term::for_field(field); // we set the Term
|
});
|
||||||
for fake_str in facets {
|
for facet_str in facets {
|
||||||
let mut unordered_term_id_opt = None;
|
let mut unordered_term_id_opt = None;
|
||||||
FacetTokenizer.token_stream(fake_str).process(&mut |token| {
|
FacetTokenizer
|
||||||
term.set_text(&token.text);
|
.token_stream(facet_str)
|
||||||
let unordered_term_id =
|
.process(&mut |token| {
|
||||||
self.multifield_postings.subscribe(doc_id, &term);
|
term_buffer.set_text(&token.text);
|
||||||
unordered_term_id_opt = Some(unordered_term_id);
|
let unordered_term_id =
|
||||||
});
|
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||||
|
unordered_term_id_opt = Some(unordered_term_id);
|
||||||
|
});
|
||||||
if let Some(unordered_term_id) = unordered_term_id_opt {
|
if let Some(unordered_term_id) = unordered_term_id_opt {
|
||||||
self.fast_field_writers
|
self.fast_field_writers
|
||||||
.get_multivalue_writer(field)
|
.get_multivalue_writer(field)
|
||||||
.expect("multified writer for facet missing")
|
.expect("writer for facet missing")
|
||||||
.add_val(unordered_term_id);
|
.add_val(unordered_term_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
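Editor's note on the `add_document` hunk above: instead of constructing a fresh `Term` per facet token or field value, the writer now keeps a single `term_buffer` and resets it with `set_field` / `set_text` / `set_u64` before each `subscribe` call, so the per-term allocation disappears. A toy sketch of that reuse pattern (the buffer type below is a simplified stand-in for tantivy's `Term`):

```rust
/// Toy stand-in for tantivy's `Term`: a reusable (field, bytes) buffer.
struct TermBuffer {
    field: u32,
    bytes: Vec<u8>,
}

impl TermBuffer {
    fn new() -> TermBuffer {
        TermBuffer { field: 0, bytes: Vec::new() }
    }
    fn set_field(&mut self, field: u32) {
        self.field = field;
        self.bytes.clear();
    }
    fn set_text(&mut self, text: &str) {
        self.bytes.clear();
        self.bytes.extend_from_slice(text.as_bytes());
    }
}

fn main() {
    // One buffer is allocated up front and reused for every token of every
    // field, instead of building a fresh term per value as before.
    let mut term_buffer = TermBuffer::new();
    let mut subscribed = 0usize;

    for (field, tokens) in vec![(1u32, vec!["a", "b"]), (2u32, vec!["c"])] {
        term_buffer.set_field(field);
        for token in tokens {
            term_buffer.set_text(token);
            // In the segment writer this is where
            // `multifield_postings.subscribe(doc_id, &term_buffer)` happens.
            subscribed += 1;
        }
    }
    assert_eq!(subscribed, 3);
    println!("last field indexed: {}", term_buffer.field);
}
```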
@@ -167,7 +183,6 @@ impl SegmentWriter {
|
|||||||
if let Some(last_token) = tok_str.tokens.last() {
|
if let Some(last_token) = tok_str.tokens.last() {
|
||||||
total_offset += last_token.offset_to;
|
total_offset += last_token.offset_to;
|
||||||
}
|
}
|
||||||
|
|
||||||
token_streams
|
token_streams
|
||||||
.push(PreTokenizedStream::from(tok_str.clone()).into());
|
.push(PreTokenizedStream::from(tok_str.clone()).into());
|
||||||
}
|
}
|
||||||
@@ -177,7 +192,6 @@ impl SegmentWriter {
|
|||||||
{
|
{
|
||||||
offsets.push(total_offset);
|
offsets.push(total_offset);
|
||||||
total_offset += text.len();
|
total_offset += text.len();
|
||||||
|
|
||||||
token_streams.push(tokenizer.token_stream(text));
|
token_streams.push(tokenizer.token_stream(text));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -189,8 +203,12 @@ impl SegmentWriter {
|
|||||||
0
|
0
|
||||||
} else {
|
} else {
|
||||||
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
|
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
|
||||||
self.multifield_postings
|
multifield_postings.index_text(
|
||||||
.index_text(doc_id, field, &mut token_stream)
|
doc_id,
|
||||||
|
field,
|
||||||
|
&mut token_stream,
|
||||||
|
term_buffer,
|
||||||
|
)
|
||||||
};
|
};
|
||||||
|
|
||||||
self.fieldnorms_writer.record(doc_id, field, num_tokens);
|
self.fieldnorms_writer.record(doc_id, field, num_tokens);
|
||||||
@@ -198,49 +216,67 @@ impl SegmentWriter {
|
|||||||
FieldType::U64(ref int_option) => {
|
FieldType::U64(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
let term = Term::from_field_u64(
|
term_buffer.set_field(field_value.field());
|
||||||
field_value.field(),
|
let u64_val = field_value
|
||||||
field_value.value().u64_value(),
|
.value()
|
||||||
);
|
.u64_value()
|
||||||
self.multifield_postings.subscribe(doc_id, &term);
|
.ok_or_else(make_schema_error)?;
|
||||||
|
term_buffer.set_u64(u64_val);
|
||||||
|
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::Date(ref int_option) => {
|
FieldType::Date(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
let term = Term::from_field_i64(
|
term_buffer.set_field(field_value.field());
|
||||||
field_value.field(),
|
let date_val = field_value
|
||||||
field_value.value().date_value().timestamp(),
|
.value()
|
||||||
);
|
.date_value()
|
||||||
self.multifield_postings.subscribe(doc_id, &term);
|
.ok_or_else(make_schema_error)?;
|
||||||
|
term_buffer.set_i64(date_val.timestamp());
|
||||||
|
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::I64(ref int_option) => {
|
FieldType::I64(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
let term = Term::from_field_i64(
|
term_buffer.set_field(field_value.field());
|
||||||
field_value.field(),
|
let i64_val = field_value
|
||||||
field_value.value().i64_value(),
|
.value()
|
||||||
);
|
.i64_value()
|
||||||
self.multifield_postings.subscribe(doc_id, &term);
|
.ok_or_else(make_schema_error)?;
|
||||||
|
term_buffer.set_i64(i64_val);
|
||||||
|
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::F64(ref int_option) => {
|
FieldType::F64(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
let term = Term::from_field_f64(
|
term_buffer.set_field(field_value.field());
|
||||||
field_value.field(),
|
let f64_val = field_value
|
||||||
field_value.value().f64_value(),
|
.value()
|
||||||
);
|
.f64_value()
|
||||||
self.multifield_postings.subscribe(doc_id, &term);
|
.ok_or_else(make_schema_error)?;
|
||||||
|
term_buffer.set_f64(f64_val);
|
||||||
|
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::Bytes => {
|
FieldType::Bytes(ref option) => {
|
||||||
// Do nothing. Bytes only supports fast fields.
|
if option.is_indexed() {
|
||||||
|
for field_value in field_values {
|
||||||
|
term_buffer.set_field(field_value.field());
|
||||||
|
let bytes = field_value
|
||||||
|
.value()
|
||||||
|
.bytes_value()
|
||||||
|
.ok_or_else(make_schema_error)?;
|
||||||
|
term_buffer.set_bytes(bytes);
|
||||||
|
self.multifield_postings.subscribe(doc_id, &term_buffer);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
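Editor's note on the numeric-field hunks above: where the writer used to assume the value matched the schema, it now converts the `Option` accessor into a schema error with `ok_or_else(make_schema_error)` and lets `?` return it. A self-contained sketch of that conversion, with simplified stand-ins for `Value` and `TantivyError`:

```rust
#[derive(Debug)]
enum TantivyError {
    SchemaError(String),
}

#[derive(Debug)]
enum Value {
    U64(u64),
    Str(String),
}

impl Value {
    fn u64_value(&self) -> Option<u64> {
        match self {
            Value::U64(v) => Some(*v),
            _ => None,
        }
    }
}

/// Instead of panicking on a mismatched value, the writer turns the
/// `Option` into a schema error with `ok_or_else`, as in the hunk above.
fn index_u64(field_name: &str, value: &Value) -> Result<u64, TantivyError> {
    let u64_val = value.u64_value().ok_or_else(|| {
        TantivyError::SchemaError(format!("Expected a u64 for field {:?}", field_name))
    })?;
    Ok(u64_val)
}

fn main() {
    assert_eq!(index_u64("count", &Value::U64(7)).unwrap(), 7);
    assert!(index_u64("count", &Value::Str("oops".to_string())).is_err());
}
```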
@@ -280,9 +316,16 @@ fn write(
|
|||||||
fieldnorms_writer: &FieldNormsWriter,
|
fieldnorms_writer: &FieldNormsWriter,
|
||||||
mut serializer: SegmentSerializer,
|
mut serializer: SegmentSerializer,
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<()> {
|
||||||
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
|
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
|
||||||
|
fieldnorms_writer.serialize(fieldnorms_serializer)?;
|
||||||
|
}
|
||||||
|
let fieldnorm_data = serializer
|
||||||
|
.segment()
|
||||||
|
.open_read(SegmentComponent::FIELDNORMS)?;
|
||||||
|
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||||
|
let term_ord_map =
|
||||||
|
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
|
||||||
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
|
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
|
||||||
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
|
|
||||||
serializer.close()?;
|
serializer.close()?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
524  src/lib.rs

@@ -96,7 +96,7 @@
|
|||||||
//! A good place for you to get started is to check out
|
//! A good place for you to get started is to check out
|
||||||
//! the example code (
|
//! the example code (
|
||||||
//! [literate programming](https://tantivy-search.github.io/examples/basic_search.html) /
|
//! [literate programming](https://tantivy-search.github.io/examples/basic_search.html) /
|
||||||
//! [source code](https://github.com/tantivy-search/tantivy/blob/master/examples/basic_search.rs))
|
//! [source code](https://github.com/tantivy-search/tantivy/blob/main/examples/basic_search.rs))
|
||||||
|
|
||||||
#[cfg_attr(test, macro_use)]
|
#[cfg_attr(test, macro_use)]
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
@@ -105,7 +105,7 @@ extern crate serde_json;
|
|||||||
extern crate log;
|
extern crate log;
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate failure;
|
extern crate thiserror;
|
||||||
|
|
||||||
#[cfg(all(test, feature = "unstable"))]
|
#[cfg(all(test, feature = "unstable"))]
|
||||||
extern crate test;
|
extern crate test;
|
||||||
@@ -134,7 +134,7 @@ mod core;
|
|||||||
mod indexer;
|
mod indexer;
|
||||||
|
|
||||||
#[allow(unused_doc_comments)]
|
#[allow(unused_doc_comments)]
|
||||||
mod error;
|
pub mod error;
|
||||||
pub mod tokenizer;
|
pub mod tokenizer;
|
||||||
|
|
||||||
pub mod collector;
|
pub mod collector;
|
||||||
@@ -157,6 +157,7 @@ pub use self::snippet::{Snippet, SnippetGenerator};
|
|||||||
|
|
||||||
mod docset;
|
mod docset;
|
||||||
pub use self::docset::{DocSet, TERMINATED};
|
pub use self::docset::{DocSet, TERMINATED};
|
||||||
|
pub use crate::common::HasLen;
|
||||||
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
||||||
pub use crate::core::{Executor, SegmentComponent};
|
pub use crate::core::{Executor, SegmentComponent};
|
||||||
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
|
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
|
||||||
@@ -173,7 +174,7 @@ use once_cell::sync::Lazy;
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// Index format version.
|
/// Index format version.
|
||||||
const INDEX_FORMAT_VERSION: u32 = 1;
|
const INDEX_FORMAT_VERSION: u32 = 3;
|
||||||
|
|
||||||
/// Structure version for the index.
|
/// Structure version for the index.
|
||||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
@@ -245,11 +246,10 @@ pub type DocId = u32;
|
|||||||
/// with opstamp `n+1`.
|
/// with opstamp `n+1`.
|
||||||
pub type Opstamp = u64;
|
pub type Opstamp = u64;
|
||||||
|
|
||||||
/// A f32 that represents the relevance of the document to the query
|
/// A Score that represents the relevance of the document to the query
|
||||||
///
|
///
|
||||||
/// This is modelled internally as a `f32`. The
|
/// This is modelled internally as a `f32`. The larger the number, the more relevant
|
||||||
/// larger the number, the more relevant the document
|
/// the document to the search query.
|
||||||
/// to the search
|
|
||||||
pub type Score = f32;
|
pub type Score = f32;
|
||||||
|
|
||||||
/// A `SegmentLocalId` identifies a segment.
|
/// A `SegmentLocalId` identifies a segment.
|
||||||
@@ -277,12 +277,11 @@ impl DocAddress {
|
|||||||
///
|
///
|
||||||
/// The id used for the segment is actually an ordinal
|
/// The id used for the segment is actually an ordinal
|
||||||
/// in the list of `Segment`s held by a `Searcher`.
|
/// in the list of `Segment`s held by a `Searcher`.
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
pub struct DocAddress(pub SegmentLocalId, pub DocId);
|
pub struct DocAddress(pub SegmentLocalId, pub DocId);
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
||||||
use crate::core::SegmentReader;
|
use crate::core::SegmentReader;
|
||||||
use crate::docset::{DocSet, TERMINATED};
|
use crate::docset::{DocSet, TERMINATED};
|
||||||
@@ -290,7 +289,6 @@ mod tests {
|
|||||||
use crate::schema::*;
|
use crate::schema::*;
|
||||||
use crate::DocAddress;
|
use crate::DocAddress;
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::IndexWriter;
|
|
||||||
use crate::Postings;
|
use crate::Postings;
|
||||||
use crate::ReloadPolicy;
|
use crate::ReloadPolicy;
|
||||||
use rand::distributions::Bernoulli;
|
use rand::distributions::Bernoulli;
|
||||||
@@ -298,17 +296,26 @@ mod tests {
|
|||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use rand::{Rng, SeedableRng};
|
use rand::{Rng, SeedableRng};
|
||||||
|
|
||||||
pub fn assert_nearly_equals(expected: f32, val: f32) {
|
/// Checks if left and right are close one to each other.
|
||||||
assert!(
|
/// Panics if the two values are more than 0.5% apart.
|
||||||
nearly_equals(val, expected),
|
#[macro_export]
|
||||||
"Got {}, expected {}.",
|
macro_rules! assert_nearly_equals {
|
||||||
val,
|
($left:expr, $right:expr) => {{
|
||||||
expected
|
match (&$left, &$right) {
|
||||||
);
|
(left_val, right_val) => {
|
||||||
}
|
let diff = (left_val - right_val).abs();
|
||||||
|
let add = left_val.abs() + right_val.abs();
|
||||||
pub fn nearly_equals(a: f32, b: f32) -> bool {
|
if diff > 0.0005 * add {
|
||||||
(a - b).abs() < 0.0005 * (a + b).abs()
|
panic!(
|
||||||
|
r#"assertion failed: `(left ~= right)`
|
||||||
|
left: `{:?}`,
|
||||||
|
right: `{:?}`"#,
|
||||||
|
&*left_val, &*right_val
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
||||||
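Editor's note on the `assert_nearly_equals!` macro introduced above: it replaces the old helper functions with a relative-tolerance assertion that reports both values on failure. A condensed, self-contained restatement of the macro together with a usage example (the score values are made up for illustration):

```rust
/// Relative-tolerance assertion in the spirit of the macro above: passes
/// when the two values differ by less than 0.05% of their combined
/// magnitude, panics with both values otherwise.
macro_rules! assert_nearly_equals {
    ($left:expr, $right:expr) => {{
        let (left_val, right_val) = ($left, $right);
        let diff = (left_val - right_val).abs();
        let add = left_val.abs() + right_val.abs();
        if diff > 0.0005 * add {
            panic!(
                "assertion failed: `(left ~= right)` left: `{:?}`, right: `{:?}`",
                left_val, right_val
            );
        }
    }};
}

fn main() {
    let bm25_score: f32 = 1.0000;
    assert_nearly_equals!(bm25_score, 1.0001f32); // within 0.05%: passes
    // assert_nearly_equals!(1.0f32, 1.1f32);     // ~10% apart: would panic
    println!("scores match within tolerance");
}
```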
@@ -346,14 +353,14 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(feature = "mmap")]
|
#[cfg(feature = "mmap")]
|
||||||
fn test_indexing() {
|
fn test_indexing() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_from_tempdir(schema).unwrap();
|
let index = Index::create_from_tempdir(schema).unwrap();
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
{
|
{
|
||||||
let doc = doc!(text_field=>"af b");
|
let doc = doc!(text_field=>"af b");
|
||||||
index_writer.add_document(doc);
|
index_writer.add_document(doc);
|
||||||
@@ -368,100 +375,76 @@ mod tests {
|
|||||||
}
|
}
|
||||||
assert!(index_writer.commit().is_ok());
|
assert!(index_writer.commit().is_ok());
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_docfreq1() {
|
fn test_docfreq1() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
{
|
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
index_writer.commit()?;
|
||||||
index_writer.commit().unwrap();
|
index_writer.add_document(doc!(text_field=>"a"));
|
||||||
}
|
index_writer.add_document(doc!(text_field=>"a a"));
|
||||||
{
|
index_writer.commit()?;
|
||||||
index_writer.add_document(doc!(text_field=>"a"));
|
index_writer.add_document(doc!(text_field=>"c"));
|
||||||
index_writer.add_document(doc!(text_field=>"a a"));
|
index_writer.commit()?;
|
||||||
index_writer.commit().unwrap();
|
let reader = index.reader()?;
|
||||||
}
|
let searcher = reader.searcher();
|
||||||
{
|
let term_a = Term::from_field_text(text_field, "a");
|
||||||
index_writer.add_document(doc!(text_field=>"c"));
|
assert_eq!(searcher.doc_freq(&term_a)?, 3);
|
||||||
index_writer.commit().unwrap();
|
let term_b = Term::from_field_text(text_field, "b");
|
||||||
}
|
assert_eq!(searcher.doc_freq(&term_b)?, 1);
|
||||||
{
|
let term_c = Term::from_field_text(text_field, "c");
|
||||||
let reader = index.reader().unwrap();
|
assert_eq!(searcher.doc_freq(&term_c)?, 2);
|
||||||
let searcher = reader.searcher();
|
let term_d = Term::from_field_text(text_field, "d");
|
||||||
let term_a = Term::from_field_text(text_field, "a");
|
assert_eq!(searcher.doc_freq(&term_d)?, 0);
|
||||||
assert_eq!(searcher.doc_freq(&term_a), 3);
|
Ok(())
|
||||||
let term_b = Term::from_field_text(text_field, "b");
|
|
||||||
assert_eq!(searcher.doc_freq(&term_b), 1);
|
|
||||||
let term_c = Term::from_field_text(text_field, "c");
|
|
||||||
assert_eq!(searcher.doc_freq(&term_c), 2);
|
|
||||||
let term_d = Term::from_field_text(text_field, "d");
|
|
||||||
assert_eq!(searcher.doc_freq(&term_d), 0);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_fieldnorm_no_docs_with_field() {
|
fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let title_field = schema_builder.add_text_field("title", TEXT);
|
let title_field = schema_builder.add_text_field("title", TEXT);
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||||
|
index_writer.commit()?;
|
||||||
|
let index_reader = index.reader()?;
|
||||||
|
let searcher = index_reader.searcher();
|
||||||
|
let reader = searcher.segment_reader(0);
|
||||||
{
|
{
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?;
|
||||||
{
|
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
|
||||||
let doc = doc!(text_field=>"a b c");
|
|
||||||
index_writer.add_document(doc);
|
|
||||||
}
|
|
||||||
index_writer.commit().unwrap();
|
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let index_reader = index.reader().unwrap();
|
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?;
|
||||||
let searcher = index_reader.searcher();
|
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
|
||||||
let reader = searcher.segment_reader(0);
|
|
||||||
{
|
|
||||||
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
|
|
||||||
}
|
|
||||||
{
|
|
||||||
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_fieldnorm() {
|
fn test_fieldnorm() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
{
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||||
{
|
index_writer.add_document(doc!());
|
||||||
let doc = doc!(text_field=>"a b c");
|
index_writer.add_document(doc!(text_field=>"a b"));
|
||||||
index_writer.add_document(doc);
|
index_writer.commit()?;
|
||||||
}
|
let reader = index.reader()?;
|
||||||
{
|
let searcher = reader.searcher();
|
||||||
let doc = doc!();
|
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||||
index_writer.add_document(doc);
|
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?;
|
||||||
}
|
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
|
||||||
{
|
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
|
||||||
let doc = doc!(text_field=>"a b");
|
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
|
||||||
index_writer.add_document(doc);
|
Ok(())
|
||||||
}
|
|
||||||
index_writer.commit().unwrap();
|
|
||||||
}
|
|
||||||
{
|
|
||||||
let reader = index.reader().unwrap();
|
|
||||||
let searcher = reader.searcher();
|
|
||||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
|
||||||
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
|
|
||||||
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
|
|
||||||
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
|
|
||||||
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
|
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
|
||||||
@@ -476,7 +459,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_delete_postings1() {
|
fn test_delete_postings1() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let term_abcd = Term::from_field_text(text_field, "abcd");
|
let term_abcd = Term::from_field_text(text_field, "abcd");
|
||||||
@@ -492,7 +475,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
// 0
|
// 0
|
||||||
index_writer.add_document(doc!(text_field=>"a b"));
|
index_writer.add_document(doc!(text_field=>"a b"));
|
||||||
// 1
|
// 1
|
||||||
@@ -508,19 +491,19 @@ mod tests {
|
|||||||
index_writer.add_document(doc!(text_field=>" b c"));
|
index_writer.add_document(doc!(text_field=>" b c"));
|
||||||
// 5
|
// 5
|
||||||
index_writer.add_document(doc!(text_field=>" a"));
|
index_writer.add_document(doc!(text_field=>" a"));
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit()?;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
reader.reload().unwrap();
|
reader.reload()?;
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let inverted_index = segment_reader.inverted_index(text_field);
|
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||||
assert!(inverted_index
|
assert!(inverted_index
|
||||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||||
.is_none());
|
.is_none());
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||||
assert_eq!(postings.doc(), 5);
|
assert_eq!(postings.doc(), 5);
|
||||||
@@ -528,7 +511,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||||
assert_eq!(postings.doc(), 3);
|
assert_eq!(postings.doc(), 3);
|
||||||
@@ -539,25 +522,25 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
// 0
|
// 0
|
||||||
index_writer.add_document(doc!(text_field=>"a b"));
|
index_writer.add_document(doc!(text_field=>"a b"));
|
||||||
// 1
|
// 1
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||||
index_writer.rollback().unwrap();
|
index_writer.rollback()?;
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
reader.reload().unwrap();
|
reader.reload()?;
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let seg_reader = searcher.segment_reader(0);
|
let seg_reader = searcher.segment_reader(0);
|
||||||
let inverted_index = seg_reader.inverted_index(term_abcd.field());
|
let inverted_index = seg_reader.inverted_index(term_abcd.field())?;
|
||||||
|
|
||||||
assert!(inverted_index
|
assert!(inverted_index
|
||||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||||
.is_none());
|
.is_none());
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
+                    .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
                     .unwrap();
                 assert!(advance_undeleted(&mut postings, seg_reader));
                 assert_eq!(postings.doc(), 5);
@@ -565,7 +548,7 @@ mod tests {
            }
            {
                let mut postings = inverted_index
-                    .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
+                    .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
                    .unwrap();
                assert!(advance_undeleted(&mut postings, seg_reader));
                assert_eq!(postings.doc(), 3);
@@ -576,30 +559,30 @@ mod tests {
        }
        {
            // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
            index_writer.add_document(doc!(text_field=>"a b"));
            index_writer.delete_term(Term::from_field_text(text_field, "c"));
-            index_writer.rollback().unwrap();
+            index_writer.rollback()?;
            index_writer.delete_term(Term::from_field_text(text_field, "a"));
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
        }
        {
-            reader.reload().unwrap();
+            reader.reload()?;
            let searcher = reader.searcher();
            let segment_reader = searcher.segment_reader(0);
-            let inverted_index = segment_reader.inverted_index(term_abcd.field());
+            let inverted_index = segment_reader.inverted_index(term_abcd.field())?;
            assert!(inverted_index
-                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
                .is_none());
            {
                let mut postings = inverted_index
-                    .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
+                    .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
                    .unwrap();
                assert!(!advance_undeleted(&mut postings, segment_reader));
            }
            {
                let mut postings = inverted_index
-                    .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
+                    .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
                    .unwrap();
                assert!(advance_undeleted(&mut postings, segment_reader));
                assert_eq!(postings.doc(), 3);
@@ -609,101 +592,107 @@ mod tests {
            }
            {
                let mut postings = inverted_index
-                    .read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
+                    .read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)?
                    .unwrap();
                assert!(advance_undeleted(&mut postings, segment_reader));
                assert_eq!(postings.doc(), 4);
                assert!(!advance_undeleted(&mut postings, segment_reader));
            }
        }
+        Ok(())
    }

    #[test]
-    fn test_indexed_u64() {
+    fn test_indexed_u64() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let field = schema_builder.add_u64_field("value", INDEXED);
        let schema = schema_builder.build();

        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
        index_writer.add_document(doc!(field=>1u64));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
        let searcher = reader.searcher();
        let term = Term::from_field_u64(field, 1u64);
        let mut postings = searcher
            .segment_reader(0)
-            .inverted_index(term.field())
-            .read_postings(&term, IndexRecordOption::Basic)
+            .inverted_index(term.field())?
+            .read_postings(&term, IndexRecordOption::Basic)?
            .unwrap();
        assert_eq!(postings.doc(), 0);
        assert_eq!(postings.advance(), TERMINATED);
+        Ok(())
    }

    #[test]
-    fn test_indexed_i64() {
+    fn test_indexed_i64() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let value_field = schema_builder.add_i64_field("value", INDEXED);
        let schema = schema_builder.build();

        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
        let negative_val = -1i64;
        index_writer.add_document(doc!(value_field => negative_val));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
        let searcher = reader.searcher();
        let term = Term::from_field_i64(value_field, negative_val);
        let mut postings = searcher
            .segment_reader(0)
-            .inverted_index(term.field())
-            .read_postings(&term, IndexRecordOption::Basic)
+            .inverted_index(term.field())?
+            .read_postings(&term, IndexRecordOption::Basic)?
            .unwrap();
        assert_eq!(postings.doc(), 0);
        assert_eq!(postings.advance(), TERMINATED);
+        Ok(())
    }

    #[test]
-    fn test_indexed_f64() {
+    fn test_indexed_f64() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let value_field = schema_builder.add_f64_field("value", INDEXED);
        let schema = schema_builder.build();

        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
        let val = std::f64::consts::PI;
        index_writer.add_document(doc!(value_field => val));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
        let searcher = reader.searcher();
        let term = Term::from_field_f64(value_field, val);
        let mut postings = searcher
            .segment_reader(0)
-            .inverted_index(term.field())
-            .read_postings(&term, IndexRecordOption::Basic)
+            .inverted_index(term.field())?
+            .read_postings(&term, IndexRecordOption::Basic)?
            .unwrap();
        assert_eq!(postings.doc(), 0);
        assert_eq!(postings.advance(), TERMINATED);
+        Ok(())
    }

    #[test]
-    fn test_indexedfield_not_in_documents() {
+    fn test_indexedfield_not_in_documents() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let absent_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
        index_writer.add_document(doc!(text_field=>"a"));
        assert!(index_writer.commit().is_ok());
-        let reader = index.reader().unwrap();
+        let reader = index.reader()?;
        let searcher = reader.searcher();
        let segment_reader = searcher.segment_reader(0);
-        segment_reader.inverted_index(absent_field); //< should not panic
+        let inverted_index = segment_reader.inverted_index(absent_field)?;
+        assert_eq!(inverted_index.terms().num_terms(), 0);
+        Ok(())
    }

    #[test]
-    fn test_delete_postings2() {
+    fn test_delete_postings2() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
@@ -711,125 +700,112 @@ mod tests {
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::Manual)
-            .try_into()
-            .unwrap();
+            .try_into()?;

        // writing the segment
-        let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
-        let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
-            let doc = doc!(text_field=>val);
-            index_writer.add_document(doc);
-        };
-        let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
-            let delterm = Term::from_field_text(text_field, val);
-            index_writer.delete_term(delterm);
-        };
-        add_document(&mut index_writer, "63");
-        add_document(&mut index_writer, "70");
-        add_document(&mut index_writer, "34");
-        add_document(&mut index_writer, "1");
-        add_document(&mut index_writer, "38");
-        add_document(&mut index_writer, "33");
-        add_document(&mut index_writer, "40");
-        add_document(&mut index_writer, "17");
-        remove_document(&mut index_writer, "38");
-        remove_document(&mut index_writer, "34");
-        index_writer.commit().unwrap();
-        reader.reload().unwrap();
-        let searcher = reader.searcher();
-        assert_eq!(searcher.num_docs(), 6);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"63"));
+        index_writer.add_document(doc!(text_field=>"70"));
+        index_writer.add_document(doc!(text_field=>"34"));
+        index_writer.add_document(doc!(text_field=>"1"));
+        index_writer.add_document(doc!(text_field=>"38"));
+        index_writer.add_document(doc!(text_field=>"33"));
+        index_writer.add_document(doc!(text_field=>"40"));
+        index_writer.add_document(doc!(text_field=>"17"));
+        index_writer.delete_term(Term::from_field_text(text_field, "38"));
+        index_writer.delete_term(Term::from_field_text(text_field, "34"));
+        index_writer.commit()?;
+        reader.reload()?;
+        assert_eq!(reader.searcher().num_docs(), 6);
+        Ok(())
    }

    #[test]
-    fn test_termfreq() {
+    fn test_termfreq() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        {
            // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            let doc = doc!(text_field=>"af af af bc bc");
-            index_writer.add_document(doc);
-            index_writer.commit().unwrap();
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.add_document(doc!(text_field=>"af af af bc bc"));
+            index_writer.commit()?;
        }
        {
-            let index_reader = index.reader().unwrap();
+            let index_reader = index.reader()?;
            let searcher = index_reader.searcher();
            let reader = searcher.segment_reader(0);
-            let inverted_index = reader.inverted_index(text_field);
+            let inverted_index = reader.inverted_index(text_field)?;
            let term_abcd = Term::from_field_text(text_field, "abcd");
            assert!(inverted_index
-                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
                .is_none());
            let term_af = Term::from_field_text(text_field, "af");
            let mut postings = inverted_index
-                .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)?
                .unwrap();
            assert_eq!(postings.doc(), 0);
            assert_eq!(postings.term_freq(), 3);
            assert_eq!(postings.advance(), TERMINATED);
        }
+        Ok(())
    }

    #[test]
-    fn test_searcher_1() {
+    fn test_searcher_1() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let reader = index.reader().unwrap();
-        {
-            // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            index_writer.add_document(doc!(text_field=>"af af af b"));
-            index_writer.add_document(doc!(text_field=>"a b c"));
-            index_writer.add_document(doc!(text_field=>"a b c d"));
-            index_writer.commit().unwrap();
-        }
-        {
-            reader.reload().unwrap();
-            let searcher = reader.searcher();
-            let get_doc_ids = |terms: Vec<Term>| {
-                let query = BooleanQuery::new_multiterms_query(terms);
-                let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
-                topdocs.docs().to_vec()
-            };
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
-                vec![DocAddress(0, 1), DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
-                vec![DocAddress(0, 0)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
-                vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
-                vec![DocAddress(0, 1), DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
-                vec![DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![
-                    Term::from_field_text(text_field, "b"),
-                    Term::from_field_text(text_field, "a"),
-                ]),
-                vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
-            );
-        }
+        let reader = index.reader()?;
+        // writing the segment
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"af af af b"));
+        index_writer.add_document(doc!(text_field=>"a b c"));
+        index_writer.add_document(doc!(text_field=>"a b c d"));
+        index_writer.commit()?;
+        reader.reload()?;
+        let searcher = reader.searcher();
+        let get_doc_ids = |terms: Vec<Term>| {
+            let query = BooleanQuery::new_multiterms_query(terms);
+            searcher
+                .search(&query, &TEST_COLLECTOR_WITH_SCORE)
+                .map(|topdocs| topdocs.docs().to_vec())
+        };
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
+            vec![DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
+            vec![DocAddress(0, 0)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
+            vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "c")])?,
+            vec![DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "d")])?,
+            vec![DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![
+                Term::from_field_text(text_field, "b"),
+                Term::from_field_text(text_field, "a"),
+            ])?,
+            vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        Ok(())
    }

    #[test]
-    fn test_searcher_2() {
+    fn test_searcher_2() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
@@ -837,19 +813,17 @@ mod tests {
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::Manual)
-            .try_into()
-            .unwrap();
+            .try_into()?;
        assert_eq!(reader.searcher().num_docs(), 0u64);
-        {
-            // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            index_writer.add_document(doc!(text_field=>"af b"));
-            index_writer.add_document(doc!(text_field=>"a b c"));
-            index_writer.add_document(doc!(text_field=>"a b c d"));
-            index_writer.commit().unwrap();
-        }
-        reader.reload().unwrap();
+        // writing the segment
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"af b"));
+        index_writer.add_document(doc!(text_field=>"a b c"));
+        index_writer.add_document(doc!(text_field=>"a b c d"));
+        index_writer.commit()?;
+        reader.reload()?;
        assert_eq!(reader.searcher().num_docs(), 3u64);
+        Ok(())
    }

    #[test]
@@ -861,17 +835,17 @@ mod tests {
            text_field => "some other value",
            other_text_field => "short");
        assert_eq!(document.len(), 3);
-        let values = document.get_all(text_field);
+        let values: Vec<&Value> = document.get_all(text_field).collect();
        assert_eq!(values.len(), 2);
        assert_eq!(values[0].text(), Some("tantivy"));
        assert_eq!(values[1].text(), Some("some other value"));
-        let values = document.get_all(other_text_field);
+        let values: Vec<&Value> = document.get_all(other_text_field).collect();
        assert_eq!(values.len(), 1);
        assert_eq!(values[0].text(), Some("short"));
    }

    #[test]
-    fn test_wrong_fast_field_type() {
+    fn test_wrong_fast_field_type() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
        let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
@@ -881,57 +855,58 @@ mod tests {
        let schema = schema_builder.build();

        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
        {
            let document =
                doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
            index_writer.add_document(document);
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
        }
-        let reader = index.reader().unwrap();
+        let reader = index.reader()?;
        let searcher = reader.searcher();
        let segment_reader: &SegmentReader = searcher.segment_reader(0);
        {
-            let fast_field_reader_opt = segment_reader.fast_fields().u64(text_field);
-            assert!(fast_field_reader_opt.is_none());
+            let fast_field_reader_res = segment_reader.fast_fields().u64(text_field);
+            assert!(fast_field_reader_res.is_err());
        }
        {
            let fast_field_reader_opt = segment_reader.fast_fields().u64(stored_int_field);
-            assert!(fast_field_reader_opt.is_none());
+            assert!(fast_field_reader_opt.is_err());
        }
        {
            let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_signed);
-            assert!(fast_field_reader_opt.is_none());
+            assert!(fast_field_reader_opt.is_err());
        }
        {
            let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_float);
-            assert!(fast_field_reader_opt.is_none());
+            assert!(fast_field_reader_opt.is_err());
        }
        {
            let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_unsigned);
-            assert!(fast_field_reader_opt.is_some());
+            assert!(fast_field_reader_opt.is_ok());
            let fast_field_reader = fast_field_reader_opt.unwrap();
            assert_eq!(fast_field_reader.get(0), 4u64)
        }

        {
-            let fast_field_reader_opt = segment_reader.fast_fields().i64(fast_field_signed);
-            assert!(fast_field_reader_opt.is_some());
-            let fast_field_reader = fast_field_reader_opt.unwrap();
+            let fast_field_reader_res = segment_reader.fast_fields().i64(fast_field_signed);
+            assert!(fast_field_reader_res.is_ok());
+            let fast_field_reader = fast_field_reader_res.unwrap();
            assert_eq!(fast_field_reader.get(0), 4i64)
        }

        {
-            let fast_field_reader_opt = segment_reader.fast_fields().f64(fast_field_float);
-            assert!(fast_field_reader_opt.is_some());
-            let fast_field_reader = fast_field_reader_opt.unwrap();
+            let fast_field_reader_res = segment_reader.fast_fields().f64(fast_field_float);
+            assert!(fast_field_reader_res.is_ok());
+            let fast_field_reader = fast_field_reader_res.unwrap();
            assert_eq!(fast_field_reader.get(0), 4f64)
        }
+        Ok(())
    }

    // motivated by #729
    #[test]
-    fn test_update_via_delete_insert() {
+    fn test_update_via_delete_insert() -> crate::Result<()> {
        use crate::collector::Count;
        use crate::indexer::NoMergePolicy;
        use crate::query::AllQuery;
@@ -945,17 +920,17 @@ mod tests {
        let schema = schema_builder.build();

        let index = Index::create_in_ram(schema.clone());
-        let index_reader = index.reader().unwrap();
+        let index_reader = index.reader()?;

-        let mut index_writer = index.writer(3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
        index_writer.set_merge_policy(Box::new(NoMergePolicy));

        for doc_id in 0u64..DOC_COUNT {
            index_writer.add_document(doc!(id => doc_id));
        }
-        index_writer.commit().unwrap();
+        index_writer.commit()?;

-        index_reader.reload().unwrap();
+        index_reader.reload()?;
        let searcher = index_reader.searcher();

        assert_eq!(
@@ -966,12 +941,11 @@ mod tests {
        // update the 10 elements by deleting and re-adding
        for doc_id in 0u64..DOC_COUNT {
            index_writer.delete_term(Term::from_field_u64(id, doc_id));
-            index_writer.commit().unwrap();
-            index_reader.reload().unwrap();
-            let doc = doc!(id => doc_id);
-            index_writer.add_document(doc);
-            index_writer.commit().unwrap();
-            index_reader.reload().unwrap();
+            index_writer.commit()?;
+            index_reader.reload()?;
+            index_writer.add_document(doc!(id => doc_id));
+            index_writer.commit()?;
+            index_reader.reload()?;
            let searcher = index_reader.searcher();
            // The number of document should be stable.
            assert_eq!(
@@ -980,7 +954,7 @@ mod tests {
            );
        }

-        index_reader.reload().unwrap();
+        index_reader.reload()?;
        let searcher = index_reader.searcher();
        let segment_ids: Vec<SegmentId> = searcher
            .segment_readers()
@@ -989,12 +963,18 @@ mod tests {
            .collect();
        block_on(index_writer.merge(&segment_ids)).unwrap();

-        index_reader.reload().unwrap();
+        index_reader.reload()?;
        let searcher = index_reader.searcher();
+        assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
+        Ok(())
+    }

-        assert_eq!(
-            searcher.search(&AllQuery, &Count).unwrap(),
-            DOC_COUNT as usize
-        );
+    #[test]
+    fn test_validate_checksum() -> crate::Result<()> {
+        let index_path = tempfile::tempdir().expect("dir");
+        let schema = Schema::builder().build();
+        let index = Index::create_in_dir(&index_path, schema)?;
+        assert!(index.validate_checksum()?.is_empty());
+        Ok(())
    }
}
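The test hunks above apply one mechanical change throughout: each test now returns crate::Result<()>, so `?` replaces the `.unwrap()` calls, `Index::writer_for_tests()` replaces the explicit `writer_with_num_threads(..)` / `writer(..)` constructions, and the newly fallible accessors (`inverted_index(..)?`, `read_postings(..)?`, `fast_fields().u64(..)` returning a Result) are propagated instead of unwrapped. A minimal sketch of the pattern, with a made-up test name and document that are not taken from the diff:

    #[test]
    fn test_error_propagation_pattern() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        // writer_for_tests() replaces writer_with_num_threads(1, 3_000_000).unwrap()
        let mut index_writer = index.writer_for_tests()?;
        index_writer.add_document(doc!(text_field => "hello world"));
        index_writer.commit()?; // was .commit().unwrap()
        let reader = index.reader()?; // was .reader().unwrap()
        assert_eq!(reader.searcher().num_docs(), 1);
        Ok(())
    }

The hunks that follow move on to the positions test module, where the test buffers are now handed around as FileSlice instead of ReadOnlySource.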
@@ -38,11 +38,11 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
 pub mod tests {

     use super::PositionSerializer;
-    use crate::directory::ReadOnlySource;
     use crate::positions::reader::PositionReader;
+    use crate::{common::HasLen, directory::FileSlice};
     use std::iter;

-    fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
+    fn create_stream_buffer(vals: &[u32]) -> (FileSlice, FileSlice) {
         let mut skip_buffer = vec![];
         let mut stream_buffer = vec![];
         {
@@ -53,10 +53,7 @@ pub mod tests {
             }
             serializer.close().unwrap();
         }
-        (
-            ReadOnlySource::from(stream_buffer),
-            ReadOnlySource::from(skip_buffer),
-        )
+        (FileSlice::from(stream_buffer), FileSlice::from(skip_buffer))
     }

     #[test]
@@ -65,7 +62,7 @@ pub mod tests {
         let (stream, skip) = create_stream_buffer(&v[..]);
         assert_eq!(skip.len(), 12);
         assert_eq!(stream.len(), 1168);
-        let mut position_reader = PositionReader::new(stream, skip, 0u64);
+        let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
         for &n in &[1, 10, 127, 128, 130, 312] {
             let mut v = vec![0u32; n];
             position_reader.read(0, &mut v[..]);
@@ -81,7 +78,7 @@ pub mod tests {
         let (stream, skip) = create_stream_buffer(&v[..]);
         assert_eq!(skip.len(), 12);
         assert_eq!(stream.len(), 1168);
-        let mut position_reader = PositionReader::new(stream, skip, 0u64);
+        let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
         for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
             for &len in &[1, 10, 130, 500] {
                 let mut v = vec![0u32; len];
@@ -100,7 +97,7 @@ pub mod tests {
         assert_eq!(skip.len(), 12);
         assert_eq!(stream.len(), 1168);

-        let mut position_reader = PositionReader::new(stream, skip, 0u64);
+        let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
         let mut buf = [0u32; 7];
         let mut c = 0;

@@ -122,7 +119,7 @@ pub mod tests {
         let (stream, skip) = create_stream_buffer(&v[..]);
         assert_eq!(skip.len(), 15_749);
         assert_eq!(stream.len(), 4_987_872);
-        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
+        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
         let mut buf = [0u32; 256];
         position_reader.read(128, &mut buf);
         for i in 0..256 {
@@ -142,7 +139,8 @@ pub mod tests {
         assert_eq!(skip.len(), 15_749);
         assert_eq!(stream.len(), 4_987_872);
         let mut buf = [0u32; 1];
-        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 200_000);
+        let mut position_reader =
+            PositionReader::new(stream.clone(), skip.clone(), 200_000).unwrap();
         position_reader.read(230, &mut buf);
         position_reader.read(9, &mut buf);
     }
@@ -157,7 +155,7 @@ pub mod tests {
         }
         let (stream, skip) = create_stream_buffer(&v[..]);
         let mut buf = Vec::new();
-        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
+        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
         let mut offset = 0;
         for i in 1..24 {
             buf.resize(i, 0);
@@ -175,7 +173,7 @@ pub mod tests {
         let (stream, skip) = create_stream_buffer(&v[..]);
         assert_eq!(skip.len(), 15_749);
         assert_eq!(stream.len(), 1_000_000);
-        let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
+        let mut position_reader = PositionReader::new(stream, skip, 128 * 1024).unwrap();
         let mut buf = [0u32; 1];
         position_reader.read(0, &mut buf);
         assert_eq!(buf[0], CONST_VAL);
@@ -194,7 +192,8 @@ pub mod tests {
             128 * 1024 + 7,
             128 * 10 * 1024 + 10,
         ] {
-            let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
+            let mut position_reader =
+                PositionReader::new(stream.clone(), skip.clone(), offset).unwrap();
             let mut buf = [0u32; 1];
             position_reader.read(0, &mut buf);
             assert_eq!(buf[0], offset as u32);
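These position tests now build their inputs as FileSlice values wrapping plain in-memory buffers, and length checks go through the HasLen trait imported above. A small sketch of that construction, written with the same in-crate paths the test module uses and an arbitrary buffer size:

    use crate::common::HasLen;
    use crate::directory::FileSlice;

    let stream = FileSlice::from(vec![0u8; 1168]);
    assert_eq!(stream.len(), 1168);
    // the readers below consume the slice as OwnedBytes
    let bytes = stream.read_bytes().unwrap();
    assert_eq!(bytes.as_slice().len(), 1168);

The next hunks are against the position reader itself, whose construction becomes fallible.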
@@ -1,8 +1,13 @@
+use std::io;
+
 use crate::common::{BinarySerializable, FixedSize};
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
 use crate::positions::COMPRESSION_BLOCK_SIZE;
 use crate::positions::LONG_SKIP_INTERVAL;
 use crate::positions::LONG_SKIP_IN_BLOCKS;
+use bitpacking::{BitPacker, BitPacker4x};

 /// Positions works as a long sequence of compressed block.
 /// All terms are chained one after the other.
 ///
@@ -23,28 +28,28 @@ use crate::positions::LONG_SKIP_IN_BLOCKS;
 /// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
 /// so skipping a block without decompressing it is just a matter of advancing that many
 /// bytes.
-use bitpacking::{BitPacker, BitPacker4x};
-use owned_read::OwnedRead;

 struct Positions {
     bit_packer: BitPacker4x,
-    skip_source: ReadOnlySource,
-    position_source: ReadOnlySource,
-    long_skip_source: ReadOnlySource,
+    skip_file: FileSlice,
+    position_file: FileSlice,
+    long_skip_data: OwnedBytes,
 }

 impl Positions {
-    pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
-        let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
-        let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
-        let (skip_source, long_skip_source) =
+    pub fn new(position_file: FileSlice, skip_file: FileSlice) -> io::Result<Positions> {
+        let (body, footer) = skip_file.split_from_end(u32::SIZE_IN_BYTES);
+        let footer_data = footer.read_bytes()?;
+        let num_long_skips = u32::deserialize(&mut footer_data.as_slice())?;
+        let (skip_file, long_skip_file) =
             body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
-        Positions {
+        let long_skip_data = long_skip_file.read_bytes()?;
+        Ok(Positions {
             bit_packer: BitPacker4x::new(),
-            skip_source,
-            long_skip_source,
-            position_source,
-        }
+            skip_file,
+            long_skip_data,
+            position_file,
+        })
     }

     /// Returns the offset of the block associated to the given `long_skip_id`.
@@ -54,19 +59,23 @@ impl Positions {
         if long_skip_id == 0 {
             return 0;
         }
-        let long_skip_slice = self.long_skip_source.as_slice();
+        let long_skip_slice = self.long_skip_data.as_slice();
         let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
         u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
     }

-    fn reader(&self, offset: u64) -> PositionReader {
+    fn reader(&self, offset: u64) -> io::Result<PositionReader> {
         let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
         let offset_num_bytes: u64 = self.long_skip(long_skip_id);
-        let mut position_read = OwnedRead::new(self.position_source.clone());
-        position_read.advance(offset_num_bytes as usize);
-        let mut skip_read = OwnedRead::new(self.skip_source.clone());
-        skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
-        PositionReader {
+        let position_read = self
+            .position_file
+            .slice_from(offset_num_bytes as usize)
+            .read_bytes()?;
+        let skip_read = self
+            .skip_file
+            .slice_from(long_skip_id * LONG_SKIP_IN_BLOCKS)
+            .read_bytes()?;
+        Ok(PositionReader {
             bit_packer: self.bit_packer,
             skip_read,
             position_read,
@@ -74,13 +83,14 @@ impl Positions {
             block_offset: std::i64::MAX as u64,
             anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
             abs_offset: offset,
-        }
+        })
     }
 }

+#[derive(Clone)]
 pub struct PositionReader {
-    skip_read: OwnedRead,
-    position_read: OwnedRead,
+    skip_read: OwnedBytes,
+    position_read: OwnedBytes,
     bit_packer: BitPacker4x,
     buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,

@@ -92,11 +102,12 @@ pub struct PositionReader {

 impl PositionReader {
     pub fn new(
-        position_source: ReadOnlySource,
-        skip_source: ReadOnlySource,
+        position_file: FileSlice,
+        skip_file: FileSlice,
         offset: u64,
-    ) -> PositionReader {
-        Positions::new(position_source, skip_source).reader(offset)
+    ) -> io::Result<PositionReader> {
+        let positions = Positions::new(position_file, skip_file)?;
+        positions.reader(offset)
     }

     fn advance_num_blocks(&mut self, num_blocks: usize) {
@@ -121,7 +132,7 @@ impl PositionReader {
             "offset arguments should be increasing."
         );
         let delta_to_block_offset = offset as i64 - self.block_offset as i64;
-        if delta_to_block_offset < 0 || delta_to_block_offset >= 128 {
+        if !(0..128).contains(&delta_to_block_offset) {
             // The first position is not within the first block.
             // We need to decompress the first block.
             let delta_to_anchor_offset = offset - self.anchor_offset;
@@ -130,7 +141,7 @@ impl PositionReader {
             self.advance_num_blocks(num_blocks_to_skip);
             self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
             self.block_offset = self.anchor_offset;
-            let num_bits = self.skip_read.get(0);
+            let num_bits = self.skip_read.as_slice()[0];
             self.bit_packer
                 .decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
         } else {
@@ -140,7 +151,7 @@ impl PositionReader {
             self.anchor_offset = self.block_offset;
         }

-        let mut num_bits = self.skip_read.get(0);
+        let mut num_bits = self.skip_read.as_slice()[0];
         let mut position_data = self.position_read.as_ref();

         for i in 1.. {
@@ -154,7 +165,7 @@ impl PositionReader {
             output = &mut output[remaining_in_block..];
             offset += remaining_in_block as u64;
             position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
-            num_bits = self.skip_read.get(i);
+            num_bits = self.skip_read.as_slice()[i];
             self.bit_packer
                 .decompress(position_data, self.buffer.as_mut(), num_bits);
             self.block_offset += COMPRESSION_BLOCK_SIZE as u64;
@@ -8,7 +8,7 @@ use std::io::{self, Write};
 pub struct PositionSerializer<W: io::Write> {
     bit_packer: BitPacker4x,
     write_stream: CountingWriter<W>,
-    write_skiplist: W,
+    write_skip_index: W,
     block: Vec<u32>,
     buffer: Vec<u8>,
     num_ints: u64,
@@ -16,11 +16,11 @@ pub struct PositionSerializer<W: io::Write> {
 }

 impl<W: io::Write> PositionSerializer<W> {
-    pub fn new(write_stream: W, write_skiplist: W) -> PositionSerializer<W> {
+    pub fn new(write_stream: W, write_skip_index: W) -> PositionSerializer<W> {
         PositionSerializer {
             bit_packer: BitPacker4x::new(),
             write_stream: CountingWriter::wrap(write_stream),
-            write_skiplist,
+            write_skip_index,
             block: Vec::with_capacity(128),
             buffer: vec![0u8; 128 * 4],
             num_ints: 0u64,
@@ -52,7 +52,7 @@ impl<W: io::Write> PositionSerializer<W> {

     fn flush_block(&mut self) -> io::Result<()> {
         let num_bits = self.bit_packer.num_bits(&self.block[..]);
-        self.write_skiplist.write_all(&[num_bits])?;
+        self.write_skip_index.write_all(&[num_bits])?;
         let written_len = self
             .bit_packer
             .compress(&self.block[..], &mut self.buffer, num_bits);
@@ -70,10 +70,10 @@ impl<W: io::Write> PositionSerializer<W> {
             self.flush_block()?;
         }
         for &long_skip in &self.long_skips {
-            long_skip.serialize(&mut self.write_skiplist)?;
+            long_skip.serialize(&mut self.write_skip_index)?;
         }
-        (self.long_skips.len() as u32).serialize(&mut self.write_skiplist)?;
-        self.write_skiplist.flush()?;
+        (self.long_skips.len() as u32).serialize(&mut self.write_skip_index)?;
+        self.write_skip_index.flush()?;
         self.write_stream.flush()?;
         Ok(())
     }
@@ -1,11 +1,24 @@
+use std::io;
+
 use crate::common::{BinarySerializable, VInt};
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
+use crate::fieldnorm::FieldNormReader;
 use crate::postings::compression::{
     AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
 };
 use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
+use crate::query::BM25Weight;
 use crate::schema::IndexRecordOption;
-use crate::{DocId, TERMINATED};
+use crate::{DocId, Score, TERMINATED};
+
+fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
+    if let Some(first) = it.next() {
+        Some(it.fold(first, Score::max))
+    } else {
+        None
+    }
+}

 /// `BlockSegmentPostings` is a cursor iterating over blocks
 /// of documents.
@@ -14,16 +27,18 @@ use crate::{DocId, TERMINATED};
 ///
 /// While it is useful for some very specific high-performance
 /// use cases, you should prefer using `SegmentPostings` for most usage.
+#[derive(Clone)]
 pub struct BlockSegmentPostings {
     pub(crate) doc_decoder: BlockDecoder,
     loaded_offset: usize,
     freq_decoder: BlockDecoder,
     freq_reading_option: FreqReadingOption,
+    block_max_score_cache: Option<Score>,

-    doc_freq: usize,
+    doc_freq: u32,

-    data: ReadOnlySource,
-    skip_reader: SkipReader,
+    data: OwnedBytes,
+    pub(crate) skip_reader: SkipReader,
 }

 fn decode_bitpacked_block(
@@ -47,60 +62,104 @@ fn decode_vint_block(
     doc_offset: DocId,
     num_vint_docs: usize,
 ) {
-    doc_decoder.clear();
-    let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
+    let num_consumed_bytes =
+        doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
     if let Some(freq_decoder) = freq_decoder_opt {
-        freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
+        freq_decoder.uncompress_vint_unsorted(
+            &data[num_consumed_bytes..],
+            num_vint_docs,
+            TERMINATED,
+        );
     }
 }

 fn split_into_skips_and_postings(
     doc_freq: u32,
-    data: ReadOnlySource,
-) -> (Option<ReadOnlySource>, ReadOnlySource) {
+    mut bytes: OwnedBytes,
+) -> (Option<OwnedBytes>, OwnedBytes) {
     if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
-        return (None, data);
+        return (None, bytes);
     }
-    let mut data_byte_arr = data.as_slice();
-    let skip_len = VInt::deserialize(&mut data_byte_arr)
-        .expect("Data corrupted")
-        .0 as usize;
-    let vint_len = data.len() - data_byte_arr.len();
-    let (skip_data, postings_data) = data.slice_from(vint_len).split(skip_len);
+    let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize;
+    let (skip_data, postings_data) = bytes.split(skip_len);
     (Some(skip_data), postings_data)
 }

 impl BlockSegmentPostings {
-    pub(crate) fn from_data(
+    pub(crate) fn open(
         doc_freq: u32,
-        data: ReadOnlySource,
+        data: FileSlice,
         record_option: IndexRecordOption,
         requested_option: IndexRecordOption,
-    ) -> BlockSegmentPostings {
+    ) -> io::Result<BlockSegmentPostings> {
         let freq_reading_option = match (record_option, requested_option) {
             (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
             (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
             (_, _) => FreqReadingOption::ReadFreq,
         };

-        let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
+        let (skip_data_opt, postings_data) =
+            split_into_skips_and_postings(doc_freq, data.read_bytes()?);
         let skip_reader = match skip_data_opt {
             Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
-            None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
+            None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option),
         };

-        let doc_freq = doc_freq as usize;
         let mut block_segment_postings = BlockSegmentPostings {
             doc_decoder: BlockDecoder::with_val(TERMINATED),
             loaded_offset: std::usize::MAX,
             freq_decoder: BlockDecoder::with_val(1),
             freq_reading_option,
+            block_max_score_cache: None,
             doc_freq,
             data: postings_data,
             skip_reader,
         };
-        block_segment_postings.advance();
-        block_segment_postings
+        block_segment_postings.load_block();
+        Ok(block_segment_postings)
+    }
+
+    /// Returns the block_max_score for the current block.
+    /// It does not require the block to be loaded. For instance, it is ok to call this method
+    /// after having called `.shallow_advance(..)`.
+    ///
+    /// See `TermScorer::block_max_score(..)` for more information.
+    pub fn block_max_score(
+        &mut self,
+        fieldnorm_reader: &FieldNormReader,
+        bm25_weight: &BM25Weight,
+    ) -> Score {
+        if let Some(score) = self.block_max_score_cache {
+            return score;
+        }
+        if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
+            // if we are on a full block, the skip reader should have the block max information
+            // for us
+            self.block_max_score_cache = Some(skip_reader_max_score);
+            return skip_reader_max_score;
+        }
+        // this is the last block of the segment posting list.
+        // If it is actually loaded, we can compute block max manually.
+        if self.block_is_loaded() {
+            let docs = self.doc_decoder.output_array().iter().cloned();
+            let freqs = self.freq_decoder.output_array().iter().cloned();
+            let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
+                let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
+                bm25_weight.score(fieldnorm_id, term_freq)
+            });
+            let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
+            self.block_max_score_cache = Some(block_max_score);
+            return block_max_score;
+        }
+        // We do not have access to any good block max value. We return bm25_weight.max_score()
+        // as it is a valid upperbound.
+        //
+        // We do not cache it however, so that it gets computed when once block is loaded.
+        bm25_weight.max_score()
+    }
+
+    pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
+        self.freq_reading_option
     }
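The new block_max_score path caches a per-block upper bound for block-max WAND style pruning: it prefers the value recorded in the skip data, falls back to recomputing BM25 over the loaded last block, and otherwise returns bm25_weight.max_score() as a safe upper bound without caching it. The small max_score helper added at the top of the file is just a fold over the score iterator; a standalone replica of it, using f32 in place of the Score alias, behaves like this:

    fn max_score<I: Iterator<Item = f32>>(mut it: I) -> Option<f32> {
        // take the first score as the seed, then fold the rest with f32::max
        it.next().map(|first| it.fold(first, f32::max))
    }

    assert_eq!(max_score(vec![1.0f32, 3.5, 2.0].into_iter()), Some(3.5));
    assert_eq!(max_score(std::iter::empty::<f32>()), None);

Returning None on an empty iterator is what lets the caller above substitute 0.0 via unwrap_or for an empty last block.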
|
|
||||||
// Resets the block segment postings on another position
|
// Resets the block segment postings on another position
|
||||||
@@ -113,24 +172,26 @@ impl BlockSegmentPostings {
|
|||||||
// # Warning
|
// # Warning
|
||||||
//
|
//
|
||||||
// This does not reset the positions list.
|
// This does not reset the positions list.
|
||||||
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
|
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) {
|
||||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
|
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
|
||||||
self.data = ReadOnlySource::new(postings_data);
|
self.data = postings_data;
|
||||||
self.loaded_offset = std::usize::MAX;
|
self.block_max_score_cache = None;
|
||||||
self.loaded_offset = std::usize::MAX;
|
self.loaded_offset = std::usize::MAX;
|
||||||
if let Some(skip_data) = skip_data_opt {
|
if let Some(skip_data) = skip_data_opt {
|
||||||
self.skip_reader.reset(skip_data, doc_freq);
|
self.skip_reader.reset(skip_data, doc_freq);
|
||||||
} else {
|
} else {
|
||||||
self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
|
self.skip_reader.reset(OwnedBytes::empty(), doc_freq);
|
||||||
}
|
}
|
||||||
self.doc_freq = doc_freq as usize;
|
self.doc_freq = doc_freq;
|
||||||
|
self.load_block();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the document frequency associated to this block postings.
|
/// Returns the overall number of documents in the block postings.
|
||||||
|
/// It does not take in account whether documents are deleted or not.
|
||||||
///
|
///
|
||||||
/// This `doc_freq` is simply the sum of the length of all of the blocks
|
/// This `doc_freq` is simply the sum of the length of all of the blocks
|
||||||
/// length, and it does not take in account deleted documents.
|
/// length, and it does not take in account deleted documents.
|
||||||
pub fn doc_freq(&self) -> usize {
|
pub fn doc_freq(&self) -> u32 {
|
||||||
self.doc_freq
|
self.doc_freq
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -140,11 +201,20 @@ impl BlockSegmentPostings {
     /// returned by `.docs()` is empty.
     #[inline]
     pub fn docs(&self) -> &[DocId] {
+        debug_assert!(self.block_is_loaded());
         self.doc_decoder.output_array()
     }

+    /// Returns a full block, regardless of whetehr the block is complete or incomplete (
+    /// as it happens for the last block of the posting list).
+    ///
+    /// In the latter case, the block is guaranteed to be padded with the sentinel value:
+    /// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
+    ///
+    /// This method is useful to run SSE2 linear search.
     #[inline(always)]
     pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
+        debug_assert!(self.block_is_loaded());
         self.doc_decoder.output_aligned()
     }

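The doc comment added above pins down why the last, incomplete block is padded with the `TERMINATED` sentinel and kept 128-bit aligned: a linear (or SSE2) scan can walk a fixed-size array and stop on the sentinel without a separate length check. A minimal, self-contained sketch of that idea follows; the constants `COMPRESSION_BLOCK_SIZE` and `TERMINATED` here are assumed stand-ins for the crate's own, and this is not the library's actual search routine.

```rust
const COMPRESSION_BLOCK_SIZE: usize = 128; // assumed block size
const TERMINATED: u32 = u32::MAX; // assumed sentinel, larger than any real doc id

/// Returns the index of the first element >= target.
/// Because the block is padded with TERMINATED, the scan always stops
/// inside the fixed-size array and needs no bounds check on the data length.
fn linear_search_block(block: &[u32; COMPRESSION_BLOCK_SIZE], target: u32) -> usize {
    block.iter().take_while(|&&doc| doc < target).count()
}

fn main() {
    let mut block = [TERMINATED; COMPRESSION_BLOCK_SIZE];
    block[..5].copy_from_slice(&[3, 7, 8, 20, 42]); // incomplete block, rest is padding
    assert_eq!(linear_search_block(&block, 8), 2);
    assert_eq!(linear_search_block(&block, 1_000), 5); // lands on the padding
}
```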
@@ -157,12 +227,14 @@ impl BlockSegmentPostings {
     /// Return the array of `term freq` in the block.
     #[inline]
     pub fn freqs(&self) -> &[u32] {
+        debug_assert!(self.block_is_loaded());
         self.freq_decoder.output_array()
     }

     /// Return the frequency at index `idx` of the block.
     #[inline]
     pub fn freq(&self, idx: usize) -> u32 {
+        debug_assert!(self.block_is_loaded());
         self.freq_decoder.output(idx)
     }

@@ -173,23 +245,40 @@ impl BlockSegmentPostings {
     /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
     #[inline]
    pub fn block_len(&self) -> usize {
+        debug_assert!(self.block_is_loaded());
         self.doc_decoder.output_len
     }

-    pub(crate) fn position_offset(&self) -> u64 {
-        self.skip_reader.position_offset()
-    }
-
     /// Position on a block that may contains `target_doc`.
     ///
     /// If all docs are smaller than target, the block loaded may be empty,
     /// or be the last an incomplete VInt block.
     pub fn seek(&mut self, target_doc: DocId) {
-        self.skip_reader.seek(target_doc);
+        self.shallow_seek(target_doc);
         self.load_block();
     }

-    fn load_block(&mut self) {
+    pub(crate) fn position_offset(&self) -> u64 {
+        self.skip_reader.position_offset()
+    }
+
+    /// Dangerous API! This calls seek on the skip list,
+    /// but does not `.load_block()` afterwards.
+    ///
+    /// `.load_block()` needs to be called manually afterwards.
+    /// If all docs are smaller than target, the block loaded may be empty,
+    /// or be the last an incomplete VInt block.
+    pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
+        if self.skip_reader.seek(target_doc) {
+            self.block_max_score_cache = None;
+        }
+    }
+
+    pub(crate) fn block_is_loaded(&self) -> bool {
+        self.loaded_offset == self.skip_reader.byte_offset()
+    }
+
+    pub(crate) fn load_block(&mut self) {
         let offset = self.skip_reader.byte_offset();
         if self.loaded_offset == offset {
             return;
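The new `shallow_seek` / `block_is_loaded` / `load_block` split, together with the `block_max_score_cache` field, is what enables block-level skipping: move the skip reader cheaply, look at the block's maximal score, and only pay for decoding when the block can still contain a competitive document. The sketch below illustrates that calling pattern with a stand-in `Blocks` type; the struct, its fields, and the threshold logic are illustrative assumptions, not tantivy's actual scorer code.

```rust
// Stand-in for a block postings cursor; not tantivy's real type.
struct Blocks {
    blocks: Vec<(Vec<u32>, f32)>, // (docs in block, block max score)
    cursor: usize,
    loaded: Option<Vec<u32>>,
}

impl Blocks {
    // Cheap: only moves the cursor, never decodes a block.
    fn shallow_seek(&mut self, target: u32) {
        while self.cursor + 1 < self.blocks.len()
            && *self.blocks[self.cursor].0.last().unwrap() < target
        {
            self.cursor += 1;
            self.loaded = None; // invalidate any cached per-block state
        }
    }
    fn block_max_score(&self) -> f32 {
        self.blocks[self.cursor].1
    }
    // Expensive: "decodes" the current block.
    fn load_block(&mut self) {
        self.loaded = Some(self.blocks[self.cursor].0.clone());
    }
}

fn main() {
    let mut postings = Blocks {
        blocks: vec![(vec![1, 5, 9], 0.4), (vec![20, 33, 41], 1.8)],
        cursor: 0,
        loaded: None,
    };
    let threshold = 1.0f32;
    postings.shallow_seek(25);
    // Only decode the block if it can still beat the current threshold.
    if postings.block_max_score() > threshold {
        postings.load_block();
    }
    assert!(postings.loaded.is_some());
}
```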
@@ -214,7 +303,14 @@ impl BlockSegmentPostings {
                     tf_num_bits,
                 );
             }
-            BlockInfo::VInt(num_vint_docs) => {
+            BlockInfo::VInt { num_docs } => {
+                let data = {
+                    if num_docs == 0 {
+                        &[]
+                    } else {
+                        &self.data.as_slice()[offset..]
+                    }
+                };
                 decode_vint_block(
                     &mut self.doc_decoder,
                     if let FreqReadingOption::ReadFreq = self.freq_reading_option {
@@ -222,9 +318,9 @@ impl BlockSegmentPostings {
                     } else {
                         None
                     },
-                    &self.data.as_slice()[offset..],
+                    data,
                     self.skip_reader.last_doc_in_previous_block,
-                    num_vint_docs as usize,
+                    num_docs as usize,
                 );
             }
         }
@@ -233,24 +329,23 @@ impl BlockSegmentPostings {
     /// Advance to the next block.
     ///
     /// Returns false iff there was no remaining blocks.
-    pub fn advance(&mut self) -> bool {
-        if !self.skip_reader.advance() {
-            return false;
-        }
+    pub fn advance(&mut self) {
+        self.skip_reader.advance();
+        self.block_max_score_cache = None;
         self.load_block();
-        true
     }

     /// Returns an empty segment postings object
     pub fn empty() -> BlockSegmentPostings {
         BlockSegmentPostings {
             doc_decoder: BlockDecoder::with_val(TERMINATED),
-            loaded_offset: std::usize::MAX,
+            loaded_offset: 0,
             freq_decoder: BlockDecoder::with_val(1),
             freq_reading_option: FreqReadingOption::NoFreq,
+            block_max_score_cache: None,
             doc_freq: 0,
-            data: ReadOnlySource::new(vec![]),
+            data: OwnedBytes::empty(),
-            skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
+            skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
         }
     }
 }
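Note that `advance()` now returns `()` instead of `bool`: exhaustion is observed through the decoded block itself, which comes back empty once the cursor has moved past the last block. A minimal consumption loop in the shape the updated tests use, assuming a `BlockSegmentPostings` obtained elsewhere (for example from `read_block_postings_from_terminfo`):

```rust
use tantivy::postings::BlockSegmentPostings;

// Drain a block postings cursor, visiting every doc id exactly once.
fn visit_all_docs(block_postings: &mut BlockSegmentPostings, mut visit: impl FnMut(u32)) {
    loop {
        let block = block_postings.docs();
        if block.is_empty() {
            // Past the last block: `advance()` no longer signals exhaustion,
            // the empty decoded block does.
            break;
        }
        for &doc in block {
            visit(doc);
        }
        block_postings.advance();
    }
}
```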
@@ -273,8 +368,10 @@ mod tests {
     #[test]
     fn test_empty_segment_postings() {
         let mut postings = SegmentPostings::empty();
+        assert_eq!(postings.doc(), TERMINATED);
         assert_eq!(postings.advance(), TERMINATED);
         assert_eq!(postings.advance(), TERMINATED);
+        assert_eq!(postings.doc_freq(), 0);
         assert_eq!(postings.len(), 0);
     }

@@ -294,7 +391,10 @@ mod tests {
     #[test]
     fn test_empty_block_segment_postings() {
         let mut postings = BlockSegmentPostings::empty();
-        assert!(!postings.advance());
+        assert!(postings.docs().is_empty());
+        assert_eq!(postings.doc_freq(), 0);
+        postings.advance();
+        assert!(postings.docs().is_empty());
         assert_eq!(postings.doc_freq(), 0);
     }

@@ -306,13 +406,14 @@ mod tests {
         assert_eq!(block_segments.doc_freq(), 100_000);
         loop {
             let block = block_segments.docs();
+            if block.is_empty() {
+                break;
+            }
             for (i, doc) in block.iter().cloned().enumerate() {
                 assert_eq!(offset + (i as u32), doc);
             }
             offset += block.len() as u32;
-            if block_segments.advance() {
-                break;
-            }
+            block_segments.advance();
         }
     }

@@ -354,7 +455,7 @@ mod tests {
         let int_field = schema_builder.add_u64_field("id", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let mut last_doc = 0u32;
         for &doc in docs {
             for _ in last_doc..doc {
@@ -366,14 +467,16 @@ mod tests {
         index_writer.commit().unwrap();
         let searcher = index.reader().unwrap().searcher();
         let segment_reader = searcher.segment_reader(0);
-        let inverted_index = segment_reader.inverted_index(int_field);
+        let inverted_index = segment_reader.inverted_index(int_field).unwrap();
         let term = Term::from_field_u64(int_field, 0u64);
-        let term_info = inverted_index.get_term_info(&term).unwrap();
+        let term_info = inverted_index.get_term_info(&term).unwrap().unwrap();
-        inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
+        inverted_index
+            .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
+            .unwrap()
     }

     #[test]
-    fn test_block_segment_postings_skip2() {
+    fn test_block_segment_postings_seek() {
         let mut docs = vec![0];
         for i in 0..1300 {
             docs.push((i * i / 100) + i);
@@ -390,38 +493,38 @@ mod tests {
     }

     #[test]
-    fn test_reset_block_segment_postings() {
+    fn test_reset_block_segment_postings() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let int_field = schema_builder.add_u64_field("id", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         // create two postings list, one containg even number,
         // the other containing odd numbers.
         for i in 0..6 {
             let doc = doc!(int_field=> (i % 2) as u64);
             index_writer.add_document(doc);
         }
-        index_writer.commit().unwrap();
+        index_writer.commit()?;
-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();
         let segment_reader = searcher.segment_reader(0);

         let mut block_segments;
         {
             let term = Term::from_field_u64(int_field, 0u64);
-            let inverted_index = segment_reader.inverted_index(int_field);
+            let inverted_index = segment_reader.inverted_index(int_field)?;
-            let term_info = inverted_index.get_term_info(&term).unwrap();
+            let term_info = inverted_index.get_term_info(&term)?.unwrap();
             block_segments = inverted_index
-                .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
+                .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
         }
         assert_eq!(block_segments.docs(), &[0, 2, 4]);
         {
             let term = Term::from_field_u64(int_field, 1u64);
-            let inverted_index = segment_reader.inverted_index(int_field);
+            let inverted_index = segment_reader.inverted_index(int_field)?;
-            let term_info = inverted_index.get_term_info(&term).unwrap();
+            let term_info = inverted_index.get_term_info(&term)?.unwrap();
-            inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
+            inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
         }
-        assert!(block_segments.advance());
         assert_eq!(block_segments.docs(), &[1, 3, 5]);
+        Ok(())
     }
 }
@@ -1,5 +1,4 @@
 use crate::common::FixedSize;
-use crate::docset::TERMINATED;
 use bitpacking::{BitPacker, BitPacker4x};

 pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
@@ -53,8 +52,10 @@ impl BlockEncoder {
 /// We ensure that the OutputBuffer is align on 128 bits
 /// in order to run SSE2 linear search on it.
 #[repr(align(128))]
+#[derive(Clone)]
 pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);

+#[derive(Clone)]
 pub struct BlockDecoder {
     bitpacker: BitPacker4x,
     output: AlignedBuffer,
@@ -107,10 +108,6 @@ impl BlockDecoder {
     pub fn output(&self, idx: usize) -> u32 {
         self.output.0[idx]
     }

-    pub fn clear(&mut self) {
-        self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
-    }
 }

 pub trait VIntEncoder {
@@ -147,11 +144,14 @@ pub trait VIntDecoder {
     /// For instance, if delta encoded are `1, 3, 9`, and the
     /// `offset` is 5, then the output will be:
     /// `5 + 1 = 6, 6 + 3= 9, 9 + 9 = 18`
+    ///
+    /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
     fn uncompress_vint_sorted(
         &mut self,
         compressed_data: &[u8],
         offset: u32,
         num_els: usize,
+        padding: u32,
     ) -> usize;

     /// Uncompress an array of `u32s`, compressed using variable
@@ -159,7 +159,14 @@ pub trait VIntDecoder {
     ///
     /// The method takes a number of int to decompress, and returns
     /// the amount of bytes that were read to decompress them.
-    fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize;
+    ///
+    /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
+    fn uncompress_vint_unsorted(
+        &mut self,
+        compressed_data: &[u8],
+        num_els: usize,
+        padding: u32,
+    ) -> usize;
 }

 impl VIntEncoder for BlockEncoder {
@@ -178,13 +185,21 @@ impl VIntDecoder for BlockDecoder {
         compressed_data: &[u8],
         offset: u32,
         num_els: usize,
+        padding: u32,
     ) -> usize {
         self.output_len = num_els;
+        self.output.0.iter_mut().for_each(|el| *el = padding);
         vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
     }

-    fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
+    fn uncompress_vint_unsorted(
+        &mut self,
+        compressed_data: &[u8],
+        num_els: usize,
+        padding: u32,
+    ) -> usize {
         self.output_len = num_els;
+        self.output.0.iter_mut().for_each(|el| *el = padding);
         vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
     }
 }
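The `padding` parameter added above changes the decoder's contract: delta-decode `num_els` values on top of `offset`, then fill the rest of the 128-slot output with `padding`. A tiny model of that contract in plain Rust follows; it is not the crate's bitpacked or vint codec, and `BLOCK_LEN` is an assumed stand-in for `COMPRESSION_BLOCK_SIZE`.

```rust
const BLOCK_LEN: usize = 128; // assumed output block size

// Model of the `uncompress_vint_sorted` output contract:
// prefix = running sums of the deltas starting from `offset`,
// remaining slots = `padding`.
fn decode_sorted_deltas(deltas: &[u32], offset: u32, padding: u32) -> [u32; BLOCK_LEN] {
    let mut out = [padding; BLOCK_LEN];
    let mut acc = offset;
    for (slot, &delta) in out.iter_mut().zip(deltas) {
        acc += delta;
        *slot = acc;
    }
    out
}

fn main() {
    let out = decode_sorted_deltas(&[1, 3, 9], 5, u32::MAX);
    assert_eq!(&out[..3], &[6, 9, 18]); // the doc comment's example
    assert!(out[3..].iter().all(|&v| v == u32::MAX)); // padded tail
}
```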
@@ -193,6 +208,7 @@ impl VIntDecoder for BlockDecoder {
 pub mod tests {

     use super::*;
+    use crate::TERMINATED;

     #[test]
     fn test_encode_sorted_block() {
@@ -271,18 +287,20 @@ pub mod tests {
     }
     #[test]
     fn test_encode_vint() {
-        {
+        const PADDING_VALUE: u32 = 234_234_345u32;
         let expected_length = 154;
         let mut encoder = BlockEncoder::new();
         let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
         for offset in &[0u32, 1u32, 2u32] {
             let encoded_data = encoder.compress_vint_sorted(&input, *offset);
             assert!(encoded_data.len() <= expected_length);
             let mut decoder = BlockDecoder::default();
             let consumed_num_bytes =
-                decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
+                decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
             assert_eq!(consumed_num_bytes, encoded_data.len());
             assert_eq!(input, decoder.output_array());
+            for i in input.len()..COMPRESSION_BLOCK_SIZE {
+                assert_eq!(decoder.output(i), PADDING_VALUE);
             }
         }
     }
@@ -292,6 +310,7 @@ pub mod tests {
 mod bench {

     use super::*;
+    use crate::TERMINATED;
     use rand::rngs::StdRng;
     use rand::Rng;
     use rand::SeedableRng;
@@ -322,7 +341,7 @@ mod bench {
         let mut encoder = BlockEncoder::new();
         let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
         let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
-        let mut decoder = BlockDecoder::new();
+        let mut decoder = BlockDecoder::default();
         b.iter(|| {
             decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
         });
@@ -357,9 +376,9 @@ mod bench {
         let mut encoder = BlockEncoder::new();
         let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
         let compressed = encoder.compress_vint_sorted(&data, 0u32);
-        let mut decoder = BlockDecoder::new();
+        let mut decoder = BlockDecoder::default();
         b.iter(|| {
-            decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
+            decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED);
         });
     }
 }
@@ -15,20 +15,14 @@ mod stacker;
 mod term_info;

 pub(crate) use self::block_search::BlockSearcher;

-pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
-pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
-
-pub use self::postings::Postings;
-pub(crate) use self::skip::{BlockInfo, SkipReader};
-pub use self::term_info::TermInfo;
-
 pub use self::block_segment_postings::BlockSegmentPostings;
+pub use self::postings::Postings;
+pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
 pub use self::segment_postings::SegmentPostings;
+pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
+pub(crate) use self::skip::{BlockInfo, SkipReader};
 pub(crate) use self::stacker::compute_table_size;
+pub use self::term_info::TermInfo;
-pub use crate::common::HasLen;

 pub(crate) type UnorderedTermId = u64;

@@ -42,8 +36,8 @@ pub(crate) enum FreqReadingOption {

 #[cfg(test)]
 pub mod tests {
+    use super::InvertedIndexSerializer;
-    use super::*;
+    use super::Postings;
     use crate::core::Index;
     use crate::core::SegmentComponent;
     use crate::core::SegmentReader;
@@ -53,63 +47,59 @@ pub mod tests {
     use crate::indexer::SegmentWriter;
     use crate::merge_policy::NoMergePolicy;
     use crate::query::Scorer;
-    use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
     use crate::schema::{Field, TextOptions};
     use crate::schema::{IndexRecordOption, TextFieldIndexing};
+    use crate::schema::{Schema, Term, INDEXED, TEXT};
     use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
     use crate::DocId;
+    use crate::HasLen;
     use crate::Score;
-    use once_cell::sync::Lazy;
-    use rand::rngs::StdRng;
-    use rand::{Rng, SeedableRng};
-    use std::iter;
+    use std::{iter, mem};

     #[test]
-    pub fn test_position_write() {
+    pub fn test_position_write() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut segment = index.new_segment();
-        let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
-        {
-            let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
-            field_serializer.new_term("abc".as_bytes()).unwrap();
-            for doc_id in 0u32..120u32 {
-                let delta_positions = vec![1, 2, 3, 2];
-                field_serializer
-                    .write_doc(doc_id, 4, &delta_positions)
-                    .unwrap();
-            }
-            field_serializer.close_term().unwrap();
+        let mut posting_serializer = InvertedIndexSerializer::open(&mut segment)?;
+        let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4, None)?;
+        field_serializer.new_term("abc".as_bytes(), 12u32)?;
+        for doc_id in 0u32..120u32 {
+            let delta_positions = vec![1, 2, 3, 2];
+            field_serializer.write_doc(doc_id, 4, &delta_positions)?;
         }
-        posting_serializer.close().unwrap();
-        let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
+        field_serializer.close_term()?;
+        mem::drop(field_serializer);
+        posting_serializer.close()?;
+        let read = segment.open_read(SegmentComponent::POSITIONS)?;
         assert!(read.len() <= 140);
+        Ok(())
     }

     #[test]
-    pub fn test_skip_positions() {
+    pub fn test_skip_positions() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let title = schema_builder.add_text_field("title", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 30_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         index_writer.add_document(doc!(title => r#"abc abc abc"#));
         index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
         for _ in 0..1_000 {
             index_writer.add_document(doc!(title => r#"abc abc abc"#));
         }
         index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
-        index_writer.commit().unwrap();
+        index_writer.commit()?;

-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();
-        let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
+        let inverted_index = searcher.segment_reader(0u32).inverted_index(title)?;
         let term = Term::from_field_text(title, "abc");
         let mut positions = Vec::new();
         {
             let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings.doc(), 0);
             postings.positions(&mut positions);
@@ -123,7 +113,7 @@ pub mod tests {
         }
         {
             let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings.doc(), 0);
             assert_eq!(postings.advance(), 1);
@@ -132,7 +122,7 @@ pub mod tests {
         }
         {
             let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings.seek(1), 1);
             assert_eq!(postings.doc(), 1);
@@ -141,7 +131,7 @@ pub mod tests {
         }
         {
             let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings.seek(1002), 1002);
             assert_eq!(postings.doc(), 1002);
@@ -150,7 +140,7 @@ pub mod tests {
         }
         {
             let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings.seek(100), 100);
             assert_eq!(postings.seek(1002), 1002);
@@ -158,10 +148,11 @@ pub mod tests {
             postings.positions(&mut positions);
             assert_eq!(&[0, 5], &positions[..]);
         }
+        Ok(())
     }

     #[test]
-    pub fn test_drop_token_that_are_too_long() {
+    pub fn test_drop_token_that_are_too_long() -> crate::Result<()> {
         let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
         let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
         exceeding_token_text.push_str(" hello");
@@ -178,7 +169,7 @@ pub mod tests {
             .tokenizers()
             .register("simple_no_truncation", SimpleTokenizer);
         let reader = index.reader().unwrap();
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
         {
             index_writer.add_document(doc!(text_field=>exceeding_token_text));
@@ -186,10 +177,10 @@ pub mod tests {
             reader.reload().unwrap();
             let searcher = reader.searcher();
             let segment_reader = searcher.segment_reader(0u32);
-            let inverted_index = segment_reader.inverted_index(text_field);
+            let inverted_index = segment_reader.inverted_index(text_field)?;
             assert_eq!(inverted_index.terms().num_terms(), 1);
             let mut bytes = vec![];
-            assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
+            assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
             assert_eq!(&bytes, b"hello");
         }
         {
@@ -198,16 +189,17 @@ pub mod tests {
             reader.reload().unwrap();
             let searcher = reader.searcher();
             let segment_reader = searcher.segment_reader(1u32);
-            let inverted_index = segment_reader.inverted_index(text_field);
+            let inverted_index = segment_reader.inverted_index(text_field)?;
             assert_eq!(inverted_index.terms().num_terms(), 1);
             let mut bytes = vec![];
-            assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
+            assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
             assert_eq!(&bytes[..], ok_token_text.as_bytes());
         }
+        Ok(())
     }

     #[test]
-    pub fn test_position_and_fieldnorm1() {
+    pub fn test_position_and_fieldnorm1() -> crate::Result<()> {
         let mut positions = Vec::new();
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
@@ -219,42 +211,38 @@ pub mod tests {
         let mut segment_writer =
             SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
         {
-            let mut doc = Document::default();
             // checking that position works if the field has two values
-            doc.add_text(text_field, "a b a c a d a a.");
-            doc.add_text(text_field, "d d d d a");
             let op = AddOperation {
                 opstamp: 0u64,
-                document: doc,
+                document: doc!(
+                    text_field => "a b a c a d a a.",
+                    text_field => "d d d d a"
+                ),
             };
-            segment_writer.add_document(op, &schema).unwrap();
+            segment_writer.add_document(op, &schema)?;
         }
         {
-            let mut doc = Document::default();
-            doc.add_text(text_field, "b a");
             let op = AddOperation {
                 opstamp: 1u64,
-                document: doc,
+                document: doc!(text_field => "b a"),
             };
             segment_writer.add_document(op, &schema).unwrap();
         }
         for i in 2..1000 {
-            let mut doc = Document::default();
-            let mut text = iter::repeat("e ").take(i).collect::<String>();
+            let mut text: String = iter::repeat("e ").take(i).collect();
             text.push_str(" a");
-            doc.add_text(text_field, &text);
             let op = AddOperation {
                 opstamp: 2u64,
-                document: doc,
+                document: doc!(text_field => text),
             };
             segment_writer.add_document(op, &schema).unwrap();
         }
-        segment_writer.finalize().unwrap();
+        segment_writer.finalize()?;
     }
     {
-        let segment_reader = SegmentReader::open(&segment).unwrap();
+        let segment_reader = SegmentReader::open(&segment)?;
         {
-            let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field);
+            let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
             assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
             assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
             for i in 2..1000 {
@@ -267,15 +255,15 @@ pub mod tests {
         {
             let term_a = Term::from_field_text(text_field, "abcdef");
             assert!(segment_reader
-                .inverted_index(term_a.field())
+                .inverted_index(term_a.field())?
-                .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
                 .is_none());
         }
         {
             let term_a = Term::from_field_text(text_field, "a");
             let mut postings_a = segment_reader
-                .inverted_index(term_a.field())
+                .inverted_index(term_a.field())?
-                .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings_a.len(), 1000);
             assert_eq!(postings_a.doc(), 0);
@@ -297,8 +285,8 @@ pub mod tests {
         {
             let term_e = Term::from_field_text(text_field, "e");
             let mut postings_e = segment_reader
-                .inverted_index(term_e.field())
+                .inverted_index(term_e.field())?
-                .read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)
+                .read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)?
                 .unwrap();
             assert_eq!(postings_e.len(), 1000 - 2);
             for i in 2u32..1000u32 {
@@ -314,17 +302,18 @@ pub mod tests {
                 assert_eq!(postings_e.doc(), TERMINATED);
             }
         }
+        Ok(())
     }

     #[test]
-    pub fn test_position_and_fieldnorm2() {
+    pub fn test_position_and_fieldnorm2() -> crate::Result<()> {
         let mut positions: Vec<u32> = Vec::new();
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field => "g b b d c g c"));
             index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
             assert!(index_writer.commit().is_ok());
@@ -333,16 +322,17 @@ pub mod tests {
         let searcher = index.reader().unwrap().searcher();
         let segment_reader = searcher.segment_reader(0);
         let mut postings = segment_reader
-            .inverted_index(text_field)
+            .inverted_index(text_field)?
-            .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
+            .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
             .unwrap();
         assert_eq!(postings.doc(), 1u32);
         postings.positions(&mut positions);
         assert_eq!(&positions[..], &[1u32, 4]);
+        Ok(())
     }

     #[test]
-    fn test_skip_next() {
+    fn test_skip_next() -> crate::Result<()> {
         let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
         let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
         let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
@@ -353,10 +343,9 @@ pub mod tests {
         let mut schema_builder = Schema::builder();
         let value_field = schema_builder.add_u64_field("value", INDEXED);
         let schema = schema_builder.build();

         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             for i in 0u64..num_docs as u64 {
                 let doc = doc!(value_field => 2u64, value_field => i % 2u64);
                 index_writer.add_document(doc);
@@ -365,15 +354,15 @@ pub mod tests {
             }
             index
         };
-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();
         let segment_reader = searcher.segment_reader(0);

         // check that the basic usage works
         for i in 0..num_docs - 1 {
             for j in i + 1..num_docs {
                 let mut segment_postings = segment_reader
-                    .inverted_index(term_2.field())
+                    .inverted_index(term_2.field())?
-                    .read_postings(&term_2, IndexRecordOption::Basic)
+                    .read_postings(&term_2, IndexRecordOption::Basic)?
                     .unwrap();
                 assert_eq!(segment_postings.seek(i), i);
                 assert_eq!(segment_postings.doc(), i);
@@ -385,8 +374,8 @@ pub mod tests {

         {
             let mut segment_postings = segment_reader
-                .inverted_index(term_2.field())
+                .inverted_index(term_2.field())?
-                .read_postings(&term_2, IndexRecordOption::Basic)
+                .read_postings(&term_2, IndexRecordOption::Basic)?
                 .unwrap();

             // check that `skip_next` advances the iterator
@@ -405,8 +394,8 @@ pub mod tests {
         // check that filtering works
         {
             let mut segment_postings = segment_reader
-                .inverted_index(term_0.field())
+                .inverted_index(term_0.field())?
-                .read_postings(&term_0, IndexRecordOption::Basic)
+                .read_postings(&term_0, IndexRecordOption::Basic)?
                 .unwrap();

             for i in 0..num_docs / 2 {
@@ -415,8 +404,8 @@ pub mod tests {
             }

             let mut segment_postings = segment_reader
-                .inverted_index(term_0.field())
+                .inverted_index(term_0.field())?
-                .read_postings(&term_0, IndexRecordOption::Basic)
+                .read_postings(&term_0, IndexRecordOption::Basic)?
                 .unwrap();

             for i in 0..num_docs / 2 - 1 {
@@ -427,19 +416,19 @@ pub mod tests {

         // delete some of the documents
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             index_writer.delete_term(term_0);
             assert!(index_writer.commit().is_ok());
         }
-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();
         assert_eq!(searcher.segment_readers().len(), 1);
         let segment_reader = searcher.segment_reader(0);

         // make sure seeking still works
         for i in 0..num_docs {
             let mut segment_postings = segment_reader
-                .inverted_index(term_2.field())
+                .inverted_index(term_2.field())?
-                .read_postings(&term_2, IndexRecordOption::Basic)
+                .read_postings(&term_2, IndexRecordOption::Basic)?
                 .unwrap();

             if i % 2 == 0 {
@@ -455,8 +444,8 @@ pub mod tests {
         // now try with a longer sequence
         {
             let mut segment_postings = segment_reader
-                .inverted_index(term_2.field())
+                .inverted_index(term_2.field())?
-                .read_postings(&term_2, IndexRecordOption::Basic)
+                .read_postings(&term_2, IndexRecordOption::Basic)?
                 .unwrap();

             let mut last = 2; // start from 5 to avoid seeking to 3 twice
@@ -481,69 +470,21 @@ pub mod tests {

         // delete everything else
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             index_writer.delete_term(term_1);
             assert!(index_writer.commit().is_ok());
         }
-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();

         // finally, check that it's empty
         {
-            let searchable_segment_ids = index
-                .searchable_segment_ids()
-                .expect("could not get index segment ids");
+            let searchable_segment_ids = index.searchable_segment_ids()?;
             assert!(searchable_segment_ids.is_empty());
             assert_eq!(searcher.num_docs(), 0);
         }
+        Ok(())
     }

-    pub static TERM_A: Lazy<Term> = Lazy::new(|| {
-        let field = Field::from_field_id(0);
-        Term::from_field_text(field, "a")
-    });
-    pub static TERM_B: Lazy<Term> = Lazy::new(|| {
-        let field = Field::from_field_id(0);
-        Term::from_field_text(field, "b")
-    });
-    pub static TERM_C: Lazy<Term> = Lazy::new(|| {
-        let field = Field::from_field_id(0);
-        Term::from_field_text(field, "c")
-    });
-    pub static TERM_D: Lazy<Term> = Lazy::new(|| {
-        let field = Field::from_field_id(0);
-        Term::from_field_text(field, "d")
-    });
-
-    pub static INDEX: Lazy<Index> = Lazy::new(|| {
-        let mut schema_builder = Schema::builder();
-        let text_field = schema_builder.add_text_field("text", STRING);
-        let schema = schema_builder.build();
-
-        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
-
-        let index = Index::create_in_ram(schema);
-        let posting_list_size = 1_000_000;
-        {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            for _ in 0..posting_list_size {
-                let mut doc = Document::default();
-                if rng.gen_bool(1f64 / 15f64) {
-                    doc.add_text(text_field, "a");
-                }
-                if rng.gen_bool(1f64 / 10f64) {
-                    doc.add_text(text_field, "b");
-                }
-                if rng.gen_bool(1f64 / 5f64) {
-                    doc.add_text(text_field, "c");
-                }
-                doc.add_text(text_field, "d");
-                index_writer.add_document(doc);
-            }
-            assert!(index_writer.commit().is_ok());
-        }
-        index
-    });

     /// Wraps a given docset, and forward alls call but the
     /// `.skip_next(...)`. This is useful to test that a specialized
     /// implementation of `.skip_next(...)` is consistent
@@ -582,6 +523,9 @@ pub mod tests {
     ) {
         for target in targets {
             let mut postings_opt = postings_factory();
+            if target < postings_opt.doc() {
+                continue;
+            }
             let mut postings_unopt = UnoptimizedDocSet::wrap(postings_factory());
             let skip_result_opt = postings_opt.seek(target);
             let skip_result_unopt = postings_unopt.seek(target);
@@ -605,15 +549,65 @@ pub mod tests {

 #[cfg(all(test, feature = "unstable"))]
 mod bench {

-    use super::tests::*;
     use crate::docset::TERMINATED;
     use crate::query::Intersection;
     use crate::schema::IndexRecordOption;
+    use crate::schema::{Document, Field, Schema, Term, STRING};
     use crate::tests;
     use crate::DocSet;
+    use crate::Index;
+    use once_cell::sync::Lazy;
+    use rand::rngs::StdRng;
+    use rand::{Rng, SeedableRng};
     use test::{self, Bencher};

+    pub static TERM_A: Lazy<Term> = Lazy::new(|| {
+        let field = Field::from_field_id(0);
+        Term::from_field_text(field, "a")
+    });
+    pub static TERM_B: Lazy<Term> = Lazy::new(|| {
+        let field = Field::from_field_id(0);
+        Term::from_field_text(field, "b")
+    });
+    pub static TERM_C: Lazy<Term> = Lazy::new(|| {
+        let field = Field::from_field_id(0);
+        Term::from_field_text(field, "c")
+    });
+    pub static TERM_D: Lazy<Term> = Lazy::new(|| {
+        let field = Field::from_field_id(0);
+        Term::from_field_text(field, "d")
+    });
+
+    pub static INDEX: Lazy<Index> = Lazy::new(|| {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", STRING);
+        let schema = schema_builder.build();
+
+        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
+
+        let index = Index::create_in_ram(schema);
+        let posting_list_size = 1_000_000;
+        {
+            let mut index_writer = index.writer_for_tests().unwrap();
+            for _ in 0..posting_list_size {
+                let mut doc = Document::default();
+                if rng.gen_bool(1f64 / 15f64) {
+                    doc.add_text(text_field, "a");
+                }
+                if rng.gen_bool(1f64 / 10f64) {
+                    doc.add_text(text_field, "b");
+                }
+                if rng.gen_bool(1f64 / 5f64) {
+                    doc.add_text(text_field, "c");
+                }
+                doc.add_text(text_field, "d");
+                index_writer.add_document(doc);
+            }
+            assert!(index_writer.commit().is_ok());
+        }
+        index
+    });

     #[bench]
     fn bench_segment_postings(b: &mut Bencher) {
         let reader = INDEX.reader().unwrap();
@@ -623,7 +617,9 @@ mod bench {
         b.iter(|| {
             let mut segment_postings = segment_reader
                 .inverted_index(TERM_A.field())
+                .unwrap()
                 .read_postings(&*TERM_A, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             while segment_postings.advance() != TERMINATED {}
         });
@@ -637,19 +633,27 @@ mod bench {
         b.iter(|| {
             let segment_postings_a = segment_reader
                 .inverted_index(TERM_A.field())
+                .unwrap()
                 .read_postings(&*TERM_A, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             let segment_postings_b = segment_reader
                 .inverted_index(TERM_B.field())
+                .unwrap()
                 .read_postings(&*TERM_B, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             let segment_postings_c = segment_reader
                 .inverted_index(TERM_C.field())
+                .unwrap()
                 .read_postings(&*TERM_C, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             let segment_postings_d = segment_reader
                 .inverted_index(TERM_D.field())
+                .unwrap()
                 .read_postings(&*TERM_D, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             let mut intersection = Intersection::new(vec![
                 segment_postings_a,
@@ -669,7 +673,9 @@ mod bench {

         let mut segment_postings = segment_reader
             .inverted_index(TERM_A.field())
+            .unwrap()
             .read_postings(&*TERM_A, IndexRecordOption::Basic)
+            .unwrap()
             .unwrap();

         let mut existing_docs = Vec::new();
@@ -685,7 +691,9 @@ mod bench {
         b.iter(|| {
             let mut segment_postings = segment_reader
                 .inverted_index(TERM_A.field())
+                .unwrap()
                 .read_postings(&*TERM_A, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             for doc in &existing_docs {
                 if segment_postings.seek(*doc) == TERMINATED {
@@ -724,12 +732,14 @@ mod bench {
             let n: u32 = test::black_box(17);
             let mut segment_postings = segment_reader
                 .inverted_index(TERM_A.field())
+                .unwrap()
                 .read_postings(&*TERM_A, IndexRecordOption::Basic)
+                .unwrap()
                 .unwrap();
             let mut s = 0u32;
             while segment_postings.doc() != TERMINATED {
                 s += (segment_postings.doc() & n) % 1024;
-                segment_postings.advance()
+                segment_postings.advance();
             }
             s
         });
@@ -1,5 +1,6 @@
 use super::stacker::{Addr, MemoryArena, TermHashMap};

+use crate::fieldnorm::FieldNormReaders;
 use crate::postings::recorder::{
     BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
 };
@@ -37,12 +38,8 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter>
         | FieldType::I64(_)
         | FieldType::F64(_)
         | FieldType::Date(_)
+        | FieldType::Bytes(_)
         | FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
-        FieldType::Bytes => {
-            // FieldType::Bytes cannot actually be indexed.
-            // TODO fix during the indexer refactoring described in #276
-            SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
-        }
     }
 }

@@ -104,6 +101,7 @@ impl MultiFieldPostingsWriter {
         doc: DocId,
         field: Field,
         token_stream: &mut dyn TokenStream,
+        term_buffer: &mut Term,
     ) -> u32 {
         let postings_writer =
             self.per_field_postings_writers[field.field_id() as usize].deref_mut();
@@ -113,6 +111,7 @@ impl MultiFieldPostingsWriter {
             field,
             token_stream,
             &mut self.heap,
+            term_buffer,
         )
     }

@@ -128,6 +127,7 @@ impl MultiFieldPostingsWriter {
     pub fn serialize(
         &self,
         serializer: &mut InvertedIndexSerializer,
+        fieldnorm_readers: FieldNormReaders,
     ) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
         let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
             self.term_index.iter().collect();
@@ -157,12 +157,17 @@ impl MultiFieldPostingsWriter {
                 unordered_term_mappings.insert(field, mapping);
             }
             FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {}
-            FieldType::Bytes => {}
+            FieldType::Bytes(_) => {}
         }

-        let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
-        let mut field_serializer =
-            serializer.new_field(field, postings_writer.total_num_tokens())?;
+        let postings_writer =
+            self.per_field_postings_writers[field.field_id() as usize].as_ref();
+        let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
+        let mut field_serializer = serializer.new_field(
+            field,
+            postings_writer.total_num_tokens(),
+            fieldnorm_reader,
+        )?;
         postings_writer.serialize(
             &term_offsets[start..stop],
             &mut field_serializer,
@@ -214,13 +219,20 @@ pub trait PostingsWriter {
         field: Field,
         token_stream: &mut dyn TokenStream,
         heap: &mut MemoryArena,
+        term_buffer: &mut Term,
     ) -> u32 {
-        let mut term = Term::for_field(field);
+        term_buffer.set_field(field);
         let mut sink = |token: &Token| {
             // We skip all tokens with a len greater than u16.
             if token.text.len() <= MAX_TOKEN_LEN {
-                term.set_text(token.text.as_str());
-                self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
+                term_buffer.set_text(token.text.as_str());
+                self.subscribe(
+                    term_index,
+                    doc_id,
+                    token.position as u32,
+                    &term_buffer,
+                    heap,
+                );
             } else {
                 info!(
                     "A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
@@ -297,7 +309,8 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
         let mut buffer_lender = BufferLender::default();
         for &(term_bytes, addr, _) in term_addrs {
             let recorder: Rec = termdict_heap.read(addr);
-            serializer.new_term(&term_bytes[4..])?;
+            let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
+            serializer.new_term(&term_bytes[4..], term_doc_freq)?;
             recorder.serialize(&mut buffer_lender, serializer, heap)?;
             serializer.close_term()?;
         }

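Note on the change above: `index_text`/`subscribe` now receive a reusable `term_buffer: &mut Term` instead of building a fresh `Term` per field, so one allocation is reused across all tokens of a document. A minimal standalone sketch of that buffer-reuse pattern (a plain `String` stands in for `Term`; this is an illustration, not tantivy's API):

```rust
/// Illustration only: one scratch buffer is cleared and refilled per token,
/// the same idea as the reused `term_buffer` threaded through the diff above.
fn for_each_token(tokens: &[&str], mut emit: impl FnMut(&str)) {
    let mut buffer = String::new();
    for token in tokens {
        buffer.clear(); // keep the allocation, drop the previous content
        buffer.push_str(token);
        emit(&buffer);
    }
}

fn main() {
    let mut seen = Vec::new();
    for_each_token(&["rust", "search", "tantivy"], |t| seen.push(t.to_owned()));
    assert_eq!(seen, vec!["rust", "search", "tantivy"]);
}
```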
@@ -75,6 +75,10 @@ pub(crate) trait Recorder: Copy + 'static {
         serializer: &mut FieldSerializer<'_>,
         heap: &MemoryArena,
     ) -> io::Result<()>;
+    /// Returns the number of document containing this term.
+    ///
+    /// Returns `None` if not available.
+    fn term_doc_freq(&self) -> Option<u32>;
 }

 /// Only records the doc ids
@@ -113,11 +117,16 @@ impl Recorder for NothingRecorder {
     ) -> io::Result<()> {
         let buffer = buffer_lender.lend_u8();
         self.stack.read_to_end(heap, buffer);
+        // TODO avoid reading twice.
         for doc in VInt32Reader::new(&buffer[..]) {
             serializer.write_doc(doc as u32, 0u32, &[][..])?;
         }
         Ok(())
     }
+
+    fn term_doc_freq(&self) -> Option<u32> {
+        None
+    }
 }

 /// Recorder encoding document ids, and term frequencies
@@ -126,6 +135,7 @@ pub struct TermFrequencyRecorder {
     stack: ExpUnrolledLinkedList,
     current_doc: DocId,
     current_tf: u32,
+    term_doc_freq: u32,
 }

 impl Recorder for TermFrequencyRecorder {
@@ -134,6 +144,7 @@ impl Recorder for TermFrequencyRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::max_value(),
             current_tf: 0u32,
+            term_doc_freq: 0u32,
         }
     }

@@ -142,6 +153,7 @@ impl Recorder for TermFrequencyRecorder {
     }

     fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
+        self.term_doc_freq += 1;
         self.current_doc = doc;
         let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
     }
@@ -172,6 +184,10 @@ impl Recorder for TermFrequencyRecorder {

         Ok(())
     }
+
+    fn term_doc_freq(&self) -> Option<u32> {
+        Some(self.term_doc_freq)
+    }
 }

 /// Recorder encoding term frequencies as well as positions.
@@ -179,12 +195,14 @@ impl Recorder for TermFrequencyRecorder {
 pub struct TFAndPositionRecorder {
     stack: ExpUnrolledLinkedList,
     current_doc: DocId,
+    term_doc_freq: u32,
 }
 impl Recorder for TFAndPositionRecorder {
     fn new() -> Self {
         TFAndPositionRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::max_value(),
+            term_doc_freq: 0u32,
         }
     }

@@ -194,6 +212,7 @@ impl Recorder for TFAndPositionRecorder {

     fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
         self.current_doc = doc;
+        self.term_doc_freq += 1u32;
         let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
     }

@@ -233,6 +252,10 @@ impl Recorder for TFAndPositionRecorder {
         }
         Ok(())
     }
+
+    fn term_doc_freq(&self) -> Option<u32> {
+        Some(self.term_doc_freq)
+    }
 }

 #[cfg(test)]

@@ -1,27 +1,21 @@
 use crate::common::HasLen;
-
 use crate::docset::DocSet;
+use crate::fastfield::DeleteBitSet;
 use crate::positions::PositionReader;
-
 use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
-use crate::postings::serializer::PostingsSerializer;
 use crate::postings::BlockSearcher;
-
-use crate::postings::Postings;
-
-use crate::schema::IndexRecordOption;
-use crate::DocId;
-
-use crate::directory::ReadOnlySource;
 use crate::postings::BlockSegmentPostings;
+use crate::postings::Postings;
+use crate::{DocId, TERMINATED};

 /// `SegmentPostings` represents the inverted list or postings associated to
 /// a term in a `Segment`.
 ///
 /// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
 /// Positions on the other hand, are optionally entirely decoded upfront.
+#[derive(Clone)]
 pub struct SegmentPostings {
-    block_cursor: BlockSegmentPostings,
+    pub(crate) block_cursor: BlockSegmentPostings,
     cur: usize,
     position_reader: Option<PositionReader>,
     block_searcher: BlockSearcher,
@@ -38,6 +32,31 @@ impl SegmentPostings {
         }
     }

+    /// Compute the number of non-deleted documents.
+    ///
+    /// This method will clone and scan through the posting lists.
+    /// (this is a rather expensive operation).
+    pub fn doc_freq_given_deletes(&self, delete_bitset: &DeleteBitSet) -> u32 {
+        let mut docset = self.clone();
+        let mut doc_freq = 0;
+        loop {
+            let doc = docset.doc();
+            if doc == TERMINATED {
+                return doc_freq;
+            }
+            if delete_bitset.is_alive(doc) {
+                doc_freq += 1u32;
+            }
+            docset.advance();
+        }
+    }
+
+    /// Returns the overall number of documents in the block postings.
+    /// It does not take in account whether documents are deleted or not.
+    pub fn doc_freq(&self) -> u32 {
+        self.block_cursor.doc_freq()
+    }
+
     /// Creates a segment postings object with the given documents
     /// and no frequency encoded.
     ///
@@ -46,10 +65,16 @@ impl SegmentPostings {
     /// It serializes the doc ids using tantivy's codec
     /// and returns a `SegmentPostings` object that embeds a
     /// buffer with the serialized data.
+    #[cfg(test)]
     pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
+        use crate::directory::FileSlice;
+        use crate::postings::serializer::PostingsSerializer;
+        use crate::schema::IndexRecordOption;
         let mut buffer = Vec::new();
         {
-            let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false);
+            let mut postings_serializer =
+                PostingsSerializer::new(&mut buffer, 0.0, IndexRecordOption::Basic, None);
+            postings_serializer.new_term(docs.len() as u32);
             for &doc in docs {
                 postings_serializer.write_doc(doc, 1u32);
             }
@@ -57,12 +82,61 @@ impl SegmentPostings {
             .close_term(docs.len() as u32)
             .expect("In memory Serialization should never fail.");
         }
-        let block_segment_postings = BlockSegmentPostings::from_data(
+        let block_segment_postings = BlockSegmentPostings::open(
             docs.len() as u32,
-            ReadOnlySource::from(buffer),
+            FileSlice::from(buffer),
             IndexRecordOption::Basic,
             IndexRecordOption::Basic,
+        )
+        .unwrap();
+        SegmentPostings::from_block_postings(block_segment_postings, None)
+    }
+
+    /// Helper functions to create `SegmentPostings` for tests.
+    #[cfg(test)]
+    pub fn create_from_docs_and_tfs(
+        doc_and_tfs: &[(u32, u32)],
+        fieldnorms: Option<&[u32]>,
+    ) -> SegmentPostings {
+        use crate::directory::FileSlice;
+        use crate::postings::serializer::PostingsSerializer;
+        use crate::schema::IndexRecordOption;
+        use crate::fieldnorm::FieldNormReader;
+        use crate::Score;
+        let mut buffer: Vec<u8> = Vec::new();
+        let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
+        let average_field_norm = fieldnorms
+            .map(|fieldnorms| {
+                if fieldnorms.len() == 0 {
+                    return 0.0;
+                }
+                let total_num_tokens: u64 = fieldnorms
+                    .iter()
+                    .map(|&fieldnorm| fieldnorm as u64)
+                    .sum::<u64>();
+                total_num_tokens as Score / fieldnorms.len() as Score
+            })
+            .unwrap_or(0.0);
+        let mut postings_serializer = PostingsSerializer::new(
+            &mut buffer,
+            average_field_norm,
+            IndexRecordOption::WithFreqs,
+            fieldnorm_reader,
         );
+        postings_serializer.new_term(doc_and_tfs.len() as u32);
+        for &(doc, tf) in doc_and_tfs {
+            postings_serializer.write_doc(doc, tf);
+        }
+        postings_serializer
+            .close_term(doc_and_tfs.len() as u32)
+            .unwrap();
+        let block_segment_postings = BlockSegmentPostings::open(
+            doc_and_tfs.len() as u32,
+            FileSlice::from(buffer),
+            IndexRecordOption::WithFreqs,
+            IndexRecordOption::WithFreqs,
+        )
+        .unwrap();
         SegmentPostings::from_block_postings(block_segment_postings, None)
     }

@@ -90,6 +164,7 @@ impl DocSet for SegmentPostings {
     // next needs to be called a first time to point to the correct element.
     #[inline]
     fn advance(&mut self) -> DocId {
+        debug_assert!(self.block_cursor.block_is_loaded());
         if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
             self.cur = 0;
             self.block_cursor.advance();
@@ -100,14 +175,15 @@ impl DocSet for SegmentPostings {
     }

     fn seek(&mut self, target: DocId) -> DocId {
-        if self.doc() == target {
-            return target;
+        debug_assert!(self.doc() <= target);
+        if self.doc() >= target {
+            return self.doc();
         }

         self.block_cursor.seek(target);

         // At this point we are on the block, that might contain our document.
         let output = self.block_cursor.docs_aligned();

         self.cur = self.block_searcher.search_in_block(&output, target);

         // The last block is not full and padded with the value TERMINATED,
@@ -123,11 +199,12 @@ impl DocSet for SegmentPostings {
         // After the search, the cursor should point to the first value of TERMINATED.
         let doc = output.0[self.cur];
         debug_assert!(doc >= target);
+        debug_assert_eq!(doc, self.doc());
         doc
     }

     /// Return the current document's `DocId`.
-    #[inline]
+    #[inline(always)]
     fn doc(&self) -> DocId {
         self.block_cursor.doc(self.cur)
     }
@@ -139,7 +216,7 @@ impl DocSet for SegmentPostings {

 impl HasLen for SegmentPostings {
     fn len(&self) -> usize {
-        self.block_cursor.doc_freq()
+        self.block_cursor.doc_freq() as usize
     }
 }

@@ -194,6 +271,7 @@ mod tests {
     use crate::common::HasLen;

     use crate::docset::{DocSet, TERMINATED};
+    use crate::fastfield::DeleteBitSet;
     use crate::postings::postings::Postings;

     #[test]
@@ -216,4 +294,14 @@ mod tests {
         let postings = SegmentPostings::empty();
         assert_eq!(postings.term_freq(), 1);
     }
+
+    #[test]
+    fn test_doc_freq() {
+        let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
+        assert_eq!(docs.doc_freq(), 3);
+        let delete_bitset = DeleteBitSet::for_test(&[2], 12);
+        assert_eq!(docs.doc_freq_given_deletes(&delete_bitset), 2);
+        let all_deleted = DeleteBitSet::for_test(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
+        assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
+    }
 }

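Note on the change above: `doc_freq_given_deletes` clones the posting cursor and scans it once, counting only documents still alive in the `DeleteBitSet`. The same clone-and-scan pattern can be written against the public `DocSet` trait; the sketch below uses a hypothetical `is_alive` predicate and is an illustration, not part of the diff:

```rust
use tantivy::{DocId, DocSet, TERMINATED};

/// Hypothetical helper: counts the documents of `docset` that survive a
/// deletion predicate by scanning the posting list once, mirroring
/// `doc_freq_given_deletes` above.
fn count_alive<D: DocSet>(mut docset: D, is_alive: impl Fn(DocId) -> bool) -> u32 {
    let mut doc_freq = 0u32;
    loop {
        let doc = docset.doc();
        if doc == TERMINATED {
            return doc_freq;
        }
        if is_alive(doc) {
            doc_freq += 1;
        }
        docset.advance();
    }
}
```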
@@ -3,13 +3,16 @@ use crate::common::{BinarySerializable, VInt};
 use crate::common::{CompositeWrite, CountingWriter};
 use crate::core::Segment;
 use crate::directory::WritePtr;
+use crate::fieldnorm::FieldNormReader;
 use crate::positions::PositionSerializer;
 use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
 use crate::postings::skip::SkipSerializer;
-use crate::schema::Schema;
+use crate::query::BM25Weight;
 use crate::schema::{Field, FieldEntry, FieldType};
+use crate::schema::{IndexRecordOption, Schema};
 use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
-use crate::DocId;
+use crate::{DocId, Score};
+use std::cmp::Ordering;
 use std::io::{self, Write};

 /// `InvertedIndexSerializer` is in charge of serializing
@@ -89,20 +92,22 @@ impl InvertedIndexSerializer {
         &mut self,
         field: Field,
         total_num_tokens: u64,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> io::Result<FieldSerializer<'_>> {
         let field_entry: &FieldEntry = self.schema.get_field_entry(field);
         let term_dictionary_write = self.terms_write.for_field(field);
         let postings_write = self.postings_write.for_field(field);
-        total_num_tokens.serialize(postings_write)?;
         let positions_write = self.positions_write.for_field(field);
         let positionsidx_write = self.positionsidx_write.for_field(field);
         let field_type: FieldType = (*field_entry.field_type()).clone();
         FieldSerializer::create(
             &field_type,
+            total_num_tokens,
             term_dictionary_write,
             postings_write,
             positions_write,
             positionsidx_write,
+            fieldnorm_reader,
         )
     }

@@ -130,26 +135,32 @@ pub struct FieldSerializer<'a> {
 impl<'a> FieldSerializer<'a> {
     fn create(
         field_type: &FieldType,
+        total_num_tokens: u64,
         term_dictionary_write: &'a mut CountingWriter<WritePtr>,
         postings_write: &'a mut CountingWriter<WritePtr>,
         positions_write: &'a mut CountingWriter<WritePtr>,
         positionsidx_write: &'a mut CountingWriter<WritePtr>,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> io::Result<FieldSerializer<'a>> {
-        let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
+        total_num_tokens.serialize(postings_write)?;
+        let mode = match field_type {
             FieldType::Str(ref text_options) => {
                 if let Some(text_indexing_options) = text_options.get_indexing_options() {
-                    let index_option = text_indexing_options.index_option();
-                    (index_option.has_freq(), index_option.has_positions())
+                    text_indexing_options.index_option()
                 } else {
-                    (false, false)
+                    IndexRecordOption::Basic
                 }
             }
-            _ => (false, false),
+            _ => IndexRecordOption::Basic,
         };
         let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
+        let average_fieldnorm = fieldnorm_reader
+            .as_ref()
+            .map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
+            .unwrap_or(0.0);
         let postings_serializer =
-            PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
-        let positions_serializer_opt = if position_enabled {
+            PostingsSerializer::new(postings_write, average_fieldnorm, mode, fieldnorm_reader);
+        let positions_serializer_opt = if mode.has_positions() {
             Some(PositionSerializer::new(positions_write, positionsidx_write))
         } else {
             None
@@ -166,14 +177,16 @@ impl<'a> FieldSerializer<'a> {
     }

     fn current_term_info(&self) -> TermInfo {
-        let positions_idx = self
-            .positions_serializer_opt
-            .as_ref()
-            .map(PositionSerializer::positions_idx)
-            .unwrap_or(0u64);
+        let positions_idx =
+            if let Some(positions_serializer) = self.positions_serializer_opt.as_ref() {
+                positions_serializer.positions_idx()
+            } else {
+                0u64
+            };
         TermInfo {
             doc_freq: 0,
-            postings_offset: self.postings_serializer.addr(),
+            postings_start_offset: self.postings_serializer.addr(),
+            postings_stop_offset: 0u64,
             positions_idx,
         }
     }
@@ -181,18 +194,20 @@ impl<'a> FieldSerializer<'a> {
     /// Starts the postings for a new term.
     /// * term - the term. It needs to come after the previous term according
     /// to the lexicographical order.
-    /// * doc_freq - return the number of document containing the term.
-    pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> {
+    /// * term_doc_freq - return the number of document containing the term.
+    pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
         assert!(
             !self.term_open,
             "Called new_term, while the previous term was not closed."
         );
+
         self.term_open = true;
         self.postings_serializer.clear();
         self.current_term_info = self.current_term_info();
         self.term_dictionary_builder.insert_key(term)?;
         let term_ordinal = self.num_terms;
         self.num_terms += 1;
+        self.postings_serializer.new_term(term_doc_freq);
         Ok(term_ordinal)
     }

@@ -225,10 +240,11 @@ impl<'a> FieldSerializer<'a> {
     /// using `VInt` encoding.
     pub fn close_term(&mut self) -> io::Result<()> {
         if self.term_open {
-            self.term_dictionary_builder
-                .insert_value(&self.current_term_info)?;
             self.postings_serializer
                 .close_term(self.current_term_info.doc_freq)?;
+            self.current_term_info.postings_stop_offset = self.postings_serializer.addr();
+            self.term_dictionary_builder
+                .insert_value(&self.current_term_info)?;
             self.term_open = false;
         }
         Ok(())
@@ -304,16 +320,27 @@ pub struct PostingsSerializer<W: Write> {
     postings_write: Vec<u8>,
     skip_write: SkipSerializer,

-    termfreq_enabled: bool,
-    termfreq_sum_enabled: bool,
+    mode: IndexRecordOption,
+    fieldnorm_reader: Option<FieldNormReader>,
+
+    bm25_weight: Option<BM25Weight>,
+
+    num_docs: u32,        // Number of docs in the segment
+    avg_fieldnorm: Score, // Average number of term in the field for that segment.
+    // this value is used to compute the block wand information.
 }

 impl<W: Write> PostingsSerializer<W> {
     pub fn new(
         write: W,
-        termfreq_enabled: bool,
-        termfreq_sum_enabled: bool,
+        avg_fieldnorm: Score,
+        mode: IndexRecordOption,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> PostingsSerializer<W> {
+        let num_docs = fieldnorm_reader
+            .as_ref()
+            .map(|fieldnorm_reader| fieldnorm_reader.num_docs())
+            .unwrap_or(0u32);
         PostingsSerializer {
             output_write: CountingWriter::wrap(write),

@@ -324,8 +351,24 @@ impl<W: Write> PostingsSerializer<W> {
             skip_write: SkipSerializer::new(),

             last_doc_id_encoded: 0u32,
-            termfreq_enabled,
-            termfreq_sum_enabled,
+            mode,
+            fieldnorm_reader,
+            bm25_weight: None,
+
+            num_docs,
+            avg_fieldnorm,
+        }
+    }
+
+    pub fn new_term(&mut self, term_doc_freq: u32) {
+        if self.mode.has_freq() && self.num_docs > 0 {
+            let bm25_weight = BM25Weight::for_one_term(
+                term_doc_freq as u64,
+                self.num_docs as u64,
+                self.avg_fieldnorm,
+            );
+            self.bm25_weight = Some(bm25_weight);
         }
     }

@@ -341,17 +384,43 @@ impl<W: Write> PostingsSerializer<W> {
             // last el block 0, offset block 1,
             self.postings_write.extend(block_encoded);
         }
-        if self.termfreq_enabled {
-            // encode the term_freqs
+        if self.mode.has_freq() {
             let (num_bits, block_encoded): (u8, &[u8]) = self
                 .block_encoder
                 .compress_block_unsorted(&self.block.term_freqs());
             self.postings_write.extend(block_encoded);
             self.skip_write.write_term_freq(num_bits);
-            if self.termfreq_sum_enabled {
+            if self.mode.has_positions() {
+                // We serialize the sum of term freqs within the skip information
+                // in order to navigate through positions.
                 let sum_freq = self.block.term_freqs().iter().cloned().sum();
                 self.skip_write.write_total_term_freq(sum_freq);
             }
+            let mut blockwand_params = (0u8, 0u32);
+            if let Some(bm25_weight) = self.bm25_weight.as_ref() {
+                if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
+                    let docs = self.block.doc_ids().iter().cloned();
+                    let term_freqs = self.block.term_freqs().iter().cloned();
+                    let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
+                    blockwand_params = fieldnorms
+                        .zip(term_freqs)
+                        .max_by(
+                            |(left_fieldnorm_id, left_term_freq),
+                             (right_fieldnorm_id, right_term_freq)| {
+                                let left_score =
+                                    bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
+                                let right_score =
+                                    bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
+                                left_score
+                                    .partial_cmp(&right_score)
+                                    .unwrap_or(Ordering::Equal)
+                            },
+                        )
+                        .unwrap();
+                }
+            }
+            let (fieldnorm_id, term_freq) = blockwand_params;
+            self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
         }
         self.block.clear();
     }
@@ -382,7 +451,7 @@ impl<W: Write> PostingsSerializer<W> {
             self.postings_write.write_all(block_encoded)?;
         }
         // ... Idem for term frequencies
-        if self.termfreq_enabled {
+        if self.mode.has_freq() {
             let block_encoded = self
                 .block_encoder
                 .compress_vint_unsorted(self.block.term_freqs());
@@ -400,6 +469,7 @@ impl<W: Write> PostingsSerializer<W> {
         }
         self.skip_write.clear();
         self.postings_write.clear();
+        self.bm25_weight = None;
         Ok(())
     }

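Note on the change above: when a bit-packed block is closed, the serializer now scans its `(fieldnorm_id, term_freq)` pairs and keeps the pair with the highest BM25 term factor; `write_blockwand_max` stores that pair in the skip data so Block-WAND can upper-bound the block's score without decoding it. A self-contained sketch of that per-block selection, with a stand-in scoring closure in place of `BM25Weight::tf_factor` (illustrative names, not the crate's API):

```rust
use std::cmp::Ordering;

/// Picks the (fieldnorm_id, term_freq) pair of a block that maximizes a
/// scoring function, mirroring the `max_by` introduced in the diff above.
fn block_wand_params(
    fieldnorm_ids: &[u8],
    term_freqs: &[u32],
    tf_factor: impl Fn(u8, u32) -> f32,
) -> (u8, u32) {
    fieldnorm_ids
        .iter()
        .copied()
        .zip(term_freqs.iter().copied())
        .max_by(|&(lf, ltf), &(rf, rtf)| {
            tf_factor(lf, ltf)
                .partial_cmp(&tf_factor(rf, rtf))
                .unwrap_or(Ordering::Equal)
        })
        .unwrap_or((0u8, 0u32))
}

fn main() {
    // Toy scoring: higher term frequency and lower fieldnorm score better.
    let score = |fieldnorm_id: u8, tf: u32| tf as f32 / (1.0 + fieldnorm_id as f32);
    let best = block_wand_params(&[3, 1, 7], &[2, 5, 4], score);
    assert_eq!(best, (1, 5));
}
```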
@@ -1,32 +1,46 @@
-use crate::common::BinarySerializable;
-use crate::directory::ReadOnlySource;
+use std::convert::TryInto;
+
+use crate::directory::OwnedBytes;
 use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
+use crate::query::BM25Weight;
 use crate::schema::IndexRecordOption;
-use crate::{DocId, TERMINATED};
-use owned_read::OwnedRead;
+use crate::{DocId, Score, TERMINATED};
+
+#[inline(always)]
+fn encode_block_wand_max_tf(max_tf: u32) -> u8 {
+    max_tf.min(u8::MAX as u32) as u8
+}
+
+#[inline(always)]
+fn decode_block_wand_max_tf(max_tf_code: u8) -> u32 {
+    if max_tf_code == u8::MAX {
+        u32::MAX
+    } else {
+        max_tf_code as u32
+    }
+}
+
+#[inline(always)]
+fn read_u32(data: &[u8]) -> u32 {
+    u32::from_le_bytes(data[..4].try_into().unwrap())
+}
+
+#[inline(always)]
+fn write_u32(val: u32, buf: &mut Vec<u8>) {
+    buf.extend_from_slice(&val.to_le_bytes());
+}

 pub struct SkipSerializer {
     buffer: Vec<u8>,
-    prev_doc: DocId,
 }

 impl SkipSerializer {
     pub fn new() -> SkipSerializer {
-        SkipSerializer {
-            buffer: Vec::new(),
-            prev_doc: 0u32,
-        }
+        SkipSerializer { buffer: Vec::new() }
     }

     pub fn write_doc(&mut self, last_doc: DocId, doc_num_bits: u8) {
-        assert!(
-            last_doc > self.prev_doc,
-            "write_doc(...) called with non-increasing doc ids. \
-             Did you forget to call clear maybe?"
-        );
-        let delta_doc = last_doc - self.prev_doc;
-        self.prev_doc = last_doc;
-        delta_doc.serialize(&mut self.buffer).unwrap();
+        write_u32(last_doc, &mut self.buffer);
         self.buffer.push(doc_num_bits);
     }

@@ -35,9 +49,13 @@ impl SkipSerializer {
     }

     pub fn write_total_term_freq(&mut self, tf_sum: u32) {
-        tf_sum
-            .serialize(&mut self.buffer)
-            .expect("Should never fail");
+        write_u32(tf_sum, &mut self.buffer);
+    }
+
+    pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
+        let block_wand_tf = encode_block_wand_max_tf(term_freq);
+        self.buffer
+            .extend_from_slice(&[fieldnorm_id, block_wand_tf]);
     }

     pub fn data(&self) -> &[u8] {
@@ -45,15 +63,15 @@ impl SkipSerializer {
     }

     pub fn clear(&mut self) {
-        self.prev_doc = 0u32;
         self.buffer.clear();
     }
 }

+#[derive(Clone)]
 pub(crate) struct SkipReader {
     last_doc_in_block: DocId,
     pub(crate) last_doc_in_previous_block: DocId,
-    owned_read: OwnedRead,
+    owned_read: OwnedBytes,
     skip_info: IndexRecordOption,
     byte_offset: usize,
     remaining_docs: u32, // number of docs remaining, including the
@@ -69,41 +87,74 @@ pub(crate) enum BlockInfo {
         doc_num_bits: u8,
         tf_num_bits: u8,
         tf_sum: u32,
+        block_wand_fieldnorm_id: u8,
+        block_wand_term_freq: u32,
+    },
+    VInt {
+        num_docs: u32,
     },
-    VInt(u32),
 }

 impl Default for BlockInfo {
     fn default() -> Self {
-        BlockInfo::VInt(0)
+        BlockInfo::VInt { num_docs: 0u32 }
     }
 }

 impl SkipReader {
-    pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
-        SkipReader {
-            last_doc_in_block: 0u32,
+    pub fn new(data: OwnedBytes, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
+        let mut skip_reader = SkipReader {
+            last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+                0
+            } else {
+                TERMINATED
+            },
             last_doc_in_previous_block: 0u32,
-            owned_read: OwnedRead::new(data),
+            owned_read: data,
             skip_info,
-            block_info: BlockInfo::default(),
+            block_info: BlockInfo::VInt { num_docs: doc_freq },
             byte_offset: 0,
             remaining_docs: doc_freq,
             position_offset: 0u64,
+        };
+        if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+            skip_reader.read_block_info();
+        }
+        skip_reader
+    }
+
+    pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) {
+        self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+            0
+        } else {
+            TERMINATED
+        };
+        self.last_doc_in_previous_block = 0u32;
+        self.owned_read = data;
+        self.block_info = BlockInfo::VInt { num_docs: doc_freq };
+        self.byte_offset = 0;
+        self.remaining_docs = doc_freq;
+        self.position_offset = 0u64;
+        if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+            self.read_block_info();
         }
     }

-    pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
-        self.last_doc_in_block = 0u32;
-        self.last_doc_in_previous_block = 0u32;
-        self.owned_read = OwnedRead::new(data);
-        self.block_info = BlockInfo::default();
-        self.byte_offset = 0;
-        self.remaining_docs = doc_freq;
+    // Returns the block max score for this block if available.
+    //
+    // The block max score is available for all full bitpacked block,
+    // but no available for the last VInt encoded incomplete block.
+    pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
+        match self.block_info {
+            BlockInfo::BitPacked {
+                block_wand_fieldnorm_id,
+                block_wand_term_freq,
+                ..
+            } => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
+            BlockInfo::VInt { .. } => None,
+        }
     }

-    #[cfg(test)]
-    #[inline(always)]
     pub(crate) fn last_doc_in_block(&self) -> DocId {
         self.last_doc_in_block
     }
@@ -112,43 +163,56 @@ impl SkipReader {
         self.position_offset
     }

+    #[inline(always)]
     pub fn byte_offset(&self) -> usize {
         self.byte_offset
     }

     fn read_block_info(&mut self) {
-        let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
-        self.last_doc_in_block += doc_delta as DocId;
-        let doc_num_bits = self.owned_read.get(0);
+        let bytes = self.owned_read.as_slice();
+        let advance_len: usize;
+        self.last_doc_in_block = read_u32(bytes);
+        let doc_num_bits = bytes[4];
         match self.skip_info {
             IndexRecordOption::Basic => {
-                self.owned_read.advance(1);
+                advance_len = 5;
                 self.block_info = BlockInfo::BitPacked {
                     doc_num_bits,
                     tf_num_bits: 0,
                     tf_sum: 0,
+                    block_wand_fieldnorm_id: 0,
+                    block_wand_term_freq: 0,
                 };
             }
             IndexRecordOption::WithFreqs => {
-                let tf_num_bits = self.owned_read.get(1);
+                let tf_num_bits = bytes[5];
+                let block_wand_fieldnorm_id = bytes[6];
+                let block_wand_term_freq = decode_block_wand_max_tf(bytes[7]);
+                advance_len = 8;
                 self.block_info = BlockInfo::BitPacked {
                     doc_num_bits,
                     tf_num_bits,
                     tf_sum: 0,
+                    block_wand_fieldnorm_id,
+                    block_wand_term_freq,
                 };
-                self.owned_read.advance(2);
             }
             IndexRecordOption::WithFreqsAndPositions => {
-                let tf_num_bits = self.owned_read.get(1);
-                self.owned_read.advance(2);
-                let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
+                let tf_num_bits = bytes[5];
+                let tf_sum = read_u32(&bytes[6..10]);
+                let block_wand_fieldnorm_id = bytes[10];
+                let block_wand_term_freq = decode_block_wand_max_tf(bytes[11]);
+                advance_len = 12;
                 self.block_info = BlockInfo::BitPacked {
                     doc_num_bits,
                     tf_num_bits,
                     tf_sum,
+                    block_wand_fieldnorm_id,
+                    block_wand_term_freq,
                 };
             }
         }
+        self.owned_read.advance(advance_len);
     }

     pub fn block_info(&self) -> BlockInfo {
@@ -159,35 +223,44 @@ impl SkipReader {
     ///
     /// If the target is larger than all documents, the skip_reader
     /// then advance to the last Variable In block.
-    pub fn seek(&mut self, target: DocId) {
-        while self.last_doc_in_block < target {
+    pub fn seek(&mut self, target: DocId) -> bool {
+        if self.last_doc_in_block() >= target {
+            return false;
+        }
+        loop {
             self.advance();
+            if self.last_doc_in_block() >= target {
+                return true;
+            }
         }
     }

-    pub fn advance(&mut self) -> bool {
+    pub fn advance(&mut self) {
         match self.block_info {
             BlockInfo::BitPacked {
                 doc_num_bits,
                 tf_num_bits,
                 tf_sum,
+                ..
             } => {
                 self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
                 self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
                 self.position_offset += tf_sum as u64;
             }
-            BlockInfo::VInt(num_vint_docs) => {
-                self.remaining_docs -= num_vint_docs;
+            BlockInfo::VInt { num_docs } => {
+                debug_assert_eq!(num_docs, self.remaining_docs);
+                self.remaining_docs = 0;
+                self.byte_offset = std::usize::MAX;
             }
         }
         self.last_doc_in_previous_block = self.last_doc_in_block;
         if self.remaining_docs >= COMPRESSION_BLOCK_SIZE as u32 {
             self.read_block_info();
-            true
         } else {
             self.last_doc_in_block = TERMINATED;
-            self.block_info = BlockInfo::VInt(self.remaining_docs);
-            self.remaining_docs > 0
+            self.block_info = BlockInfo::VInt {
+                num_docs: self.remaining_docs,
+            };
         }
     }
 }
@@ -198,48 +271,71 @@ mod tests {
     use super::BlockInfo;
     use super::IndexRecordOption;
     use super::{SkipReader, SkipSerializer};
-    use crate::directory::ReadOnlySource;
+    use crate::directory::OwnedBytes;
     use crate::postings::compression::COMPRESSION_BLOCK_SIZE;

+    #[test]
+    fn test_encode_block_wand_max_tf() {
+        for tf in 0..255 {
+            assert_eq!(super::encode_block_wand_max_tf(tf), tf as u8);
+        }
+        for &tf in &[255, 256, 1_000_000, u32::MAX] {
+            assert_eq!(super::encode_block_wand_max_tf(tf), 255);
+        }
+    }
+
+    #[test]
+    fn test_decode_block_wand_max_tf() {
+        for tf in 0..255 {
+            assert_eq!(super::decode_block_wand_max_tf(tf), tf as u32);
+        }
+        assert_eq!(super::decode_block_wand_max_tf(255), u32::MAX);
+    }
+
     #[test]
     fn test_skip_with_freq() {
         let buf = {
             let mut skip_serializer = SkipSerializer::new();
             skip_serializer.write_doc(1u32, 2u8);
             skip_serializer.write_term_freq(3u8);
+            skip_serializer.write_blockwand_max(13u8, 3u32);
             skip_serializer.write_doc(5u32, 5u8);
             skip_serializer.write_term_freq(2u8);
+            skip_serializer.write_blockwand_max(8u8, 2u32);
             skip_serializer.data().to_owned()
         };
         let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
-        let mut skip_reader = SkipReader::new(
-            ReadOnlySource::new(buf),
-            doc_freq,
-            IndexRecordOption::WithFreqs,
-        );
-        assert!(skip_reader.advance());
+        let mut skip_reader =
+            SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::WithFreqs);
         assert_eq!(skip_reader.last_doc_in_block(), 1u32);
         assert_eq!(
-            skip_reader.block_info(),
+            skip_reader.block_info,
             BlockInfo::BitPacked {
                 doc_num_bits: 2u8,
                 tf_num_bits: 3u8,
-                tf_sum: 0
+                tf_sum: 0,
+                block_wand_fieldnorm_id: 13,
+                block_wand_term_freq: 3
             }
         );
-        assert!(skip_reader.advance());
+        skip_reader.advance();
         assert_eq!(skip_reader.last_doc_in_block(), 5u32);
         assert_eq!(
             skip_reader.block_info(),
             BlockInfo::BitPacked {
                 doc_num_bits: 5u8,
                 tf_num_bits: 2u8,
-                tf_sum: 0
+                tf_sum: 0,
+                block_wand_fieldnorm_id: 8,
+                block_wand_term_freq: 2
             }
         );
-        assert!(skip_reader.advance());
-        assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
-        assert!(!skip_reader.advance());
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
     }

     #[test]
@@ -251,34 +347,37 @@ mod tests {
             skip_serializer.data().to_owned()
         };
         let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
-        let mut skip_reader = SkipReader::new(
-            ReadOnlySource::from(buf),
-            doc_freq,
-            IndexRecordOption::Basic,
-        );
-        assert!(skip_reader.advance());
+        let mut skip_reader =
+            SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
         assert_eq!(skip_reader.last_doc_in_block(), 1u32);
         assert_eq!(
             skip_reader.block_info(),
             BlockInfo::BitPacked {
                 doc_num_bits: 2u8,
                 tf_num_bits: 0,
-                tf_sum: 0u32
+                tf_sum: 0u32,
+                block_wand_fieldnorm_id: 0,
+                block_wand_term_freq: 0
             }
         );
-        assert!(skip_reader.advance());
+        skip_reader.advance();
         assert_eq!(skip_reader.last_doc_in_block(), 5u32);
         assert_eq!(
             skip_reader.block_info(),
             BlockInfo::BitPacked {
                 doc_num_bits: 5u8,
                 tf_num_bits: 0,
-                tf_sum: 0u32
+                tf_sum: 0u32,
+                block_wand_fieldnorm_id: 0,
+                block_wand_term_freq: 0
             }
         );
-        assert!(skip_reader.advance());
-        assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
-        assert!(!skip_reader.advance());
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
     }

     #[test]
@@ -289,21 +388,20 @@ mod tests {
             skip_serializer.data().to_owned()
         };
         let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
-        let mut skip_reader = SkipReader::new(
-            ReadOnlySource::from(buf),
-            doc_freq,
-            IndexRecordOption::Basic,
-        );
-        assert!(skip_reader.advance());
+        let mut skip_reader =
+            SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
         assert_eq!(skip_reader.last_doc_in_block(), 1u32);
         assert_eq!(
             skip_reader.block_info(),
             BlockInfo::BitPacked {
                 doc_num_bits: 2u8,
                 tf_num_bits: 0,
-                tf_sum: 0u32
+                tf_sum: 0u32,
+                block_wand_fieldnorm_id: 0,
+                block_wand_term_freq: 0
             }
         );
-        assert!(!skip_reader.advance());
+        skip_reader.advance();
+        assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
     }
 }

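Note on the change above: the skip entry stores a block's maximum term frequency in a single byte. Encoding saturates at 255, and a stored 255 decodes back to `u32::MAX`, so the decoded value may over-estimate but never under-estimate the real maximum, which is exactly what a Block-WAND upper bound needs. A standalone sketch of that saturating round-trip and the invariant it preserves:

```rust
// Same shape as `encode_block_wand_max_tf` / `decode_block_wand_max_tf` above.
fn encode_max_tf(max_tf: u32) -> u8 {
    max_tf.min(u8::MAX as u32) as u8
}

fn decode_max_tf(code: u8) -> u32 {
    if code == u8::MAX {
        u32::MAX
    } else {
        code as u32
    }
}

fn main() {
    // The decoded value never under-estimates the original term frequency,
    // so a score bound computed from it stays a valid upper bound.
    for &tf in &[0u32, 1, 254, 255, 256, 1_000_000, u32::MAX] {
        assert!(decode_max_tf(encode_max_tf(tf)) >= tf);
    }
}
```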
@@ -206,8 +206,8 @@
     fn test_stack_long() {
         let mut heap = MemoryArena::new();
         let mut stack = ExpUnrolledLinkedList::new();
-        let source: Vec<u32> = (0..100).collect();
-        for &el in &source {
+        let data: Vec<u32> = (0..100).collect();
+        for &el in &data {
             assert!(stack
                 .writer(&mut heap)
                 .write_u32::<LittleEndian>(el)
@@ -221,7 +221,7 @@
             result.push(LittleEndian::read_u32(&remaining[..4]));
             remaining = &remaining[4..];
         }
-        assert_eq!(&result[..], &source[..]);
+        assert_eq!(&result[..], &data[..]);
     }

     #[test]

@@ -7,35 +7,50 @@ use std::io;
pub struct TermInfo {
    /// Number of documents in the segment containing the term
    pub doc_freq: u32,
-   /// Start offset within the postings (`.idx`) file.
-   pub postings_offset: u64,
+   /// Start offset of the posting list within the postings (`.idx`) file.
+   pub postings_start_offset: u64,
+   /// Stop offset of the posting list within the postings (`.idx`) file.
+   /// The byte range is `[start_offset..stop_offset)`.
+   pub postings_stop_offset: u64,
    /// Start offset of the first block within the position (`.pos`) file.
    pub positions_idx: u64,
}

+impl TermInfo {
+   pub(crate) fn posting_num_bytes(&self) -> u32 {
+       let num_bytes = self.postings_stop_offset - self.postings_start_offset;
+       assert!(num_bytes <= std::u32::MAX as u64);
+       num_bytes as u32
+   }
+}
+
impl FixedSize for TermInfo {
    /// Size required for the binary serialization of a `TermInfo` object.
    /// This is large, but in practise, `TermInfo` are encoded in blocks and
    /// only the first `TermInfo` of a block is serialized uncompressed.
    /// The subsequent `TermInfo` are delta encoded and bitpacked.
-   const SIZE_IN_BYTES: usize = u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES;
+   const SIZE_IN_BYTES: usize = 2 * u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES;
}

impl BinarySerializable for TermInfo {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        self.doc_freq.serialize(writer)?;
-       self.postings_offset.serialize(writer)?;
+       self.postings_start_offset.serialize(writer)?;
+       self.posting_num_bytes().serialize(writer)?;
        self.positions_idx.serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let doc_freq = u32::deserialize(reader)?;
-       let postings_offset = u64::deserialize(reader)?;
+       let postings_start_offset = u64::deserialize(reader)?;
+       let postings_num_bytes = u32::deserialize(reader)?;
+       let postings_stop_offset = postings_start_offset + u64::from(postings_num_bytes);
        let positions_idx = u64::deserialize(reader)?;
        Ok(TermInfo {
            doc_freq,
-           postings_offset,
+           postings_start_offset,
+           postings_stop_offset,
            positions_idx,
        })
    }
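Put concretely, the postings byte range is now persisted as a start offset plus a u32 length. A minimal round-trip sketch under the new layout (crate-internal types; `BinarySerializable` and `FixedSize` assumed in scope; the numeric values are illustrative only):

    // On disk: doc_freq (u32) + postings_start_offset (u64) + posting_num_bytes (u32) + positions_idx (u64).
    #[test]
    fn term_info_round_trip() -> std::io::Result<()> {
        let info = TermInfo {
            doc_freq: 12,
            postings_start_offset: 1_024,
            postings_stop_offset: 1_024 + 300, // 300 bytes of postings data
            positions_idx: 4_096,
        };
        let mut buf: Vec<u8> = Vec::new();
        info.serialize(&mut buf)?;
        assert_eq!(buf.len(), TermInfo::SIZE_IN_BYTES); // 2 * 4 + 2 * 8 = 24 bytes
        let decoded = TermInfo::deserialize(&mut &buf[..])?;
        assert_eq!(decoded.postings_stop_offset, 1_324);
        Ok(())
    }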
@@ -9,7 +9,7 @@ use crate::Score;

/// Query that matches all of the documents.
///
-/// All of the document get the score 1f32.
+/// All of the document get the score 1.0.
#[derive(Clone, Debug)]
pub struct AllQuery;

@@ -23,7 +23,7 @@ impl Query for AllQuery {
pub struct AllWeight;

impl Weight for AllWeight {
-   fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
+   fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
        let all_scorer = AllScorer {
            doc: 0u32,
            max_doc: reader.max_doc(),
@@ -35,7 +35,7 @@ impl Weight for AllWeight {
        if doc >= reader.max_doc() {
            return Err(does_not_match(doc));
        }
-       Ok(Explanation::new("AllQuery", 1f32))
+       Ok(Explanation::new("AllQuery", 1.0))
    }
}

@@ -66,7 +66,7 @@ impl DocSet for AllScorer {

impl Scorer for AllScorer {
    fn score(&mut self) -> Score {
-       1f32
+       1.0
    }
}

@@ -83,7 +83,7 @@ mod tests {
        let field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-       let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+       let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.add_document(doc!(field=>"aaa"));
        index_writer.add_document(doc!(field=>"bbb"));
        index_writer.commit().unwrap();
@@ -100,7 +100,7 @@ mod tests {
        let weight = AllQuery.weight(&searcher, false).unwrap();
        {
            let reader = searcher.segment_reader(0);
-           let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+           let mut scorer = weight.scorer(reader, 1.0).unwrap();
            assert_eq!(scorer.doc(), 0u32);
            assert_eq!(scorer.advance(), 1u32);
            assert_eq!(scorer.doc(), 1u32);
@@ -108,7 +108,7 @@ mod tests {
        }
        {
            let reader = searcher.segment_reader(1);
-           let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+           let mut scorer = weight.scorer(reader, 1.0).unwrap();
            assert_eq!(scorer.doc(), 0u32);
            assert_eq!(scorer.advance(), TERMINATED);
        }
@@ -122,14 +122,14 @@ mod tests {
        let weight = AllQuery.weight(&searcher, false).unwrap();
        let reader = searcher.segment_reader(0);
        {
-           let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
+           let mut scorer = weight.scorer(reader, 2.0).unwrap();
            assert_eq!(scorer.doc(), 0u32);
-           assert_eq!(scorer.score(), 2.0f32);
+           assert_eq!(scorer.score(), 2.0);
        }
        {
-           let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
+           let mut scorer = weight.scorer(reader, 1.5).unwrap();
            assert_eq!(scorer.doc(), 0u32);
-           assert_eq!(scorer.score(), 1.5f32);
+           assert_eq!(scorer.score(), 1.5);
        }
    }
}
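As a usage sketch of AllQuery from outside the crate (Count and Index are the public tantivy API; the index value is assumed to exist already):

    use tantivy::collector::Count;
    use tantivy::query::AllQuery;
    use tantivy::Index;

    // AllQuery matches every document, so with the Count collector this is a cheap "how many docs?".
    fn count_all_docs(index: &Index) -> tantivy::Result<usize> {
        let searcher = index.reader()?.searcher();
        searcher.search(&AllQuery, &Count)
    }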
@@ -5,9 +5,9 @@ use crate::query::{BitSetDocSet, Explanation};
use crate::query::{Scorer, Weight};
use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer};
-use crate::DocId;
-use crate::Result;
use crate::TantivyError;
+use crate::{DocId, Score};
+use std::io;
use std::sync::Arc;
use tantivy_fst::Automaton;

@@ -20,6 +20,7 @@ pub struct AutomatonWeight<A> {
impl<A> AutomatonWeight<A>
where
    A: Automaton + Send + Sync + 'static,
+   A::State: Clone,
{
    /// Create a new AutomationWeight
    pub fn new<IntoArcA: Into<Arc<A>>>(field: Field, automaton: IntoArcA) -> AutomatonWeight<A> {
@@ -29,7 +30,10 @@ where
        }
    }

-   fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
+   fn automaton_stream<'a>(
+       &'a self,
+       term_dict: &'a TermDictionary,
+   ) -> io::Result<TermStreamer<'a, &'a A>> {
        let automaton: &A = &*self.automaton;
        let term_stream_builder = term_dict.search(automaton);
        term_stream_builder.into_stream()
@@ -39,25 +43,27 @@ where
impl<A> Weight for AutomatonWeight<A>
where
    A: Automaton + Send + Sync + 'static,
+   A::State: Clone,
{
-   fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
+   fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
        let max_doc = reader.max_doc();
        let mut doc_bitset = BitSet::with_max_value(max_doc);
-       let inverted_index = reader.inverted_index(self.field);
+       let inverted_index = reader.inverted_index(self.field)?;
        let term_dict = inverted_index.terms();
-       let mut term_stream = self.automaton_stream(term_dict);
+       let mut term_stream = self.automaton_stream(term_dict)?;
        while term_stream.advance() {
            let term_info = term_stream.value();
            let mut block_segment_postings = inverted_index
-               .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
+               .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
            loop {
-               for &doc in block_segment_postings.docs() {
-                   doc_bitset.insert(doc);
-               }
-               if !block_segment_postings.advance() {
+               let docs = block_segment_postings.docs();
+               if docs.is_empty() {
                    break;
                }
+               for &doc in docs {
+                   doc_bitset.insert(doc);
+               }
+               block_segment_postings.advance();
            }
        }
        let doc_bitset = BitSetDocSet::from(doc_bitset);
@@ -65,10 +71,10 @@ where
        Ok(Box::new(const_scorer))
    }

-   fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-       let mut scorer = self.scorer(reader, 1.0f32)?;
+   fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+       let mut scorer = self.scorer(reader, 1.0)?;
        if scorer.seek(doc) == doc {
-           Ok(Explanation::new("AutomatonScorer", 1.0f32))
+           Ok(Explanation::new("AutomatonScorer", 1.0))
        } else {
            Err(TantivyError::InvalidArgument(
                "Document does not exist".to_string(),
@@ -90,7 +96,7 @@ mod tests {
        let mut schema = Schema::builder();
        let title = schema.add_text_field("title", STRING);
        let index = Index::create_in_ram(schema.build());
-       let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+       let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.add_document(doc!(title=>"abc"));
        index_writer.add_document(doc!(title=>"bcd"));
        index_writer.add_document(doc!(title=>"abcd"));
@@ -98,6 +104,7 @@ mod tests {
        index
    }

+   #[derive(Clone, Copy)]
    enum State {
        Start,
        NotMatching,
@@ -143,13 +150,13 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let mut scorer = automaton_weight
-           .scorer(searcher.segment_reader(0u32), 1.0f32)
+           .scorer(searcher.segment_reader(0u32), 1.0)
            .unwrap();
        assert_eq!(scorer.doc(), 0u32);
-       assert_eq!(scorer.score(), 1.0f32);
+       assert_eq!(scorer.score(), 1.0);
        assert_eq!(scorer.advance(), 2u32);
        assert_eq!(scorer.doc(), 2u32);
-       assert_eq!(scorer.score(), 1.0f32);
+       assert_eq!(scorer.score(), 1.0);
        assert_eq!(scorer.advance(), TERMINATED);
    }

@@ -161,9 +168,9 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let mut scorer = automaton_weight
-           .scorer(searcher.segment_reader(0u32), 1.32f32)
+           .scorer(searcher.segment_reader(0u32), 1.32)
            .unwrap();
        assert_eq!(scorer.doc(), 0u32);
-       assert_eq!(scorer.score(), 1.32f32);
+       assert_eq!(scorer.score(), 1.32);
    }
}
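The loop rewrite above encodes the new block-postings contract: docs() returning an empty slice is the exhaustion signal, and advance() no longer returns a bool. A self-contained toy model of the same iteration pattern (plain slices, no tantivy types):

    struct Blocks<'a> {
        blocks: &'a [&'a [u32]],
        cursor: usize,
    }

    impl<'a> Blocks<'a> {
        // An empty slice means "no more blocks".
        fn docs(&self) -> &'a [u32] {
            self.blocks.get(self.cursor).copied().unwrap_or(&[])
        }
        fn advance(&mut self) {
            self.cursor += 1;
        }
    }

    fn collect_all(mut postings: Blocks) -> Vec<u32> {
        let mut all = Vec::new();
        loop {
            let docs = postings.docs();
            if docs.is_empty() {
                break;
            }
            all.extend_from_slice(docs);
            postings.advance();
        }
        all
    }

    fn main() {
        let postings = Blocks { blocks: &[&[1, 2, 5], &[8, 13]], cursor: 0 };
        assert_eq!(collect_all(postings), vec![1, 2, 5, 8, 13]);
    }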
@@ -61,21 +61,23 @@ impl DocSet for BitSetDocSet {
    }

    fn seek(&mut self, target: DocId) -> DocId {
+       if target >= self.docs.max_value() {
+           self.doc = TERMINATED;
+           return TERMINATED;
+       }
        let target_bucket = target / 64u32;
-
-       // Mask for all of the bits greater or equal
-       // to our target document.
        if target_bucket > self.cursor_bucket {
            self.go_to_bucket(target_bucket);
            let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
            self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
-           self.advance();
+           self.advance()
+       } else {
+           let mut doc = self.doc();
+           while doc < target {
+               doc = self.advance();
+           }
+           doc
        }
-       let mut doc = self.doc();
-       while doc < target {
-           doc = self.advance();
-       }
-       doc
    }

    /// Returns the current document
@@ -114,6 +116,13 @@ mod tests {
        assert_eq!(empty.advance(), TERMINATED)
    }

+   #[test]
+   fn test_seek_terminated() {
+       let bitset = BitSet::with_max_value(1000);
+       let mut empty = BitSetDocSet::from(bitset);
+       assert_eq!(empty.seek(TERMINATED), TERMINATED)
+   }
+
    fn test_go_through_sequential(docs: &[DocId]) {
        let mut docset = create_docbitset(docs, 1_000u32);
        for &doc in docs {
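The seek fast path works on 64-document buckets: the target doc id maps to word target / 64, and a TinySet mask keeps only the bits at or above the target inside that word. The bucket arithmetic itself, as a standalone check:

    fn main() {
        // Illustrative only; the real code manipulates TinySet words.
        let target: u32 = 130;
        let target_bucket = target / 64; // word 2 covers doc ids 128..=191
        let bit_in_bucket = target % 64; // bit 2 inside that word
        assert_eq!((target_bucket, bit_in_bucket), (2, 2));
    }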
@@ -3,21 +3,24 @@ use crate::query::Explanation;
use crate::Score;
use crate::Searcher;
use crate::Term;
+use serde::Deserialize;
+use serde::Serialize;

-const K1: f32 = 1.2;
-const B: f32 = 0.75;
+const K1: Score = 1.2;
+const B: Score = 0.75;

-fn idf(doc_freq: u64, doc_count: u64) -> f32 {
-   let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
-   (1f32 + x).ln()
+fn idf(doc_freq: u64, doc_count: u64) -> Score {
+   assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq);
+   let x = ((doc_count - doc_freq) as Score + 0.5) / (doc_freq as Score + 0.5);
+   (1.0 + x).ln()
}

-fn cached_tf_component(fieldnorm: u32, average_fieldnorm: f32) -> f32 {
-   K1 * (1f32 - B + B * fieldnorm as f32 / average_fieldnorm)
+fn cached_tf_component(fieldnorm: u32, average_fieldnorm: Score) -> Score {
+   K1 * (1.0 - B + B * fieldnorm as Score / average_fieldnorm)
}

-fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
-   let mut cache = [0f32; 256];
+fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
+   let mut cache: [Score; 256] = [0.0; 256];
    for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() {
        let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8);
        *cache_mut = cached_tf_component(fieldnorm, average_fieldnorm);
@@ -25,15 +28,22 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
    cache
}

+#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
+pub struct BM25Params {
+   pub idf: Score,
+   pub avg_fieldnorm: Score,
+}
+
+#[derive(Clone)]
pub struct BM25Weight {
    idf_explain: Explanation,
-   weight: f32,
-   cache: [f32; 256],
-   average_fieldnorm: f32,
+   weight: Score,
+   cache: [Score; 256],
+   average_fieldnorm: Score,
}

impl BM25Weight {
-   pub fn boost_by(&self, boost: f32) -> BM25Weight {
+   pub fn boost_by(&self, boost: Score) -> BM25Weight {
        BM25Weight {
            idf_explain: self.idf_explain.clone(),
            weight: self.weight * boost,
@@ -42,7 +52,7 @@ impl BM25Weight {
        }
    }

-   pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
+   pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> crate::Result<BM25Weight> {
        assert!(!terms.is_empty(), "BM25 requires at least one term");
        let field = terms[0].field();
        for term in &terms[1..] {
@@ -56,38 +66,48 @@ impl BM25Weight {
        let mut total_num_tokens = 0u64;
        let mut total_num_docs = 0u64;
        for segment_reader in searcher.segment_readers() {
-           let inverted_index = segment_reader.inverted_index(field);
+           let inverted_index = segment_reader.inverted_index(field)?;
            total_num_tokens += inverted_index.total_num_tokens();
            total_num_docs += u64::from(segment_reader.max_doc());
        }
-       let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
+       let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score;

-       let mut idf_explain: Explanation;
        if terms.len() == 1 {
-           let term_doc_freq = searcher.doc_freq(&terms[0]);
-           let idf = idf(term_doc_freq, total_num_docs);
-           idf_explain =
-               Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
-           idf_explain.add_const(
-               "n, number of docs containing this term",
-               term_doc_freq as f32,
-           );
-           idf_explain.add_const("N, total number of docs", total_num_docs as f32);
+           let term_doc_freq = searcher.doc_freq(&terms[0])?;
+           Ok(BM25Weight::for_one_term(
+               term_doc_freq,
+               total_num_docs,
+               average_fieldnorm,
+           ))
        } else {
-           let idf = terms
-               .iter()
-               .map(|term| {
-                   let term_doc_freq = searcher.doc_freq(term);
-                   idf(term_doc_freq, total_num_docs)
-               })
-               .sum::<f32>();
-           idf_explain = Explanation::new("idf", idf);
+           let mut idf_sum: Score = 0.0;
+           for term in terms {
+               let term_doc_freq = searcher.doc_freq(term)?;
+               idf_sum += idf(term_doc_freq, total_num_docs);
+           }
+           let idf_explain = Explanation::new("idf", idf_sum);
+           Ok(BM25Weight::new(idf_explain, average_fieldnorm))
        }
-       BM25Weight::new(idf_explain, average_fieldnorm)
    }

-   fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
-       let weight = idf_explain.value() * (1f32 + K1);
+   pub fn for_one_term(
+       term_doc_freq: u64,
+       total_num_docs: u64,
+       avg_fieldnorm: Score,
+   ) -> BM25Weight {
+       let idf = idf(term_doc_freq, total_num_docs);
+       let mut idf_explain =
+           Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
+       idf_explain.add_const(
+           "n, number of docs containing this term",
+           term_doc_freq as Score,
+       );
+       idf_explain.add_const("N, total number of docs", total_num_docs as Score);
+       BM25Weight::new(idf_explain, avg_fieldnorm)
+   }
+
+   pub(crate) fn new(idf_explain: Explanation, average_fieldnorm: Score) -> BM25Weight {
+       let weight = idf_explain.value() * (1.0 + K1);
        BM25Weight {
            idf_explain,
            weight,
@@ -98,19 +118,27 @@ impl BM25Weight {

    #[inline(always)]
    pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
+       self.weight * self.tf_factor(fieldnorm_id, term_freq)
+   }
+
+   pub fn max_score(&self) -> Score {
+       self.score(255u8, 2_013_265_944)
+   }
+
+   #[inline(always)]
+   pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
+       let term_freq = term_freq as Score;
        let norm = self.cache[fieldnorm_id as usize];
-       let term_freq = term_freq as f32;
-       self.weight * term_freq / (term_freq + norm)
+       term_freq / (term_freq + norm)
    }

    pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation {
        // The explain format is directly copied from Lucene's.
        // (So, Kudos to Lucene)

        let score = self.score(fieldnorm_id, term_freq);

        let norm = self.cache[fieldnorm_id as usize];
-       let term_freq = term_freq as f32;
+       let term_freq = term_freq as Score;
        let right_factor = term_freq / (term_freq + norm);

        let mut tf_explanation = Explanation::new(
@@ -123,12 +151,12 @@ impl BM25Weight {
        tf_explanation.add_const("b, length normalization parameter", B);
        tf_explanation.add_const(
            "dl, length of field",
-           FieldNormReader::id_to_fieldnorm(fieldnorm_id) as f32,
+           FieldNormReader::id_to_fieldnorm(fieldnorm_id) as Score,
        );
        tf_explanation.add_const("avgdl, average length of field", self.average_fieldnorm);

        let mut explanation = Explanation::new("TermQuery, product of...", score);
-       explanation.add_detail(Explanation::new("(K1+1)", K1 + 1f32));
+       explanation.add_detail(Explanation::new("(K1+1)", K1 + 1.0));
        explanation.add_detail(self.idf_explain.clone());
        explanation.add_detail(tf_explanation);
        explanation
@@ -139,10 +167,11 @@ impl BM25Weight {
mod tests {

    use super::idf;
-   use crate::tests::assert_nearly_equals;
+   use crate::{assert_nearly_equals, Score};

    #[test]
    fn test_idf() {
-       assert_nearly_equals(idf(1, 2), 0.6931472);
+       let score: Score = 2.0;
+       assert_nearly_equals!(idf(1, 2), score.ln());
    }
}
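Plugging the test's numbers into idf above: with N = 2 documents and n = 1 containing the term, x = (1 + 0.5) / (1 + 0.5) = 1, so idf = ln(2) ≈ 0.6931, which is why the rewritten assertion compares against score.ln() with score = 2.0. As a standalone check:

    fn main() {
        // idf(doc_freq = 1, doc_count = 2), the case exercised by test_idf.
        let x = ((2.0_f32 - 1.0) + 0.5) / (1.0_f32 + 0.5); // = 1.0
        let idf = (1.0_f32 + x).ln(); // = ln(2) ≈ 0.6931472
        assert!((idf - 2.0_f32.ln()).abs() < 1e-6);
    }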
src/query/boolean_query/block_wand.rs (new file, 541 lines)
@@ -0,0 +1,541 @@
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::Deref;
use std::ops::DerefMut;

/// Takes a term_scorers sorted by their current doc() and a threshold and returns
/// Returns (pivot_len, pivot_ord) defined as follows:
/// - `pivot_doc` lowest document that has a chance of exceeding (>) the threshold score.
/// - `before_pivot_len` number of term_scorers such that term_scorer.doc() < pivot.
/// - `pivot_len` number of term_scorers such that term_scorer.doc() <= pivot.
///
/// We always have `before_pivot_len` < `pivot_len`.
///
/// None is returned if we establish that no document can exceed the threshold.
fn find_pivot_doc(
    term_scorers: &[TermScorerWithMaxScore],
    threshold: Score,
) -> Option<(usize, usize, DocId)> {
    let mut max_score = 0.0;
    let mut before_pivot_len = 0;
    let mut pivot_doc = TERMINATED;
    while before_pivot_len < term_scorers.len() {
        let term_scorer = &term_scorers[before_pivot_len];
        max_score += term_scorer.max_score;
        if max_score > threshold {
            pivot_doc = term_scorer.doc();
            break;
        }
        before_pivot_len += 1;
    }
    if pivot_doc == TERMINATED {
        return None;
    }
    // Right now i is an ordinal, we want a len.
    let mut pivot_len = before_pivot_len + 1;
    // Some other term_scorer may be positioned on the same document.
    pivot_len += term_scorers[pivot_len..]
        .iter()
        .take_while(|term_scorer| term_scorer.doc() == pivot_doc)
        .count();
    Some((before_pivot_len, pivot_len, pivot_doc))
}

// Before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer(
    scorers: &mut Vec<TermScorerWithMaxScore>,
    pivot_len: usize,
) {
    debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
    let mut scorer_to_seek = pivot_len - 1;
    let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
    for scorer_ord in (0..pivot_len - 1).rev() {
        let scorer = &scorers[scorer_ord];
        if scorer.last_doc_in_block() <= doc_to_seek_after {
            doc_to_seek_after = scorer.last_doc_in_block();
            scorer_to_seek = scorer_ord;
        }
    }
    for scorer in &scorers[pivot_len..] {
        if scorer.doc() <= doc_to_seek_after {
            doc_to_seek_after = scorer.doc();
        }
    }
    scorers[scorer_to_seek].seek(doc_to_seek_after + 1);
    restore_ordering(scorers, scorer_to_seek);
    debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
}

// Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted
// except term_scorers[ord] that might be in advance compared to its ranks,
// bubble up term_scorers[ord] in order to restore the ordering.
fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
    let doc = term_scorers[ord].doc();
    for i in ord + 1..term_scorers.len() {
        if term_scorers[i].doc() >= doc {
            break;
        }
        term_scorers.swap(i, i - 1);
    }
    debug_assert!(is_sorted(term_scorers.iter().map(|scorer| scorer.doc())));
}

// Attempts to advance all term_scorers between `&term_scorers[0..before_len]` to the pivot.
// If this works, return true.
// If this fails (ie: one of the term_scorer does not contain `pivot_doc` and seek goes past the
// pivot), reorder the term_scorers to ensure the list is still sorted and returns `false`.
// If a term_scorer reach TERMINATED in the process return false remove the term_scorer and return.
fn align_scorers(
    term_scorers: &mut Vec<TermScorerWithMaxScore>,
    pivot_doc: DocId,
    before_pivot_len: usize,
) -> bool {
    debug_assert_ne!(pivot_doc, TERMINATED);
    for i in (0..before_pivot_len).rev() {
        let new_doc = term_scorers[i].seek(pivot_doc);
        if new_doc != pivot_doc {
            if new_doc == TERMINATED {
                term_scorers.swap_remove(i);
            }
            // We went past the pivot.
            // We just go through the outer loop mechanic (Note that pivot is
            // still a possible candidate).
            //
            // Termination is still guaranteed since we can only consider the same
            // pivot at most term_scorers.len() - 1 times.
            restore_ordering(term_scorers, i);
            return false;
        }
    }
    true
}

// Assumes terms_scorers[..pivot_len] are positioned on the same doc (pivot_doc).
// Advance term_scorers[..pivot_len] and out of these removes the terminated scores.
// Restores the ordering of term_scorers.
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
    for term_scorer in &mut term_scorers[..pivot_len] {
        term_scorer.advance();
    }
    // TODO use drain_filter when available.
    let mut i = 0;
    while i != term_scorers.len() {
        if term_scorers[i].doc() == TERMINATED {
            term_scorers.swap_remove(i);
        } else {
            i += 1;
        }
    }
    term_scorers.sort_by_key(|scorer| scorer.doc());
}

pub fn block_wand(
    mut scorers: Vec<TermScorer>,
    mut threshold: Score,
    callback: &mut dyn FnMut(u32, Score) -> Score,
) {
    let mut scorers: Vec<TermScorerWithMaxScore> = scorers
        .iter_mut()
        .map(TermScorerWithMaxScore::from)
        .collect();
    scorers.sort_by_key(|scorer| scorer.doc());
    // At this point we need to ensure that the scorers are sorted!
    debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
    while let Some((before_pivot_len, pivot_len, pivot_doc)) =
        find_pivot_doc(&scorers[..], threshold)
    {
        debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
        debug_assert_ne!(pivot_doc, TERMINATED);
        debug_assert!(before_pivot_len < pivot_len);

        let block_max_score_upperbound: Score = scorers[..pivot_len]
            .iter_mut()
            .map(|scorer| {
                scorer.shallow_seek(pivot_doc);
                scorer.block_max_score()
            })
            .sum();

        // Beware after shallow advance, skip readers can be in advance compared to
        // the segment posting lists.
        //
        // `block_segment_postings.load_block()` need to be called separately.
        if block_max_score_upperbound <= threshold {
            // Block max condition was not reached
            // We could get away by simply advancing the scorers to DocId + 1 but it would
            // be inefficient. The optimization requires proper explanation and was
            // isolated in a different function.
            block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
            continue;
        }

        // Block max condition is observed.
        //
        // Let's try and advance all scorers before the pivot to the pivot.
        if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
            // At least of the scorer does not contain the pivot.
            //
            // Let's stop scoring this pivot and go through the pivot selection again.
            // Note that the current pivot is not necessarily a bad candidate and it
            // may be picked again.
            continue;
        }

        // At this point, all scorers are positioned on the doc.
        let score = scorers[..pivot_len]
            .iter_mut()
            .map(|scorer| scorer.score())
            .sum();
        if score > threshold {
            threshold = callback(pivot_doc, score);
        }
        // let's advance all of the scorers that are currently positioned on the pivot.
        advance_all_scorers_on_pivot(&mut scorers, pivot_len);
    }
}

struct TermScorerWithMaxScore<'a> {
    scorer: &'a mut TermScorer,
    max_score: Score,
}

impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
    fn from(scorer: &'a mut TermScorer) -> Self {
        let max_score = scorer.max_score();
        TermScorerWithMaxScore { scorer, max_score }
    }
}

impl<'a> Deref for TermScorerWithMaxScore<'a> {
    type Target = TermScorer;

    fn deref(&self) -> &Self::Target {
        self.scorer
    }
}

impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.scorer
    }
}

fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
    if let Some(first) = it.next() {
        let mut prev = first;
        for doc in it {
            if doc < prev {
                return false;
            }
            prev = doc;
        }
    }
    true
}
#[cfg(test)]
mod tests {
    use crate::query::score_combiner::SumCombiner;
    use crate::query::term_query::TermScorer;
    use crate::query::Union;
    use crate::query::{BM25Weight, Scorer};
    use crate::{DocId, DocSet, Score, TERMINATED};
    use proptest::prelude::*;
    use std::cmp::Ordering;
    use std::collections::BinaryHeap;
    use std::iter;

    struct Float(Score);

    impl Eq for Float {}

    impl PartialEq for Float {
        fn eq(&self, other: &Self) -> bool {
            self.cmp(&other) == Ordering::Equal
        }
    }

    impl PartialOrd for Float {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

    impl Ord for Float {
        fn cmp(&self, other: &Self) -> Ordering {
            other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
        }
    }

    fn nearly_equals(left: Score, right: Score) -> bool {
        (left - right).abs() < 0.0001 * (left + right).abs()
    }

    fn compute_checkpoints_for_each_pruning(
        term_scorers: Vec<TermScorer>,
        n: usize,
    ) -> Vec<(DocId, Score)> {
        let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
        let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
        let mut limit: Score = 0.0;
        super::block_wand(term_scorers, Score::MIN, &mut |doc, score| {
            heap.push(Float(score));
            if heap.len() > n {
                heap.pop().unwrap();
            }
            if heap.len() == n {
                limit = heap.peek().unwrap().0;
            }
            if !nearly_equals(score, limit) {
                checkpoints.push((doc, score));
            }
            return limit;
        });
        checkpoints
    }

    fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
        let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
        let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
        let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);

        let mut limit = Score::MIN;
        loop {
            if scorer.doc() == TERMINATED {
                break;
            }
            let doc = scorer.doc();
            let score = scorer.score();
            if score > limit {
                heap.push(Float(score));
                if heap.len() > n {
                    heap.pop().unwrap();
                }
                if heap.len() == n {
                    limit = heap.peek().unwrap().0;
                }
                if !nearly_equals(score, limit) {
                    checkpoints.push((doc, score));
                }
            }
            scorer.advance();
        }
        checkpoints
    }

    const MAX_TERM_FREQ: u32 = 100u32;

    fn posting_list(max_doc: u32) -> BoxedStrategy<Vec<(DocId, u32)>> {
        (1..max_doc + 1)
            .prop_flat_map(move |doc_freq| {
                (
                    proptest::bits::bitset::sampled(doc_freq as usize, 0..max_doc as usize),
                    proptest::collection::vec(1u32..MAX_TERM_FREQ, doc_freq as usize),
                )
            })
            .prop_map(|(docset, term_freqs)| {
                docset
                    .iter()
                    .map(|doc| doc as u32)
                    .zip(term_freqs.iter().cloned())
                    .collect::<Vec<_>>()
            })
            .boxed()
    }

    fn gen_term_scorers(num_scorers: usize) -> BoxedStrategy<(Vec<Vec<(DocId, u32)>>, Vec<u32>)> {
        (1u32..100u32)
            .prop_flat_map(move |max_doc: u32| {
                (
                    proptest::collection::vec(posting_list(max_doc), num_scorers),
                    proptest::collection::vec(2u32..10u32 * MAX_TERM_FREQ, max_doc as usize),
                )
            })
            .boxed()
    }

    fn test_block_wand_aux(posting_lists: &[Vec<(DocId, u32)>], fieldnorms: &[u32]) {
        // We virtually repeat all docs 64 times in order to emulate blocks of 2 documents
        // and surface blogs more easily.
        const REPEAT: usize = 64;
        let fieldnorms_expanded = fieldnorms
            .iter()
            .cloned()
            .flat_map(|fieldnorm| iter::repeat(fieldnorm).take(REPEAT))
            .collect::<Vec<u32>>();

        let postings_lists_expanded: Vec<Vec<(DocId, u32)>> = posting_lists
            .iter()
            .map(|posting_list| {
                posting_list
                    .into_iter()
                    .cloned()
                    .flat_map(|(doc, term_freq)| {
                        (0 as u32..REPEAT as u32).map(move |offset| {
                            (
                                doc * (REPEAT as u32) + offset,
                                if offset == 0 { term_freq } else { 1 },
                            )
                        })
                    })
                    .collect::<Vec<(DocId, u32)>>()
            })
            .collect::<Vec<_>>();

        let total_fieldnorms: u64 = fieldnorms_expanded
            .iter()
            .cloned()
            .map(|fieldnorm| fieldnorm as u64)
            .sum();
        let average_fieldnorm = (total_fieldnorms as Score) / (fieldnorms_expanded.len() as Score);
        let max_doc = fieldnorms_expanded.len();

        let term_scorers: Vec<TermScorer> = postings_lists_expanded
            .iter()
            .map(|postings| {
                let bm25_weight = BM25Weight::for_one_term(
                    postings.len() as u64,
                    max_doc as u64,
                    average_fieldnorm,
                );
                TermScorer::create_for_test(postings, &fieldnorms_expanded[..], bm25_weight)
            })
            .collect();
        for top_k in 1..4 {
            let checkpoints_for_each_pruning =
                compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
            let checkpoints_manual = compute_checkpoints_manual(term_scorers.clone(), top_k);
            assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
            for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
                .iter()
                .zip(checkpoints_manual.iter())
            {
                assert_eq!(left_doc, right_doc);
                assert!(nearly_equals(left_score, right_score));
            }
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(500))]
        #[test]
        fn test_block_wand_two_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(2)) {
            test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
        }
    }

    #[test]
    fn test_fn_reproduce_proptest() {
        let postings_lists = &[
            vec![
                (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (6, 1), (7, 7), (8, 1), (10, 1),
                (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (19, 1), (20, 1), (21, 1),
                (22, 1), (24, 1), (25, 1), (26, 1), (28, 1), (30, 1), (31, 1), (33, 1),
                (34, 1), (35, 1), (36, 95), (37, 1), (39, 1), (41, 1), (44, 1), (46, 1),
            ],
            vec![
                (0, 5), (2, 1), (4, 1), (5, 84), (6, 47), (7, 26), (8, 50), (9, 34),
                (11, 73), (12, 11), (13, 51), (14, 45), (15, 18), (18, 60), (19, 80),
                (20, 63), (23, 79), (24, 69), (26, 35), (28, 82), (29, 19), (30, 2),
                (31, 7), (33, 40), (34, 1), (35, 33), (36, 27), (37, 24), (38, 65),
                (39, 32), (40, 85), (41, 1), (42, 69), (43, 11), (45, 45), (47, 97),
            ],
            vec![
                (2, 1), (4, 1), (7, 94), (8, 1), (9, 1), (10, 1), (12, 1), (15, 1),
                (22, 1), (23, 1), (26, 1), (27, 1), (32, 1), (33, 1), (34, 1), (36, 96),
                (39, 1), (41, 1),
            ],
        ];
        let fieldnorms = &[
            685, 239, 780, 564, 664, 827, 5, 56, 930, 887, 263, 665, 167, 127, 120, 919, 292, 92,
            489, 734, 814, 724, 700, 304, 128, 779, 311, 877, 774, 15, 866, 368, 894, 371, 982,
            502, 507, 669, 680, 76, 594, 626, 578, 331, 170, 639, 665, 186,
        ][..];
        test_block_wand_aux(postings_lists, fieldnorms);
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(500))]
        #[ignore]
        #[test]
        #[ignore]
        fn test_block_wand_three_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(3)) {
            test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
        }
    }
}
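To make the pivot selection in find_pivot_doc concrete: with scorers positioned on docs 3, 7 and 9 carrying per-term max scores 1.0, 2.5 and 4.0, and a threshold of 3.0, the running sum first exceeds the threshold at the second scorer, so the pivot is doc 7 and before_pivot_len is 1. A toy standalone version of that scan (plain tuples instead of TermScorerWithMaxScore):

    // (current_doc, per-term max score) pairs, sorted by doc.
    fn find_pivot(scorers: &[(u32, f32)], threshold: f32) -> Option<(usize, u32)> {
        let mut acc = 0.0f32;
        for (before_pivot_len, &(doc, max_score)) in scorers.iter().enumerate() {
            acc += max_score;
            if acc > threshold {
                return Some((before_pivot_len, doc));
            }
        }
        None // no prefix of scorers can beat the threshold
    }

    fn main() {
        assert_eq!(find_pivot(&[(3, 1.0), (7, 2.5), (9, 4.0)], 3.0), Some((1, 7)));
    }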
@@ -83,7 +83,7 @@ use std::collections::BTreeSet;
/// ];
/// // Make a BooleanQuery equivalent to
/// // title:+diary title:-girl
-/// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
+/// let diary_must_and_girl_mustnot = BooleanQuery::new(queries_with_occurs1);
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
/// assert_eq!(count1, 1);
///
@@ -93,7 +93,7 @@ use std::collections::BTreeSet;
///     IndexRecordOption::Basic,
/// ));
/// // "title:diary OR title:cow"
-/// let title_diary_or_cow = BooleanQuery::from(vec![
+/// let title_diary_or_cow = BooleanQuery::new(vec![
///     (Occur::Should, diary_term_query.box_clone()),
///     (Occur::Should, cow_term_query),
/// ]);
@@ -108,7 +108,7 @@ use std::collections::BTreeSet;
/// // You can combine subqueries of different types into 1 BooleanQuery:
/// // `TermQuery` and `PhraseQuery`
/// // "title:diary OR "dairy cow"
-/// let term_of_phrase_query = BooleanQuery::from(vec![
+/// let term_of_phrase_query = BooleanQuery::new(vec![
///     (Occur::Should, diary_term_query.box_clone()),
///     (Occur::Should, phrase_query.box_clone()),
/// ]);
@@ -117,7 +117,7 @@ use std::collections::BTreeSet;
///
/// // You can nest one BooleanQuery inside another
/// // body:found AND ("title:diary OR "dairy cow")
-/// let nested_query = BooleanQuery::from(vec![
+/// let nested_query = BooleanQuery::new(vec![
///     (Occur::Must, body_term_query),
///     (Occur::Must, Box::new(term_of_phrase_query))
/// ]);
@@ -143,7 +143,7 @@ impl Clone for BooleanQuery {

impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery {
    fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
-       BooleanQuery { subqueries }
+       BooleanQuery::new(subqueries)
    }
}

@@ -167,6 +167,23 @@ impl Query for BooleanQuery {
}

impl BooleanQuery {
+   /// Creates a new boolean query.
+   pub fn new(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
+       BooleanQuery { subqueries }
+   }
+
+   /// Returns the intersection of the queries.
+   pub fn intersection(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
+       let subqueries = queries.into_iter().map(|s| (Occur::Must, s)).collect();
+       BooleanQuery::new(subqueries)
+   }
+
+   /// Returns the union of the queries.
+   pub fn union(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
+       let subqueries = queries.into_iter().map(|s| (Occur::Should, s)).collect();
+       BooleanQuery::new(subqueries)
+   }
+
    /// Helper method to create a boolean query matching a given list of terms.
    /// The resulting query is a disjunction of the terms.
    pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery {
@@ -178,7 +195,7 @@ impl BooleanQuery {
                (Occur::Should, term_query)
            })
            .collect();
-       BooleanQuery::from(occur_term_queries)
+       BooleanQuery::new(occur_term_queries)
    }

    /// Deconstructed view of the clauses making up this query.
@@ -186,3 +203,77 @@ impl BooleanQuery {
        &self.subqueries[..]
    }
}
+
+#[cfg(test)]
+mod tests {
+   use super::BooleanQuery;
+   use crate::collector::DocSetCollector;
+   use crate::query::{QueryClone, TermQuery};
+   use crate::schema::{IndexRecordOption, Schema, TEXT};
+   use crate::{DocAddress, Index, Term};
+
+   fn create_test_index() -> crate::Result<Index> {
+       let mut schema_builder = Schema::builder();
+       let text = schema_builder.add_text_field("text", TEXT);
+       let schema = schema_builder.build();
+       let index = Index::create_in_ram(schema);
+       let mut writer = index.writer_for_tests().unwrap();
+       writer.add_document(doc!(text=>"b c"));
+       writer.add_document(doc!(text=>"a c"));
+       writer.add_document(doc!(text=>"a b"));
+       writer.add_document(doc!(text=>"a d"));
+       writer.commit()?;
+       Ok(index)
+   }
+
+   #[test]
+   fn test_union() -> crate::Result<()> {
+       let index = create_test_index()?;
+       let searcher = index.reader()?.searcher();
+       let text = index.schema().get_field("text").unwrap();
+       let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
+       let term_d = TermQuery::new(Term::from_field_text(text, "d"), IndexRecordOption::Basic);
+       let union_ad = BooleanQuery::union(vec![term_a.box_clone(), term_d.box_clone()]);
+       let docs = searcher.search(&union_ad, &DocSetCollector)?;
+       assert_eq!(
+           docs,
+           vec![
+               DocAddress(0u32, 1u32),
+               DocAddress(0u32, 2u32),
+               DocAddress(0u32, 3u32)
+           ]
+           .into_iter()
+           .collect()
+       );
+       Ok(())
+   }
+
+   #[test]
+   fn test_intersection() -> crate::Result<()> {
+       let index = create_test_index()?;
+       let searcher = index.reader()?.searcher();
+       let text = index.schema().get_field("text").unwrap();
+       let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
+       let term_b = TermQuery::new(Term::from_field_text(text, "b"), IndexRecordOption::Basic);
+       let term_c = TermQuery::new(Term::from_field_text(text, "c"), IndexRecordOption::Basic);
+       let intersection_ab =
+           BooleanQuery::intersection(vec![term_a.box_clone(), term_b.box_clone()]);
+       let intersection_ac =
+           BooleanQuery::intersection(vec![term_a.box_clone(), term_c.box_clone()]);
+       let intersection_bc =
+           BooleanQuery::intersection(vec![term_b.box_clone(), term_c.box_clone()]);
+       {
+           let docs = searcher.search(&intersection_ab, &DocSetCollector)?;
+           assert_eq!(docs, vec![DocAddress(0u32, 2u32)].into_iter().collect());
+       }
+       {
+           let docs = searcher.search(&intersection_ac, &DocSetCollector)?;
+           assert_eq!(docs, vec![DocAddress(0u32, 1u32)].into_iter().collect());
+       }
+       {
+           let docs = searcher.search(&intersection_bc, &DocSetCollector)?;
+           assert_eq!(docs, vec![DocAddress(0u32, 0u32)].into_iter().collect());
+       }
+       Ok(())
+   }
+}
Some files were not shown because too many files have changed in this diff.