mirror of
https://github.com/quickwit-oss/tantivy.git
synced 2026-01-06 01:02:55 +00:00
Compare commits: refactorin ... issue/997 (328 commits; the commit table captured only SHA1 hashes, with no authors, dates, or messages).
.github/FUNDING.yml (vendored, new file, 12 lines)

# These are supported funding model platforms

github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.gitignore (vendored, 2 changed lines)

@@ -1,4 +1,5 @@
 tantivy.iml
+proptest-regressions
 *.swp
 target
 target/debug
@@ -11,3 +12,4 @@ cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
+cargo-timing*
ARCHITECTURE.md (new file, 297 lines)

# Tantivy

## What is tantivy?

Tantivy is a library meant for building search engines. Although it is by no means a port of Lucene, its architecture is strongly inspired by it. If you are familiar with Lucene, you may be struck by the overlapping vocabulary. This is not fortuitous.

Tantivy's bread and butter is to address the problem of full-text search:

Given a large set of textual documents and a text query, return the K most relevant documents very efficiently. In order to execute these queries rapidly, tantivy needs to build an index beforehand. The relevance score implemented in tantivy is not configurable: tantivy uses the same score as the default similarity in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).

But tantivy's scope does not stop there. Numerous features are required to power rich search applications. For instance, one may want to:

- compute the count of documents matching a query in the different sections of an e-commerce website,
- display an average price per square meter for a real estate search engine,
- take into account historical user data to rank documents in a specific way,
- or even use tantivy to power an OLAP database.

A more abstract description of the problem space tantivy is trying to address is the following:

Ingest a large set of documents, and create an index that makes it possible to rapidly select all documents matching a given predicate (also known as a query) and collect some information about them (see collector).

Roughly speaking, the design follows these guiding principles:

- Search should be O(1) in memory.
- Indexing should be O(1) in memory. (In practice it is just sublinear.)
- Search should be as fast as possible.

This comes at the cost of the dynamicity of the index: while it is possible to add and delete documents from the corpus, tantivy is designed to handle these updates in large batches.

## [core/](src/core): Index, segments, searchers.

Core contains all of the high-level code that makes it possible to create an index, add documents, delete documents and commit.

This is both the most high-level part of tantivy, the least performance-sensitive one, the seemingly most mundane code... and paradoxically the most complicated part.

### Index and Segments...

A tantivy index is in fact a collection of smaller independent immutable segments. Each segment contains its own independent set of data structures.

A segment is identified by a segment id that is in fact a UUID. The files of a segment have the format

```segment-id . ext ```

The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.

A small `meta.json` file is in charge of keeping track of the list of segments, as well as the schema.

On commit, one segment per indexing thread is written to disk, and the `meta.json` is then updated atomically.

For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).

### Deletes

Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it were a primary id, and delete the associated term if they want to delete only one specific document.

On commit, tantivy will find all of the segments with documents matching this existing term and create a [tombstone file](src/fastfield/delete.rs) that represents the bitset of the documents that are deleted. Like all segment files, this file is immutable. Because it is possible to have more than one tombstone file at a given instant, the tombstone filename has the format ``` segment_id . commit_opstamp . del```.

An opstamp is simply an incremental id that identifies any operation applied to the index, for instance performing a commit or adding a document.
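As a minimal sketch of the user-facing side of deletes (the `doc_id` field name and the 50 MB writer budget below are illustrative, not prescribed by tantivy), deleting "one" document therefore means deleting the term that uniquely identifies it, and committing:

```rust
use tantivy::{Index, Term};

fn delete_one_document(index: &Index, doc_id_field: tantivy::schema::Field) -> tantivy::Result<()> {
    let mut index_writer = index.writer(50_000_000)?;
    // Delete every document whose `doc_id` field contains the term "doc-42".
    // If `doc_id` is unique per document, this behaves like a primary-key delete.
    index_writer.delete_term(Term::from_field_text(doc_id_field, "doc-42"));
    // The delete only becomes visible (and the tombstone file is written) on commit.
    index_writer.commit()?;
    Ok(())
}
```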
### DocId

Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`, where max_doc is the number of documents in the segment (deleted or not). Having such a compact `DocId` space is key to the compression of our data structures.

The DocIds are simply allocated in the order documents are added to the index.

### Merges

In separate threads, tantivy's index writer searches for opportunities to merge segments. The point of segment merges is to:

- eventually get rid of tombstoned documents,
- reduce the otherwise ever-growing number of segments.

Indeed, while having several segments instead of one does not hurt search too much, having hundreds can have a measurable impact on search performance.

### Searcher

The user of the library usually does not need to know about the existence of segments. Searching is done through an object called a [`Searcher`](src/core/searcher.rs), which captures a snapshot of the index at one point in time by holding a list of [SegmentReader](src/core/segment_reader.rs)s.

In other words, regardless of the commits, file garbage collection, or segment merges that might happen, as long as the user holds and reuses the same [Searcher](src/core/searcher.rs), search will happen on an immutable snapshot of the index.

## [directory/](src/directory): Where should the data be stored?

Tantivy, like Lucene, abstracts the place where the data should be stored behind a key trait called [`Directory`](src/directory/directory.rs). Contrary to Lucene however, "files" are quite different from some kind of `io::Read` object. Check out the [`src/directory/directory.rs`](src/directory/directory.rs) trait for more details.

Tantivy ships two main directory implementations, the `MMapDirectory` and the `RAMDirectory`, but users can extend tantivy with their own implementation.
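As a minimal sketch, both bundled directories are usually reached through convenience constructors on `Index` rather than built directly (the path below is illustrative, and assumes the directory already exists):

```rust
use tantivy::schema::Schema;
use tantivy::Index;

fn open_indices(schema: Schema) -> tantivy::Result<()> {
    // Everything lives in anonymous memory: convenient for tests and examples.
    let ram_index = Index::create_in_ram(schema.clone());

    // Backed by the MMapDirectory: segment files are written under the given path.
    let mmap_index = Index::create_in_dir("/tmp/tantivy-index", schema)?;

    let _ = (ram_index, mmap_index);
    Ok(())
}
```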
## [schema/](src/schema): What are documents?

Tantivy's documents follow a very strict schema, decided before building any index.

The schema defines all of the fields that the index's [`Document`](src/schema/document.rs)s may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...), as well as how each field should be indexed / represented in tantivy.

Depending on the type of the field, you can decide to

- put it in the docstore,
- store it as a fast field,
- index it.

Practically, tantivy will push the values of such a field to up to 3 respective data structures (a small sketch follows).
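For illustration, here is a sketch of a small schema mixing the three options (the field names are made up for the example):

```rust
use tantivy::schema::{Schema, FAST, STORED, TEXT};

fn build_schema() -> Schema {
    let mut schema_builder = Schema::builder();
    // Indexed and tokenized, and also stored in the docstore.
    schema_builder.add_text_field("title", TEXT | STORED);
    // Indexed only: searchable, but not retrievable from the docstore.
    schema_builder.add_text_field("body", TEXT);
    // Stored as a fast field, for per-document random access (e.g. in a collector).
    schema_builder.add_u64_field("price", FAST);
    schema_builder.build()
}
```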
*Limitations*

As of today, tantivy's schema imposes a 1:1 relationship between a field that is being ingested and a field represented in the search index. In sophisticated search applications, it is fairly common to want to index a field twice using different tokenizers, or to index the concatenation of several fields together into one field.

This is not something tantivy supports, and it is up to the user to duplicate or concatenate fields before feeding them to tantivy.

## General information about these data structures.

All data structures in tantivy have:

- a writer
- a serializer
- a reader

The writer builds an in-memory representation of a batch of documents. This representation is not searchable; it is just meant as an intermediary mutable representation, to which we can sequentially add the documents of a batch. At the end of the batch (or if a memory limit is reached), this representation is then converted into an on-disk immutable representation that is extremely compact. This conversion is done by the serializer.

Finally, the reader is in charge of offering an API to read this on-disk read-only representation. In tantivy, readers are designed to require very little anonymous memory: the data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.

## [store/](src/store): Here is my DocId, Gimme my document!

The docstore is a row-oriented storage that, for each document, stores a subset of the fields that are marked as stored in the schema. The docstore is compressed using a general purpose algorithm like LZ4.

**Useful for**

In search engines, it is often used to display search results. Once the top 10 documents have been identified, we fetch them from the store, and display them or their snippet on the search result page (aka SERP).

**Not useful for**

Fetching a document from the store is typically a "slow" operation. It usually consists of

- searching into a compact tree-like data structure to find the position of the right block,
- decompressing a small block,
- returning the document from this block.

It is NOT meant to be called for every document matching a query. As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.

## [fastfield/](src/fastfield): Here is my DocId, Gimme my value!

Fast fields are stored in a column-oriented storage that allows for random access. The only compression applied is bitpacking. The column comes with two pieces of metadata: the minimum value in the column and the number of bits per doc.

Fetching a value for a `DocId` is then as simple as computing

```
min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
```

This operation just requires one memory fetch. Also, DocSets are scanned through in order (DocIds are iterated in a sorted manner), which further helps locality.
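The following sketch is not tantivy's actual reader; it only illustrates the arithmetic implied by the formula above, assuming a little-endian bitpacked byte buffer and a bit width small enough (here up to 57 bits) to fit in a single 8-byte read:

```rust
/// Illustrative only: read the `doc_id`-th value out of a bitpacked column.
/// `data` holds the packed bits little-endian, `num_bits` is the width per doc.
/// Assumes num_bits <= 57 and that the buffer is padded so the 8-byte read is in bounds.
fn fast_field_value(data: &[u8], min_value: u64, num_bits: u64, doc_id: u64) -> u64 {
    if num_bits == 0 {
        // All documents share the same value.
        return min_value;
    }
    let bit_offset = num_bits * doc_id;
    let byte_offset = (bit_offset / 8) as usize;
    // Read 8 bytes starting at the byte containing the first bit of our value.
    let mut buf = [0u8; 8];
    buf.copy_from_slice(&data[byte_offset..byte_offset + 8]);
    let word = u64::from_le_bytes(buf);
    let shift = bit_offset % 8;
    let mask = (1u64 << num_bits) - 1;
    min_value + ((word >> shift) & mask)
}
```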
In Lucene's jargon, fast fields are called DocValues.

**Useful for**

They are typically integer values that are useful to either rank or compute aggregates over all of the documents matching a query (aka [DocSet](src/docset.rs)).

For instance, one could define a function to combine upvotes with tantivy's internal relevancy score. This can be done by fetching a fast field during scoring. One could also compute the mean price of the items matching a query in an e-commerce website. This can be done by fetching a fast field in a collector. Finally, one could decide to post-filter a docset to remove documents with a price within a specific range. If the ratio of filtered-out documents is not too high, an efficient way to do this is to fetch the price and apply the filter on the collector side.

Aside from integer values, it is also possible to store an actual byte payload. For advanced search engines, it is possible to store all of the features required for learning-to-rank in a byte payload, access it during search, and apply the learning-to-rank model.

Finally, facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).

# The inverted search index.

The inverted index is the core part of full-text search.

When presented with a new document with the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting this string into tokens, it might also perform different kinds of operations like dropping the punctuation, converting the characters to lowercase, applying stemming, etc. Tantivy makes it possible to configure the operations to be applied in the schema (tokenizer/ is the place where these operations are implemented).

For instance, the default tokenizer of tantivy would break our text into: `[hello, happy, tax, payer]`. The document will therefore be registered in the inverted index as containing the terms `[text:hello, text:happy, text:tax, text:payer]`.

The role of the inverted index is, when given a term, to supply us with a very fast iterator over the sorted doc ids that match the term.

Such an iterator is called a posting list. In addition to giving us `DocId`s, it can optionally also give us the number of occurrences of the term in each document, also called the term frequency or TF.

These iterators being sorted by DocId, one can create an iterator over the documents containing `text:tax AND text:payer`, `(text:tax AND text:payer) OR (text:contribuable)`, or any boolean expression.
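To see why sorted doc id iterators compose so well, here is an illustrative (non-tantivy) AND of two posting lists, implemented as a linear merge of two sorted sequences:

```rust
use std::cmp::Ordering;

/// Illustrative AND of two posting lists, each given as a sorted slice of doc ids.
fn intersect(left: &[u32], right: &[u32]) -> Vec<u32> {
    let (mut i, mut j) = (0, 0);
    let mut out = Vec::new();
    while i < left.len() && j < right.len() {
        match left[i].cmp(&right[j]) {
            Ordering::Less => i += 1,
            Ordering::Greater => j += 1,
            Ordering::Equal => {
                // The doc id is present in both posting lists.
                out.push(left[i]);
                i += 1;
                j += 1;
            }
        }
    }
    out
}
```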
In order to represent the function

```Term ⟶ Posting```

the inverted index actually consists of two data structures chained together:

- [Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term dictionary.
- [TermInfo](src/postings/term_info.rs) ⟶ [Posting](src/postings/postings.rs) is addressed by the posting lists.

Here, [TermInfo](src/postings/term_info.rs) is an object containing some metadata about a term.

## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)!

Tantivy's term dictionary is mainly in charge of supplying the function

[Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)

It is itself broken into two parts:

- [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
- [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.

## [postings/](src/postings): Iterate over documents... very fast!

A posting list makes it possible to store a sorted list of doc ids, and for each doc, a term frequency as well.

The posting lists are stored in a separate file. The [TermInfo](src/postings/term_info.rs) contains an offset into that file and the number of documents for the given posting list. Both are required and sufficient to read the posting list.

The posting list is organized in blocks of 128 documents. One block of doc ids is followed by one block of term frequencies. The doc ids are delta-encoded and bitpacked; the term frequencies are bitpacked.

Because the number of docs is rarely a multiple of 128, the last block may contain anywhere between 1 and 127 documents. For that last block, we use variable int encoding instead of bitpacking.
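Here is an illustrative (non-tantivy) sketch of the delta-encoding step for one block: compute the gaps between consecutive doc ids, and the bit width a bitpacker would need to encode the largest gap.

```rust
/// Illustrative delta-encoding of one block of sorted doc ids.
/// `prev_doc` is the last doc id of the previous block (or 0 for the first block).
/// Returns the deltas and the bit width a bitpacker would need for this block.
fn delta_encode_block(prev_doc: u32, block: &[u32]) -> (Vec<u32>, u8) {
    let mut deltas = Vec::with_capacity(block.len());
    let mut prev = prev_doc;
    for &doc in block {
        deltas.push(doc - prev);
        prev = doc;
    }
    let max_delta = deltas.iter().copied().max().unwrap_or(0);
    // Number of bits required to represent the largest delta (0 needs 0 bits).
    let num_bits = (32 - max_delta.leading_zeros()) as u8;
    (deltas, num_bits)
}
```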
## [positions/](src/positions): Where are my terms within the documents?

Phrase queries make it possible to search for documents containing a specific sequence of terms. For instance, the phrase query "the art of war" does not match "the war of art". To make this possible, one can specify in the schema that a field should store positions in addition to being indexed.

The token positions of all of the terms are then stored in a separate file with the extension `.pos`. The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in positions this time) into this file. As we iterate through the docset, we advance the position reader by the term frequency of the current document.

## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?

The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires knowing the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm. The fieldnorm is therefore compressed: values up to 40 are encoded unchanged, and larger values are mapped logarithmically, so they are only stored approximately.

## [tokenizer/](src/tokenizer): How should we process text?

Text processing is key to a good search experience. Split or normalize your text too much, and the search results will have lower precision and higher recall. Under-split or under-normalize your text, and you will end up with higher precision and lower recall.

Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./src/tokenizer/tokenizer.rs) or implementing your own to first split the text into tokens, and then chaining different [`TokenFilter`](src/tokenizer/tokenizer.rs)s to it.

Tantivy comes with a few tokenizers, but external crates offer more advanced ones, such as [Lindera](https://crates.io/crates/lindera) for Japanese.
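For instance, a custom analyzer chain can be assembled and registered under a name that the schema then refers to. The sketch below is based on the tokenizer API around this version of tantivy; exact type names may differ between releases, and the `"custom_en"` name is illustrative:

```rust
use tantivy::tokenizer::{
    Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, TextAnalyzer,
};
use tantivy::Index;

fn register_custom_tokenizer(index: &Index) {
    // Split on whitespace/punctuation, drop very long tokens, lowercase, then stem.
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser)
        .filter(Stemmer::new(Language::English));
    index.tokenizers().register("custom_en", analyzer);
}
```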
## [query/](src/query): Define and compose queries

The [Query](src/query/query.rs) trait defines what a query is. Because some queries need to compute statistics over the entire index, and because the index is composed of several `SegmentReader`s, the path from a `Query` to an iterator over documents is slightly convoluted, but fundamentally, this is what a Query is.

The iterator over documents comes with a scoring function. The resulting trait is called a [Scorer](src/query/scorer.rs) and is specific to a segment.

Different queries can be combined using the [BooleanQuery](src/query/boolean_query/). Tantivy comes with different types of queries, and can be extended by implementing the `Query`, `Weight` and `Scorer` traits.
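From the user's point of view, a query is usually obtained from the `QueryParser` and handed to a searcher together with a collector. A minimal sketch (the `title` and `body` fields are assumed to be defined in the schema):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::Index;

fn search_top_10(index: &Index, title: Field, body: Field) -> tantivy::Result<()> {
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(index, vec![title, body]);
    let query = query_parser.parse_query("tax AND payer")?;
    // TopDocs drives the Weight/Scorer machinery on every segment and merges the results.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    for (score, doc_address) in top_docs {
        println!("{}: {:?}", score, doc_address);
    }
    Ok(())
}
```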
## [collector](src/collector): Define what to do with matched documents

Collectors define how to aggregate the documents matching a query, in the broadest sense possible. The search will push matched documents one by one, calling their `fn collect(doc: DocId, score: Score);` method.

Users may implement their own collectors by implementing the [Collector](src/collector/mod.rs) trait.
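To give an idea of the shape of that trait around this version of tantivy (a per-segment child collector plus a merge step; exact signatures and type names may differ between releases), a collector that merely counts matching documents looks roughly like this:

```rust
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::{DocId, Score, SegmentLocalId, SegmentReader};

struct Count;

impl Collector for Count {
    type Fruit = usize;
    type Child = SegmentCount;

    // Called once per segment: returns the per-segment collector.
    fn for_segment(
        &self,
        _segment_id: SegmentLocalId,
        _reader: &SegmentReader,
    ) -> tantivy::Result<SegmentCount> {
        Ok(SegmentCount(0))
    }

    // Counting does not need BM25 scores to be computed.
    fn requires_scoring(&self) -> bool {
        false
    }

    // Merge the per-segment counts into the final result.
    fn merge_fruits(&self, counts: Vec<usize>) -> tantivy::Result<usize> {
        Ok(counts.into_iter().sum())
    }
}

struct SegmentCount(usize);

impl SegmentCollector for SegmentCount {
    type Fruit = usize;

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.0 += 1;
    }

    fn harvest(self) -> usize {
        self.0
    }
}
```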
## [query-grammar](query-grammar): Defines the grammar of the query parser

While the [QueryParser](src/query/query_parser/query_parser.rs) struct is located in the `query/` directory, the actual parser combinator used to convert user queries into an AST lives in an external crate called `query-grammar`. This part was externalized to lighten the work of the compiler.
CHANGELOG.md (161 changed lines)

@@ -1,3 +1,96 @@ (new entries added above the Tantivy 0.11.0 section)

Tantivy 0.15.0
=========================
- API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
  This change is breaking but migration is trivial.
- Added a Histogram collector. (@fulmicoton) #994
- Added support for Option<TCollector>. (@fulmicoton)


Tantivy 0.14.0
=========================
- Remove dependency on atomicwrites #833. (Implemented by @fulmicoton upon suggestion and research from @asafigan.)
- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
- API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
- Large API Change in the Directory API. Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span the entire file.
- Added support for Brotli compression in the DocStore. (@ppodolsky)
- Added helper for building intersections and unions in BooleanQuery (@guilload)
- Bugfix in `Query::explain`
- Removed dependency on `notify` #924. Replaced with a `FileWatcher` struct that polls the meta file every 500ms in a background thread. (@halvorboe @guilload)
- Added `FilterCollector`, which wraps another collector and filters docs using a predicate over a fast field (@barrotsteindev)
- Simplified the encoding of the skip reader struct. The BlockWAND max tf is now encoded over a single byte. (@fulmicoton)
- `FilterCollector` now supports all Fast Field value types (@barrotsteindev)
- FastFields are no longer all loaded when opening the segment reader. (@fulmicoton)

This version breaks compatibility and requires users to reindex everything.


Tantivy 0.13.2
===================
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`. (#896)

Tantivy 0.13.1
===================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.

Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (BlockWAND information is added in the skip list).
The index size increase is minor, as this information is only added for
full blocks. If you have a massive index for which reindexing is not an option,
please contact me so that we can discuss possible solutions.

- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
- `MMapDirectory::open` does not return a `Result` anymore.
- Change in the DocSet and Scorer API. (@fulmicoton)
  A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
  `.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
  As a result, iterating through a DocSet now looks as follows:
```rust
let mut doc = docset.doc();
while doc != TERMINATED {
    // ...
    doc = docset.advance();
}
```
  The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
- Added Block WAND. Performance of top-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
  to the PISA team for answering all my questions!)

Tantivy 0.12.0
======================
- Removing static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
- Important Bugfix #777, causing tantivy to retain memory mappings. (diagnosed by @poljar)
- Added support for field boosting. (#547, @fulmicoton)

## How to update?

Crates relying on a custom tokenizer, or registering a tokenizer in the manager, will require some
minor changes. Check https://github.com/tantivy-search/tantivy/blob/main/examples/custom_tokenizer.rs
for some code samples.

Tantivy 0.11.3
=======================
- Fixed DateTime as a fast field (#735)

Tantivy 0.11.2
=======================
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
- Exposing a constructor for `WatchHandle` (#731)

Tantivy 0.11.1
=====================
- Bug fix #729


Tantivy 0.11.0
=====================
@@ -9,13 +102,19 @@ Tantivy 0.11.0
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
 - Add footer with some metadata to index files. #605 (@fdb-hiroshima)
+- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
+- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
+- Added handling of pre-tokenized text fields (#642), which will enable users to
+load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
+- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)

 ## How to update?

+- The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
 an error and handling the `Result` is required.
+- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`

 Tantivy 0.10.2
 =====================

@@ -35,26 +134,26 @@ Tantivy 0.10.0

 *Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*

 - Added an API to easily tweak or entirely replace the
-default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@pmasurel)
+default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@fulmicoton)
 - Added an ASCII folding filter (@drusellers)
-- Bugfix in `query.count` in presence of deletes (@pmasurel)
+- Bugfix in `query.count` in presence of deletes (@fulmicoton)
-- Added `.explain(...)` in `Query` and `Weight` to (@pmasurel)
+- Added `.explain(...)` in `Query` and `Weight` to (@fulmicoton)
 - Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
 All segments are simply removed.

 Minor
 ---------
 - Switched to Rust 2018 (@uvd)
 - Small simplification of the code.
 Calling .freq() or .doc() when .advance() has never been called
 on segment postings should panic from now on.
 - Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
 - Fast fields are now preloaded when the `SegmentReader` is created.
 - `IndexMeta` is now public. (@hntd187)
 - `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
 Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
-only require a read lock. (@pmasurel)
+only require a read lock. (@fulmicoton)
 - Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
 - Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
 - Bugfix - Files get deleted slightly earlier

@@ -68,7 +167,7 @@ Your program should be usable as is.

 Fast fields used to be accessed directly from the `SegmentReader`.
 The API changed, you are now required to acquire your fast field reader via the
 `segment_reader.fast_fields()`, and use one of the typed method:
 - `.u64()`, `.i64()` if your field is single-valued ;
 - `.u64s()`, `.i64s()` if your field is multi-valued ;
 - `.bytes()` if your field is bytes fast field.

@@ -77,16 +176,16 @@ The API changed, you are now required to acquire your fast field reader via the

 Tantivy 0.9.0
 =====================
 *0.9.0 index format is not compatible with the
 previous index format.*
 - MAJOR BUGFIX :
 Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
 - Removed most unsafe (@fulmicoton)
 - Indexer memory footprint improved. (VInt comp, inlining the first block. (@fulmicoton)
 - Stemming in other language possible (@pentlander)
 - Segments with no docs are deleted earlier (@barrotsteindev)
 - Added grouped add and delete operations.
 They are guaranteed to happen together (i.e. they cannot be split by a commit).
 In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
 - Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
 for int fields. (@fulmicoton)

@@ -100,26 +199,26 @@ tantivy 0.9 brought some API breaking change.
 To update from tantivy 0.8, you will need to go through the following steps.

 - `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::INT_STORED`.
 - The index now does not hold the pool of searcher anymore. You are required to create an intermediary object called
 `IndexReader` for this.

 ```rust
 // create the reader. You typically need to create 1 reader for the entire
 // lifetime of you program.
 let reader = index.reader()?;

 // Acquire a searcher (previously `index.searcher()`) is now written:
 let searcher = reader.searcher();

 // With the default setting of the reader, you are not required to
 // call `index.load_searchers()` anymore.
 //
 // The IndexReader will pick up that change automatically, regardless
 // of whether the update was done in a different process or not.
 // If this behavior is not wanted, you can create your reader with
 // the `ReloadPolicy::Manual`, and manually decide when to reload the index
 // by calling `reader.reload()?`.
 ```

@@ -134,7 +233,7 @@ Tantivy 0.8.1
 =====================
 Hotfix of #476.

 Merge was reflecting deletes before commit was passed.
 Thanks @barrotsteindev for reporting the bug.

@@ -142,7 +241,7 @@ Tantivy 0.8.0
 =====================
 *No change in the index format*
 - API Breaking change in the collector API. (@jwolfe, @fulmicoton)
 - Multithreaded search (@jwolfe, @fulmicoton)

 Tantivy 0.7.1

@@ -170,7 +269,7 @@ Tantivy 0.6.1
 - Exclusive `field:{startExcl to endExcl}`
 - Mixed `field:[startIncl to endExcl}` and vice versa
 - Unbounded `field:[start to *]`, `field:[* to end]`

 Tantivy 0.6
 ==========================

@@ -178,10 +277,10 @@ Tantivy 0.6
 Special thanks to @drusellers and @jason-wolfe for their contributions
 to this release!

-- Removed C code. Tantivy is now pure Rust. (@pmasurel)
+- Removed C code. Tantivy is now pure Rust. (@fulmicoton)
-- BM25 (@pmasurel)
+- BM25 (@fulmicoton)
-- Approximate field norms encoded over 1 byte. (@pmasurel)
+- Approximate field norms encoded over 1 byte. (@fulmicoton)
-- Compiles on stable rust (@pmasurel)
+- Compiles on stable rust (@fulmicoton)
 - Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
 - Completely uncompressed
 - Internally: One u64 fast field for indexes, one fast field for the bytes themselves.

@@ -189,7 +288,7 @@ to this release!
 - Add Stopword Filter support (@drusellers)
 - Add a FuzzyTermQuery (@drusellers)
 - Add a RegexQuery (@drusellers)
-- Various performance improvements (@pmasurel)_
+- Various performance improvements (@fulmicoton)_

 Tantivy 0.5.2
Cargo.toml (89 changed lines)

@@ -1,11 +1,11 @@
 [package]
 name = "tantivy"
-version = "0.11.0"
+version = "0.14.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
 description = """Search engine library"""
-documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
+documentation = "https://docs.rs/tantivy/"
 homepage = "https://github.com/tantivy-search/tantivy"
 repository = "https://github.com/tantivy-search/tantivy"
 readme = "README.md"

@@ -13,56 +13,56 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"

 [dependencies]
-base64 = "0.10.0"
+base64 = "0.13"
-byteorder = "1.0"
+byteorder = "1"
-crc32fast = "1.2.0"
+crc32fast = "1"
-once_cell = "1.0"
+once_cell = "1"
-regex ={version = "1.3.0", default-features = false, features = ["std"]}
+regex ={version = "1", default-features = false, features = ["std"]}
-tantivy-fst = "0.1"
+tantivy-fst = "0.3"
 memmap = {version = "0.7", optional=true}
-lz4 = {version="1.20", optional=true}
+lz4 = {version="1", optional=true}
-snap = {version="0.2"}
-atomicwrites = {version="0.2.2", optional=true}
-tempfile = "3.0"
+brotli = {version="3.3.0", optional=true}
+snap = "1"
+tempfile = {version="3", optional=true}
 log = "0.4"
-serde = "1.0"
-serde_derive = "1.0"
-serde_json = "1.0"
-num_cpus = "1.2"
+serde = {version="1", features=["derive"]}
+serde_json = "1"
+num_cpus = "1"
 fs2={version="0.4", optional=true}
-itertools = "0.8"
-levenshtein_automata = {version="0.1", features=["fst_automaton"]}
-notify = {version="4", optional=true}
-bit-set = "0.5"
-uuid = { version = "0.7.2", features = ["v4", "serde"] }
-crossbeam = "0.7"
-futures = "0.1"
-futures-cpupool = "0.1"
-owning_ref = "0.4"
-stable_deref_trait = "1.0.0"
-rust-stemmers = "1.1"
-downcast-rs = { version="1.0" }
-tantivy-query-grammar = { path="./query-grammar" }
+levenshtein_automata = "0.2"
+uuid = { version = "0.8", features = ["v4", "serde"] }
+crossbeam = "0.8"
+futures = {version = "0.3", features=["thread-pool"] }
+tantivy-query-grammar = { version="0.14.0", path="./query-grammar" }
+stable_deref_trait = "1"
+rust-stemmers = "1"
+downcast-rs = "1"
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.2"
+census = "0.4"
-fnv = "1.0.6"
+fnv = "1"
-owned-read = "0.4"
-failure = "0.1"
-htmlescape = "0.3.1"
-fail = "0.3"
-scoped-pool = "1.0"
+thiserror = "1.0"
+htmlescape = "0.3"
+fail = "0.4"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "0.6"
+smallvec = "1"
+rayon = "1"
+lru = "0.6"
+fastdivide = "0.3"

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"

 [dev-dependencies]
-rand = "0.7"
+rand = "0.8"
 maplit = "1"
 matches = "0.1.8"
-time = "0.1.42"
+proptest = "1.0"
+criterion = "0.3"
+
+[dev-dependencies.fail]
+version = "0.4"
+features = ["failpoints"]

 [profile.release]
 opt-level = 3

@@ -75,7 +75,8 @@ overflow-checks = true

 [features]
 default = ["mmap"]
-mmap = ["atomicwrites", "fs2", "memmap", "notify"]
+mmap = ["fs2", "tempfile", "memmap"]
+brotli-compression = ["brotli"]
 lz4-compression = ["lz4"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.

@@ -87,10 +88,6 @@ members = ["query-grammar"]
 [badges]
 travis-ci = { repository = "tantivy-search/tantivy" }

-[dev-dependencies.fail]
-version = "0.3"
-features = ["failpoints"]
-
 # Following the "fail" crate best practises, we isolate
 # tests that define specific behavior in fail check points
 # in a different binary.

@@ -102,3 +99,7 @@ features = ["failpoints"]
 name = "failpoints"
 path = "tests/failpoints/mod.rs"
 required-features = ["fail/failpoints"]
+
+[[bench]]
+name = "analyzer"
+harness = false
23
README.md
23
README.md
@@ -1,11 +1,10 @@
|
|||||||
|
|
||||||
[](https://travis-ci.org/tantivy-search/tantivy)
|
[](https://travis-ci.org/tantivy-search/tantivy)
|
||||||
[](https://codecov.io/gh/tantivy-search/tantivy)
|
[](https://codecov.io/gh/tantivy-search/tantivy)
|
||||||
[](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
[](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||||
[](https://opensource.org/licenses/MIT)
|
[](https://opensource.org/licenses/MIT)
|
||||||
[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
|
[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/main)
|
||||||
[](https://crates.io/crates/tantivy)
|
[](https://crates.io/crates/tantivy)
|
||||||
[](https://saythanks.io/to/fulmicoton)
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
@@ -31,16 +30,15 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
|
|||||||
|
|
||||||
# Benchmark
|
# Benchmark
|
||||||
|
|
||||||
Tantivy is typically faster than Lucene, but the results depend on
|
|
||||||
the nature of the queries in your workload.
|
|
||||||
|
|
||||||
The following [benchmark](https://tantivy-search.github.io/bench/) break downs
|
The following [benchmark](https://tantivy-search.github.io/bench/) break downs
|
||||||
performance for different type of queries / collection.
|
performance for different type of queries / collection.
|
||||||
|
|
||||||
|
Your mileage WILL vary depending on the nature of queries and their load.
|
||||||
|
|
||||||
# Features
|
# Features
|
||||||
|
|
||||||
- Full-text search
|
- Full-text search
|
||||||
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter))
|
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy) and [tantivy-tokenizer-tiny-segmente](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
|
||||||
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
|
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
|
||||||
- Tiny startup time (<10ms), perfect for command line tools
|
- Tiny startup time (<10ms), perfect for command line tools
|
||||||
- BM25 scoring (the same as Lucene)
|
- BM25 scoring (the same as Lucene)
|
||||||
@@ -59,18 +57,17 @@ performance for different type of queries / collection.

- Configurable indexing (optional term frequency and position indexing)
- Cheesy logo with a horse

-# Non-features
+## Non-features

- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
  library upon which one could build a distributed search. Serializable/mergeable collector state, for instance,
  is within the scope of Tantivy.

-# Supported OS and compiler
-
-Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
-
# Getting started

+Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
+
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
  index documents, and search via the CLI or a small server with a REST API.
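To give a feel for what those examples build up to, here is a minimal end-to-end sketch in the spirit of the basic_search example linked above. It is a sketch only: the field name, the documents, and the 50 MB writer buffer are illustrative, and exact signatures may differ slightly between tantivy versions.

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Define a schema with a single indexed, stored text field.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    // Index a couple of documents in RAM.
    let index = Index::create_in_ram(schema.clone());
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(title => "The Old Man and the Sea"));
    index_writer.add_document(doc!(title => "Frankenstein"));
    index_writer.commit()?;

    // Search for "old man" and print the matches.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("old man")?;
    for (_score, doc_address) in searcher.search(&query, &TopDocs::with_limit(10))? {
        println!("{}", schema.to_json(&searcher.doc(doc_address)?));
    }
    Ok(())
}
```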
@@ -87,7 +84,7 @@ There are many ways to support this project.

- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
- Talk about Tantivy around you
-- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)
+- [](https://www.patreon.com/fulmicoton)

# Contributing code
@@ -18,5 +18,5 @@ install:

build: false

test_script:
-  - REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap
+  - REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features mmap
  - REM SET RUST_BACKTRACE=1 & cargo build --examples
benches/alice.txt (new file, 3774 lines; file diff suppressed because it is too large)
benches/analyzer.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tantivy::tokenizer::TokenizerManager;

const ALICE_TXT: &'static str = include_str!("alice.txt");

pub fn criterion_benchmark(c: &mut Criterion) {
    let tokenizer_manager = TokenizerManager::default();
    let tokenizer = tokenizer_manager.get("default").unwrap();
    c.bench_function("default-tokenize-alice", |b| {
        b.iter(|| {
            let mut word_count = 0;
            let mut token_stream = tokenizer.token_stream(ALICE_TXT);
            while token_stream.advance() {
                word_count += 1;
            }
            assert_eq!(word_count, 30_731);
        })
    });
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
@@ -112,18 +112,6 @@ fn main() -> tantivy::Result<()> {
    limbs and branches that arch over the pool"
    ));
-
-    index_writer.add_document(doc!(
-    title => "Of Mice and Men",
-    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
-            bank and runs deep and green. The water is warm too, for it has slipped twinkling \
-            over the yellow sands in the sunlight before reaching the narrow pool. On one \
-            side of the river the golden foothill slopes curve up to the strong and rocky \
-            Gabilan Mountains, but on the valley side the water is lined with trees—willows \
-            fresh and green with every spring, carrying in their lower leaf junctures the \
-            debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
-            limbs and branches that arch over the pool"
-    ));

    // Multivalued field just need to be repeated.
    index_writer.add_document(doc!(
    title => "Frankenstein",
@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::{doc, Index, SegmentReader, TantivyError};
+use tantivy::{doc, Index, Score, SegmentReader};

#[derive(Default)]
struct Stats {

@@ -72,16 +72,7 @@ impl Collector for StatsCollector {
        _segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> tantivy::Result<StatsSegmentCollector> {
-        let fast_field_reader = segment_reader
-            .fast_fields()
-            .u64(self.field)
-            .ok_or_else(|| {
-                let field_name = segment_reader.schema().get_field_name(self.field);
-                TantivyError::SchemaError(format!(
-                    "Field {:?} is not a u64 fast field.",
-                    field_name
-                ))
-            })?;
+        let fast_field_reader = segment_reader.fast_fields().u64(self.field)?;
        Ok(StatsSegmentCollector {
            fast_field_reader,
            stats: Stats::default(),

@@ -114,7 +105,7 @@ struct StatsSegmentCollector {
impl SegmentCollector for StatsSegmentCollector {
    type Fruit = Option<Stats>;

-    fn collect(&mut self, doc: u32, _score: f32) {
+    fn collect(&mut self, doc: u32, _score: Score) {
        let value = self.fast_field_reader.get(doc) as f64;
        self.stats.count += 1;
        self.stats.sum += value;
@@ -13,63 +13,100 @@
// ---
// Importing tantivy...
use tantivy::collector::FacetCollector;
-use tantivy::query::AllQuery;
+use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::*;
use tantivy::{doc, Index};
-use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
-    // Let's create a temporary directory for the
-    // sake of this example
-    let index_path = TempDir::new()?;
+    // Let's create a temporary directory for the sake of this example
    let mut schema_builder = Schema::builder();

-    schema_builder.add_text_field("name", TEXT | STORED);
-    // this is our faceted field
-    schema_builder.add_facet_field("tags");
+    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
+    // this is our faceted field: its scientific classification
+    let classification = schema_builder.add_facet_field("classification", INDEXED);

    let schema = schema_builder.build();
+    let index = Index::create_in_ram(schema);

-    let index = Index::create_in_dir(&index_path, schema.clone())?;
-
-    let mut index_writer = index.writer(50_000_000)?;
-
-    let name = schema.get_field("name").unwrap();
-    let tags = schema.get_field("tags").unwrap();
+    let mut index_writer = index.writer(30_000_000)?;

    // For convenience, tantivy also comes with a macro to
    // reduce the boilerplate above.
    index_writer.add_document(doc!(
-        name => "the ditch",
-        tags => Facet::from("/pools/north")
+        name => "Cat",
+        classification => Facet::from("/Felidae/Felinae/Felis")
    ));

    index_writer.add_document(doc!(
-        name => "little stacey",
-        tags => Facet::from("/pools/south")
+        name => "Canada lynx",
+        classification => Facet::from("/Felidae/Felinae/Lynx")
+    ));
+    index_writer.add_document(doc!(
+        name => "Cheetah",
+        classification => Facet::from("/Felidae/Felinae/Acinonyx")
+    ));
+    index_writer.add_document(doc!(
+        name => "Tiger",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Lion",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Jaguar",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Sunda clouded leopard",
+        classification => Facet::from("/Felidae/Pantherinae/Neofelis")
+    ));
+    index_writer.add_document(doc!(
+        name => "Fossa",
+        classification => Facet::from("/Eupleridae/Cryptoprocta")
    ));

    index_writer.commit()?;

    let reader = index.reader()?;

    let searcher = reader.searcher();
+    {
+        let mut facet_collector = FacetCollector::for_field(classification);
+        facet_collector.add_facet("/Felidae");
+        let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
+        // This lists all of the facet counts, right below "/Felidae".
+        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
+        assert_eq!(
+            facets,
+            vec![
+                (&Facet::from("/Felidae/Felinae"), 3),
+                (&Facet::from("/Felidae/Pantherinae"), 4),
+            ]
+        );
+    }

-    let mut facet_collector = FacetCollector::for_field(tags);
-    facet_collector.add_facet("/pools");
-
-    let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
-
-    // This lists all of the facet counts
-    let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
-    assert_eq!(
-        facets,
-        vec![
-            (&Facet::from("/pools/north"), 1),
-            (&Facet::from("/pools/south"), 1),
-        ]
-    );
+    // Facets are also searchable.
+    //
+    // For instance a common UI pattern is to allow the user to click on a facet link
+    // (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
+    //
+    // The search would then look as follows.
+
+    // Check the reference doc for different ways to create a `Facet` object.
+    {
+        let facet = Facet::from_text("/Felidae/Pantherinae");
+        let facet_term = Term::from_facet(classification, &facet);
+        let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
+        let mut facet_collector = FacetCollector::for_field(classification);
+        facet_collector.add_facet("/Felidae/Pantherinae");
+        let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
+        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
+        assert_eq!(
+            facets,
+            vec![
+                (&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
+                (&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
+            ]
+        );
+    }

    Ok(())
}
examples/faceted_search_with_tweaked_score.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
use std::collections::HashSet;
use tantivy::collector::TopDocs;
use tantivy::doc;
use tantivy::query::BooleanQuery;
use tantivy::schema::*;
use tantivy::{DocId, Index, Score, SegmentReader};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();

    let title = schema_builder.add_text_field("title", STORED);
    let ingredient = schema_builder.add_facet_field("ingredient", INDEXED);

    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(30_000_000)?;

    index_writer.add_document(doc!(
        title => "Fried egg",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/oil"),
    ));
    index_writer.add_document(doc!(
        title => "Scrambled egg",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/butter"),
        ingredient => Facet::from("/ingredient/milk"),
        ingredient => Facet::from("/ingredient/salt"),
    ));
    index_writer.add_document(doc!(
        title => "Egg rolls",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/garlic"),
        ingredient => Facet::from("/ingredient/salt"),
        ingredient => Facet::from("/ingredient/oil"),
        ingredient => Facet::from("/ingredient/tortilla-wrap"),
        ingredient => Facet::from("/ingredient/mushroom"),
    ));
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    {
        let facets = vec![
            Facet::from("/ingredient/egg"),
            Facet::from("/ingredient/oil"),
            Facet::from("/ingredient/garlic"),
            Facet::from("/ingredient/mushroom"),
        ];
        let query = BooleanQuery::new_multiterms_query(
            facets
                .iter()
                .map(|key| Term::from_facet(ingredient, &key))
                .collect(),
        );
        let top_docs_by_custom_score =
            TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
                let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
                let facet_dict = ingredient_reader.facet_dict();

                let query_ords: HashSet<u64> = facets
                    .iter()
                    .filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
                    .collect();

                let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);

                move |doc: DocId, original_score: Score| {
                    ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
                    let missing_ingredients = facet_ords_buffer
                        .iter()
                        .filter(|ord| !query_ords.contains(ord))
                        .count();
                    let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);

                    original_score * tweak
                }
            });
        let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;

        let titles: Vec<String> = top_docs
            .iter()
            .map(|(_, doc_id)| {
                searcher
                    .doc(*doc_id)
                    .unwrap()
                    .get_first(title)
                    .unwrap()
                    .text()
                    .unwrap()
                    .to_owned()
            })
            .collect();
        assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
    }
    Ok(())
}
@@ -1,4 +1,4 @@
-// # Iterating docs and positioms.
+// # Iterating docs and positions.
//
// At its core of tantivy, relies on a data structure
// called an inverted index.

@@ -10,7 +10,7 @@
// ---
// Importing tantivy...
use tantivy::schema::*;
-use tantivy::{doc, DocId, DocSet, Index, Postings};
+use tantivy::{doc, DocSet, Index, Postings, TERMINATED};

fn main() -> tantivy::Result<()> {
    // We first create a schema for the sake of the

@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
    // Inverted index stands for the combination of
    // - the term dictionary
    // - the inverted lists associated to each terms and their positions
-    let inverted_index = segment_reader.inverted_index(title);
+    let inverted_index = segment_reader.inverted_index(title)?;

    // A `Term` is a text token associated with a field.
    // Let's go through all docs containing the term `title:the` and access their position

@@ -58,16 +58,15 @@ fn main() -> tantivy::Result<()> {
    // If you don't need all this information, you may get better performance by decompressing less
    // information.
    if let Some(mut segment_postings) =
-        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
+        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
    {
        // this buffer will be used to request for positions
        let mut positions: Vec<u32> = Vec::with_capacity(100);
-        while segment_postings.advance() {
-            // the number of time the term appears in the document.
-            let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.
+        let mut doc_id = segment_postings.doc();
+        while doc_id != TERMINATED {

            // This MAY contains deleted documents as well.
            if segment_reader.is_deleted(doc_id) {
+                doc_id = segment_postings.advance();
                continue;
            }

@@ -86,6 +85,7 @@ fn main() -> tantivy::Result<()> {
            // Doc 2: TermFreq 1: [0]
            // ```
            println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
+            doc_id = segment_postings.advance();
        }
    }
}

@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
    // Inverted index stands for the combination of
    // - the term dictionary
    // - the inverted lists associated to each terms and their positions
-    let inverted_index = segment_reader.inverted_index(title);
+    let inverted_index = segment_reader.inverted_index(title)?;

    // This segment posting object is like a cursor over the documents matching the term.
    // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies

@@ -115,13 +115,18 @@ fn main() -> tantivy::Result<()> {
    // If you don't need all this information, you may get better performance by decompressing less
    // information.
    if let Some(mut block_segment_postings) =
-        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
+        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
    {
-        while block_segment_postings.advance() {
+        loop {
+            let docs = block_segment_postings.docs();
+            if docs.is_empty() {
+                break;
+            }
            // Once again these docs MAY contains deleted documents as well.
            let docs = block_segment_postings.docs();
            // Prints `Docs [0, 2].`
            println!("Docs {:?}", docs);
+            block_segment_postings.advance();
        }
    }
}
examples/pre_tokenized_text.rs (new file, 139 lines)
@@ -0,0 +1,139 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text

use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn pre_tokenize_text(text: &str) -> Vec<Token> {
    let mut token_stream = SimpleTokenizer.token_stream(text);
    let mut tokens = vec![];
    while token_stream.advance() {
        tokens.push(token_stream.token().clone());
    }
    tokens
}

fn main() -> tantivy::Result<()> {
    let index_path = TempDir::new()?;

    let mut schema_builder = Schema::builder();

    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let title_text = "The Old Man and the Sea";
    let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";

    // Content of our first document
    // We create `PreTokenizedString` which contains original text and vector of tokens
    let title_tok = PreTokenizedString {
        text: String::from(title_text),
        tokens: pre_tokenize_text(title_text),
    };

    println!(
        "Original text: \"{}\" and tokens: {:?}",
        title_tok.text, title_tok.tokens
    );

    let body_tok = PreTokenizedString {
        text: String::from(body_text),
        tokens: pre_tokenize_text(body_text),
    };

    // Now lets create a document and add our `PreTokenizedString`
    let old_man_doc = doc!(title => title_tok, body => body_tok);

    // ... now let's just add it to the IndexWriter
    index_writer.add_document(old_man_doc);

    // Pretokenized text can also be fed as JSON
    let short_man_json = r#"{
        "title":[{
            "text":"The Old Man",
            "tokens":[
                {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
                {"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
                {"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
            ]
        }]
    }"#;

    let short_man_doc = schema.parse_document(&short_man_json)?;

    index_writer.add_document(short_man_doc);

    // Let's commit changes
    index_writer.commit()?;

    // ... and now is the time to query our index

    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    let searcher = reader.searcher();

    // We want to get documents with token "Man", we will use TermQuery to do it
    // Using PreTokenizedString means the tokens are stored as is avoiding stemming
    // and lowercasing, which preserves full words in their original form
    let query = TermQuery::new(
        Term::from_field_text(title, "Man"),
        IndexRecordOption::Basic,
    );

    let (top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 2);

    // Now let's print out the results.
    // Note that the tokens are not stored along with the original text
    // in the document store
    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("Document: {}", schema.to_json(&retrieved_doc));
    }

    // In contrary to the previous query, when we search for the "man" term we
    // should get no results, as it's not one of the indexed tokens. SimpleTokenizer
    // only splits text on whitespace / punctuation.

    let query = TermQuery::new(
        Term::from_field_text(title, "man"),
        IndexRecordOption::Basic,
    );

    let (_top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 0);

    Ok(())
}
@@ -69,12 +69,12 @@ fn highlight(snippet: Snippet) -> String {
    let mut result = String::new();
    let mut start_from = 0;

-    for (start, end) in snippet.highlighted().iter().map(|h| h.bounds()) {
-        result.push_str(&snippet.fragments()[start_from..start]);
+    for fragment_range in snippet.highlighted() {
+        result.push_str(&snippet.fragments()[start_from..fragment_range.start]);
        result.push_str(" --> ");
-        result.push_str(&snippet.fragments()[start..end]);
+        result.push_str(&snippet.fragments()[fragment_range.clone()]);
        result.push_str(" <-- ");
-        start_from = end;
+        start_from = fragment_range.end;
    }

    result.push_str(&snippet.fragments()[start_from..]);
@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {

    // This tokenizer lowers all of the text (to help with stop word matching)
    // then removes all instances of `the` and `and` from the corpus
-    let tokenizer = SimpleTokenizer
+    let tokenizer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(StopWordFilter::remove(vec![
            "the".to_string(),
@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
-version = "0.11.0"
+version = "0.14.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]

@@ -13,4 +13,4 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
-combine = ">=3.6.0,<4.0.0"
+combine = {version="4", default-features=false, features=[] }
query-grammar/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Tantivy Query Grammar

This crate is used by tantivy to parse queries.
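As a rough illustration of what that parsing produces, the sketch below mirrors the crate's own tests further down in this diff. It is a sketch only: `parse_to_ast` is the internal combine parser exercised by those tests, so the `use tantivy_query_grammar::parse_to_ast;` import assumes a public re-export and the crate's actual public entry point may differ.

```rust
// Sketch: parse a query string into the user-input AST, as the internal tests do.
use combine::parser::Parser;
use tantivy_query_grammar::parse_to_ast; // assumed re-export; adjust the path if needed

fn main() {
    let (ast, rest) = parse_to_ast().parse("a OR b AND c").unwrap();
    assert!(rest.is_empty());
    // Per the tests below, the Debug form of this AST is: (?"a" ?(+"b" +"c"))
    println!("{:?}", ast);
}
```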
@@ -1,5 +1,3 @@
-#![recursion_limit = "100"]
-
mod occur;
mod query_grammar;
mod user_input_ast;
@@ -31,22 +31,12 @@ impl Occur {

    /// Compose two occur values.
    pub fn compose(left: Occur, right: Occur) -> Occur {
-        match left {
-            Occur::Should => right,
-            Occur::Must => {
-                if right == Occur::MustNot {
-                    Occur::MustNot
-                } else {
-                    Occur::Must
-                }
-            }
-            Occur::MustNot => {
-                if right == Occur::MustNot {
-                    Occur::Must
-                } else {
-                    Occur::MustNot
-                }
-            }
+        match (left, right) {
+            (Occur::Should, _) => right,
+            (Occur::Must, Occur::MustNot) => Occur::MustNot,
+            (Occur::Must, _) => Occur::Must,
+            (Occur::MustNot, Occur::MustNot) => Occur::Must,
+            (Occur::MustNot, _) => Occur::MustNot,
        }
    }
}

@@ -56,3 +46,27 @@ impl fmt::Display for Occur {
        f.write_char(self.to_char())
    }
}
+
+#[cfg(test)]
+mod test {
+    use crate::Occur;
+
+    #[test]
+    fn test_occur_compose() {
+        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
+        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Should, Occur::MustNot), Occur::MustNot);
+        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::Should), Occur::MustNot);
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
+    }
+}
@@ -1,171 +1,211 @@
use super::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
use combine::error::StringStreamError;
use combine::parser::char::{char, digit, letter, space, spaces, string};
use combine::parser::Parser;
use combine::{
    attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};

fn field<'a>() -> impl Parser<&'a str, Output = String> {
    (
        (letter().or(char('_'))),
        many(satisfy(|c: char| {
            c.is_alphanumeric() || c == '_' || c == '-'
        })),
    )
        .skip(char(':'))
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}

fn word<'a>() -> impl Parser<&'a str, Output = String> {
    (
        satisfy(|c: char| {
            !c.is_whitespace()
                && !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
        }),
        many(satisfy(|c: char| {
            !c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
        })),
    )
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
        .and_then(|s: String| match s.as_str() {
            "OR" | "AND " | "NOT" => Err(StringStreamError::UnexpectedParse),
            _ => Ok(s),
        })
}

fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
    let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
    phrase.or(word())
}

fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
    let term_val_with_field = negative_number().or(term_val());
    (field(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
        field_name: Some(field_name),
        phrase,
    })
}

fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let term_default_field = term_val().map(|phrase| UserInputLiteral {
        field_name: None,
        phrase,
    });
    attempt(term_query())
        .or(term_default_field)
        .map(UserInputLeaf::from)
}

fn negative_number<'a>() -> impl Parser<&'a str, Output = String> {
    (
        char('-'),
        many1(digit()),
        optional((char('.'), many1(digit()))),
    )
        .map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
            if let Some(('.', s3)) = s3 {
                format!("{}{}.{}", s1, s2, s3)
            } else {
                format!("{}{}", s1, s2)
            }
        })
}

fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
    skip_many1(space())
}

/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let range_term_val = || {
        word()
            .or(negative_number())
            .or(char('*').with(value("*".to_string())))
    };

    // check for unbounded range in the form of <5, <=10, >5, >=5
    let elastic_unbounded_range = (
        choice([
            attempt(string(">=")),
            attempt(string("<=")),
            attempt(string("<")),
            attempt(string(">")),
        ])
        .skip(spaces()),
        range_term_val(),
    )
        .map(
            |(comparison_sign, bound): (&str, String)| match comparison_sign {
                ">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
                "<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
                "<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
                ">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
                // default case
                _ => (UserInputBound::Unbounded, UserInputBound::Unbounded),
            },
        );
    let lower_bound = (one_of("{[".chars()), range_term_val()).map(
        |(boundary_char, lower_bound): (char, String)| {
            if lower_bound == "*" {
                UserInputBound::Unbounded
            } else if boundary_char == '{' {
                UserInputBound::Exclusive(lower_bound)
            } else {
                UserInputBound::Inclusive(lower_bound)
            }
        },
    );
    let upper_bound = (range_term_val(), one_of("}]".chars())).map(
        |(higher_bound, boundary_char): (String, char)| {
            if higher_bound == "*" {
                UserInputBound::Unbounded
            } else if boundary_char == '}' {
                UserInputBound::Exclusive(higher_bound)
            } else {
                UserInputBound::Inclusive(higher_bound)
            }
        },
    );
    // return only lower and upper
    let lower_to_upper = (
        lower_bound.skip((spaces(), string("TO"), spaces())),
        upper_bound,
    );

    (
        optional(field()).skip(spaces()),
        // try elastic first, if it matches, the range is unbounded
        attempt(elastic_unbounded_range).or(lower_to_upper),
    )
        .map(|(field, (lower, upper))|
            // Construct the leaf from extracted field (optional)
            // and bounds
            UserInputLeaf::Range {
                field,
                lower,
                upper
            })
}

fn negate(expr: UserInputAST) -> UserInputAST {
    expr.unary(Occur::MustNot)
}

fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    parser(|input| {
        char('(')
            .with(ast())
            .skip(char(')'))
            .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
            .or(attempt(
                string("NOT").skip(spaces1()).with(leaf()).map(negate),
            ))
            .or(attempt(range().map(UserInputAST::from)))
            .or(literal().map(UserInputAST::from))
            .parse_stream(input)
            .into_result()
    })
}

fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
    char('-')
        .map(|_| Occur::MustNot)
        .or(char('+').map(|_| Occur::Must))
}

fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
    (optional(occur_symbol()), boosted_leaf())
}

fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
    (many1(digit()), optional((char('.'), many1(digit())))).map(
        |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
            let mut float_str = int_part;
            if let Some((chr, decimal_str)) = decimal_part_opt {
                float_str.push(chr);
                float_str.push_str(&decimal_str);
            }
            float_str.parse::<f64>().unwrap()
        },
    )
}

fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
    (char('^'), positive_float_number()).map(|(_, boost)| boost)
}

fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
            UserInputAST::Boost(Box::new(leaf), boost)
        }
        _ => leaf,
    })
}

#[derive(Clone, Copy)]
@@ -174,13 +214,10 @@ enum BinaryOperand {
    And,
}

-parser! {
-    fn binary_operand[I]()(I) -> BinaryOperand
-    where [I: Stream<Item = char>]
-    {
-        string("AND").with(value(BinaryOperand::And))
-            .or(string("OR").with(value(BinaryOperand::Or)))
-    }
+fn binary_operand<'a>() -> impl Parser<&'a str, Output = BinaryOperand> {
+    string("AND")
+        .with(value(BinaryOperand::And))
+        .or(string("OR").with(value(BinaryOperand::Or)))
}

fn aggregate_binary_expressions(
@@ -208,37 +245,84 @@ fn aggregate_binary_expressions(
    }
}

-parser! {
-    pub fn ast[I]()(I) -> UserInputAST
-    where [I: Stream<Item = char>]
-    {
-        let operand_leaf = (binary_operand().skip(spaces()), leaf().skip(spaces()));
-        let boolean_expr = (leaf().skip(spaces().silent()), many1(operand_leaf)).map(
-            |(left, right)| aggregate_binary_expressions(left,right));
-        let whitespace_separated_leaves = many1(leaf().skip(spaces().silent()))
-            .map(|subqueries: Vec<UserInputAST>|
-                if subqueries.len() == 1 {
-                    subqueries.into_iter().next().unwrap()
-                } else {
-                    UserInputAST::Clause(subqueries.into_iter().collect())
-                });
-        let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
-        spaces().with(expr).skip(spaces())
-    }
-}
-
-parser! {
-    pub fn parse_to_ast[I]()(I) -> UserInputAST
-    where [I: Stream<Item = char>]
-    {
-        spaces().with(optional(ast()).skip(eof())).map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
-    }
-}
+fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAST)> {
+    (
+        binary_operand().skip(spaces()),
+        boosted_leaf().skip(spaces()),
+    )
+}
+
+pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
+    let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
+        .map(|(left, right)| aggregate_binary_expressions(left, right));
+    let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
+        |subqueries: Vec<(Option<Occur>, UserInputAST)>| {
+            if subqueries.len() == 1 {
+                let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
+                match occur_opt.unwrap_or(Occur::Should) {
+                    Occur::Must | Occur::Should => ast,
+                    Occur::MustNot => UserInputAST::Clause(vec![(Some(Occur::MustNot), ast)]),
+                }
+            } else {
+                UserInputAST::Clause(subqueries.into_iter().collect())
+            }
+        },
+    );
+    let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
+    spaces().with(expr).skip(spaces())
+}
+
+pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
+    spaces()
+        .with(optional(ast()).skip(eof()))
+        .map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
+}

#[cfg(test)]
mod test {
+    type TestParseResult = Result<(), StringStreamError>;
+
    use super::*;
+    use combine::parser::Parser;
+
+    pub fn nearly_equals(a: f64, b: f64) -> bool {
+        (a - b).abs() < 0.0005 * (a + b).abs()
+    }
+
+    fn assert_nearly_equals(expected: f64, val: f64) {
+        assert!(nearly_equals(val, expected), "Got {}, expected {}.", val, expected);
+    }
+
+    #[test]
+    fn test_occur_symbol() -> TestParseResult {
+        assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
+        assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
+        Ok(())
+    }
+
+    #[test]
+    fn test_positive_float_number() {
+        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
+            let (val, remaining) = positive_float_number().parse(float_str).unwrap();
+            assert_eq!(remaining, expected_remaining);
+            assert_nearly_equals(val, expected_val);
+        }
+        fn error_parse(float_str: &str) {
+            assert!(positive_float_number().parse(float_str).is_err());
+        }
+        valid_parse("1.0", 1.0, "");
+        valid_parse("1", 1.0, "");
+        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
+        error_parse(".3332");
+        error_parse("1.");
+        error_parse("-1.");
+    }

    fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
        let query = parse_to_ast().parse(query).unwrap().0;
@@ -269,15 +353,24 @@ mod test {
            "Err(UnexpectedParse)"
        );
        test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
-        test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
+        test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
+    }
+
+    #[test]
+    fn test_boosting() {
+        assert!(parse_to_ast().parse("a^2^3").is_err());
+        assert!(parse_to_ast().parse("a^2^").is_err());
+        test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
+        test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
+        test_parse_query_to_ast_helper("a^1", "\"a\"");
    }

    #[test]
    fn test_parse_query_to_ast_binary_op() {
-        test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
-        test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
-        test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
-        test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
+        test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
+        test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
+        test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
+        test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("a OR b aaa")),
            "Err(UnexpectedParse)"
@@ -315,6 +408,32 @@ mod test {
        test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
    }

+    #[test]
+    fn test_occur_leaf() {
+        let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
+        assert_eq!(occur, Some(Occur::Must));
+        assert_eq!(format!("{:?}", ast), "\"abc\"");
+    }
+
+    #[test]
+    fn test_field_name() -> TestParseResult {
+        assert_eq!(
+            super::field().parse("my-field-name:a")?,
+            ("my-field-name".to_string(), "a")
+        );
+        assert_eq!(
+            super::field().parse("my_field_name:a")?,
+            ("my_field_name".to_string(), "a")
+        );
+        assert!(super::field().parse(":a").is_err());
+        assert!(super::field().parse("-my_field:a").is_err());
+        assert_eq!(
+            super::field().parse("_my_field:a")?,
+            ("_my_field".to_string(), "a")
+        );
+        Ok(())
+    }
+
    #[test]
    fn test_range_parser() {
        // testing the range() parser separately
@@ -343,32 +462,67 @@ mod test {
    fn test_parse_query_to_triming_spaces() {
        test_parse_query_to_ast_helper(" abc", "\"abc\"");
        test_parse_query_to_ast_helper("abc ", "\"abc\"");
-        test_parse_query_to_ast_helper("( a OR abc)", "(?(\"a\") ?(\"abc\"))");
-        test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
-        test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
-        test_parse_query_to_ast_helper("a OR abc ", "(?(\"a\") ?(\"abc\"))");
-        test_parse_query_to_ast_helper("(a OR abc )", "(?(\"a\") ?(\"abc\"))");
-        test_parse_query_to_ast_helper("(a OR abc) ", "(?(\"a\") ?(\"abc\"))");
+        test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
+        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
+        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
+        test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
+        test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
+        test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
    }

    #[test]
-    fn test_parse_query_to_ast() {
+    fn test_parse_query_single_term() {
        test_parse_query_to_ast_helper("abc", "\"abc\"");
-        test_parse_query_to_ast_helper("a b", "(\"a\" \"b\")");
-        test_parse_query_to_ast_helper("+(a b)", "+((\"a\" \"b\"))");
-        test_parse_query_to_ast_helper("+d", "+(\"d\")");
-        test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
-        test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
-        test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
-        test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
-        test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
-        test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
-        test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
-        test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
-        test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
-        test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
-        test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
-        test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
+    }
+
+    #[test]
+    fn test_parse_query_default_clause() {
+        test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
+    }
+
+    #[test]
+    fn test_parse_query_must_default_clause() {
+        test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
+    }
+
+    #[test]
+    fn test_parse_query_must_single_term() {
+        test_parse_query_to_ast_helper("+d", "\"d\"");
+    }
+
+    #[test]
+    fn test_single_term_with_field() {
+        test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
+    }
+
+    #[test]
+    fn test_single_term_with_float() {
+        test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
+    }
+
+    #[test]
+    fn test_must_clause() {
+        test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
+    }
+
+    #[test]
+    fn test_parse_test_query_plus_a_b_plus_d() {
+        test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
+    }
+
+    #[test]
+    fn test_parse_test_query_other() {
+        test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
+        test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
+        test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
+        test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
+        test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
+        test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
+        test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
+    }
+
+    #[test]
+    fn test_parse_query_with_range() {
        test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
        test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
        test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
@@ -85,14 +85,14 @@ impl UserInputBound {
 }
 
 pub enum UserInputAST {
-    Clause(Vec<UserInputAST>),
-    Unary(Occur, Box<UserInputAST>),
+    Clause(Vec<(Option<Occur>, UserInputAST)>),
     Leaf(Box<UserInputLeaf>),
+    Boost(Box<UserInputAST>, f64),
 }
 
 impl UserInputAST {
     pub fn unary(self, occur: Occur) -> UserInputAST {
-        UserInputAST::Unary(occur, Box::new(self))
+        UserInputAST::Clause(vec![(Some(occur), self)])
     }
 
     fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
@@ -103,7 +103,7 @@ impl UserInputAST {
         } else {
             UserInputAST::Clause(
                 asts.into_iter()
-                    .map(|ast: UserInputAST| ast.unary(occur))
+                    .map(|ast: UserInputAST| (Some(occur), ast))
                     .collect::<Vec<_>>(),
             )
         }
@@ -134,26 +134,38 @@ impl From<UserInputLeaf> for UserInputAST {
     }
 }
 
+fn print_occur_ast(
+    occur_opt: Option<Occur>,
+    ast: &UserInputAST,
+    formatter: &mut fmt::Formatter,
+) -> fmt::Result {
+    if let Some(occur) = occur_opt {
+        write!(formatter, "{}{:?}", occur, ast)?;
+    } else {
+        write!(formatter, "*{:?}", ast)?;
+    }
+    Ok(())
+}
+
 impl fmt::Debug for UserInputAST {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
         match *self {
             UserInputAST::Clause(ref subqueries) => {
                 if subqueries.is_empty() {
                     write!(formatter, "<emptyclause>")?;
                 } else {
                     write!(formatter, "(")?;
-                    write!(formatter, "{:?}", &subqueries[0])?;
+                    print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
                     for subquery in &subqueries[1..] {
-                        write!(formatter, " {:?}", subquery)?;
+                        write!(formatter, " ")?;
+                        print_occur_ast(subquery.0, &subquery.1, formatter)?;
                     }
                     write!(formatter, ")")?;
                 }
                 Ok(())
             }
-            UserInputAST::Unary(ref occur, ref subquery) => {
-                write!(formatter, "{}({:?})", occur, subquery)
-            }
             UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
+            UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
         }
     }
 }
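The reworked `UserInputAST` above stores every sub-query of a clause as an `(Option<Occur>, UserInputAST)` pair, and `print_occur_ast` prints a `*` prefix when no occur is given. A minimal standalone sketch of that formatting convention (a mock written purely for illustration, not the crate's own types; the real `Occur` and AST live in the query grammar module):

    // Mock of the occur-prefix convention used by the new Debug output:
    // `+` = must, `-` = must not, `*` = no explicit occur (default clause).
    #[derive(Clone, Copy)]
    enum Occur {
        Must,
        MustNot,
    }

    fn prefix(occur: Option<Occur>) -> &'static str {
        match occur {
            Some(Occur::Must) => "+",
            Some(Occur::MustNot) => "-",
            None => "*",
        }
    }

    fn render(clause: &[(Option<Occur>, &str)]) -> String {
        let parts: Vec<String> = clause
            .iter()
            .map(|(occur, leaf)| format!("{}{:?}", prefix(*occur), leaf))
            .collect();
        format!("({})", parts.join(" "))
    }

    // render(&[(None, "a"), (Some(Occur::Must), "b")]) produces `(*"a" +"b")`,
    // which matches the expected strings in the parser tests above.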
@@ -1,7 +1,6 @@
 use super::Collector;
 use crate::collector::SegmentCollector;
 use crate::DocId;
-use crate::Result;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
@@ -13,44 +12,29 @@ use crate::SegmentReader;
 /// use tantivy::collector::Count;
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index, Result};
+/// use tantivy::{doc, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = Schema::builder();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer(3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
 ///
-///     let reader = index.reader()?;
-///     let searcher = reader.searcher();
+/// let mut index_writer = index.writer(3_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind"));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+/// index_writer.add_document(doc!(title => "A Dairy Cow"));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+/// assert!(index_writer.commit().is_ok());
 ///
-///     {
-///         let query_parser = QueryParser::for_index(&index, vec![title]);
-///         let query = query_parser.parse_query("diary")?;
-///         let count = searcher.search(&query, &Count).unwrap();
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
 ///
-///         assert_eq!(count, 2);
-///     }
+/// // Here comes the important part
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let count = searcher.search(&query, &Count).unwrap();
 ///
-///     Ok(())
-/// }
+/// assert_eq!(count, 2);
 /// ```
 pub struct Count;
 
@@ -59,7 +43,11 @@ impl Collector for Count {
 
     type Child = SegmentCountCollector;
 
-    fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<SegmentCountCollector> {
+    fn for_segment(
+        &self,
+        _: SegmentLocalId,
+        _: &SegmentReader,
+    ) -> crate::Result<SegmentCountCollector> {
         Ok(SegmentCountCollector::default())
     }
 
@@ -67,7 +55,7 @@ impl Collector for Count {
         false
     }
 
-    fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
+    fn merge_fruits(&self, segment_counts: Vec<usize>) -> crate::Result<usize> {
        Ok(segment_counts.into_iter().sum())
     }
 }
@@ -108,18 +96,18 @@ mod tests {
         }
         {
             let mut count_collector = SegmentCountCollector::default();
-            count_collector.collect(0u32, 1f32);
+            count_collector.collect(0u32, 1.0);
             assert_eq!(count_collector.harvest(), 1);
         }
         {
             let mut count_collector = SegmentCountCollector::default();
-            count_collector.collect(0u32, 1f32);
+            count_collector.collect(0u32, 1.0);
             assert_eq!(count_collector.harvest(), 1);
         }
         {
             let mut count_collector = SegmentCountCollector::default();
-            count_collector.collect(0u32, 1f32);
-            count_collector.collect(1u32, 1f32);
+            count_collector.collect(0u32, 1.0);
+            count_collector.collect(1u32, 1.0);
             assert_eq!(count_collector.harvest(), 2);
         }
     }
@@ -1,6 +1,5 @@
 use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
 use crate::collector::{Collector, SegmentCollector};
-use crate::Result;
 use crate::{DocAddress, DocId, Score, SegmentReader};
 
 pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
@@ -12,13 +11,13 @@ impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
 where
     TScore: Clone + PartialOrd,
 {
-    pub fn new(
+    pub(crate) fn new(
         custom_scorer: TCustomScorer,
-        limit: usize,
+        collector: TopCollector<TScore>,
     ) -> CustomScoreTopCollector<TCustomScorer, TScore> {
         CustomScoreTopCollector {
             custom_scorer,
-            collector: TopCollector::with_limit(limit),
+            collector,
         }
     }
 }
@@ -29,7 +28,7 @@ where
 /// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
 pub trait CustomSegmentScorer<TScore>: 'static {
     /// Computes the score of a specific `doc`.
-    fn score(&self, doc: DocId) -> TScore;
+    fn score(&mut self, doc: DocId) -> TScore;
 }
 
 /// `CustomScorer` makes it possible to define any kind of score.
@@ -42,12 +41,12 @@ pub trait CustomScorer<TScore>: Sync {
     type Child: CustomSegmentScorer<TScore>;
     /// Builds a child scorer for a specific segment. The child scorer is associated to
     /// a specific segment.
-    fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
+    fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
 }
 
 impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
 where
-    TCustomScorer: CustomScorer<TScore>,
+    TCustomScorer: CustomScorer<TScore> + Send + Sync,
     TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
     type Fruit = Vec<(TScore, DocAddress)>;
@@ -58,11 +57,9 @@ where
         &self,
         segment_local_id: u32,
         segment_reader: &SegmentReader,
-    ) -> Result<Self::Child> {
+    ) -> crate::Result<Self::Child> {
+        let segment_collector = self.collector.for_segment(segment_local_id, segment_reader);
         let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
-        let segment_collector = self
-            .collector
-            .for_segment(segment_local_id, segment_reader)?;
         Ok(CustomScoreTopSegmentCollector {
             segment_collector,
             segment_scorer,
@@ -73,7 +70,7 @@ where
         false
     }
 
-    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
+    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
         self.collector.merge_fruits(segment_fruits)
     }
 }
@@ -111,16 +108,16 @@ where
 {
     type Child = T;
 
-    fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
+    fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
         Ok((self)(segment_reader))
     }
 }
 
 impl<F, TScore> CustomSegmentScorer<TScore> for F
 where
-    F: 'static + Sync + Send + Fn(DocId) -> TScore,
+    F: 'static + FnMut(DocId) -> TScore,
 {
-    fn score(&self, doc: DocId) -> TScore {
+    fn score(&mut self, doc: DocId) -> TScore {
         (self)(doc)
     }
 }
src/collector/docset_collector.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
+use std::collections::HashSet;
+
+use crate::{DocAddress, DocId, Score};
+
+use super::{Collector, SegmentCollector};
+
+/// Collectors that returns the set of DocAddress that matches the query.
+///
+/// This collector is mostly useful for tests.
+pub struct DocSetCollector;
+
+impl Collector for DocSetCollector {
+    type Fruit = HashSet<DocAddress>;
+    type Child = DocSetChildCollector;
+
+    fn for_segment(
+        &self,
+        segment_local_id: crate::SegmentLocalId,
+        _segment: &crate::SegmentReader,
+    ) -> crate::Result<Self::Child> {
+        Ok(DocSetChildCollector {
+            segment_local_id,
+            docs: HashSet::new(),
+        })
+    }
+
+    fn requires_scoring(&self) -> bool {
+        false
+    }
+
+    fn merge_fruits(
+        &self,
+        segment_fruits: Vec<(u32, HashSet<DocId>)>,
+    ) -> crate::Result<Self::Fruit> {
+        let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
+        let mut result = HashSet::with_capacity(len);
+        for (segment_local_id, docs) in segment_fruits {
+            for doc in docs {
+                result.insert(DocAddress(segment_local_id, doc));
+            }
+        }
+        Ok(result)
+    }
+}
+
+pub struct DocSetChildCollector {
+    segment_local_id: u32,
+    docs: HashSet<DocId>,
+}
+
+impl SegmentCollector for DocSetChildCollector {
+    type Fruit = (u32, HashSet<DocId>);
+
+    fn collect(&mut self, doc: crate::DocId, _score: Score) {
+        self.docs.insert(doc);
+    }
+
+    fn harvest(self) -> (u32, HashSet<DocId>) {
+        (self.segment_local_id, self.docs)
+    }
+}
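A hedged usage sketch for the new `DocSetCollector`, modeled on the `Count` doc-test above. It assumes the collector is re-exported from `tantivy::collector` like the other collectors; the schema and documents are the same as in that example:

    use tantivy::collector::DocSetCollector;
    use tantivy::query::QueryParser;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);

    let mut index_writer = index.writer(3_000_000).unwrap();
    index_writer.add_document(doc!(title => "The Diary of Muadib"));
    index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
    assert!(index_writer.commit().is_ok());

    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![title])
        .parse_query("diary")
        .unwrap();

    // The fruit is a HashSet<DocAddress>, one entry per matching document.
    let matching_docs = searcher.search(&query, &DocSetCollector).unwrap();
    assert_eq!(matching_docs.len(), 2);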
@@ -1,22 +1,19 @@
 use crate::collector::Collector;
 use crate::collector::SegmentCollector;
-use crate::docset::SkipResult;
 use crate::fastfield::FacetReader;
 use crate::schema::Facet;
 use crate::schema::Field;
 use crate::DocId;
-use crate::Result;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use crate::TantivyError;
 use std::cmp::Ordering;
 use std::collections::btree_map;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
 use std::collections::BinaryHeap;
-use std::collections::Bound;
 use std::iter::Peekable;
+use std::ops::Bound;
 use std::{u64, usize};
 
 struct Hit<'a> {
@@ -83,17 +80,16 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// ```rust
 /// use tantivy::collector::FacetCollector;
 /// use tantivy::query::AllQuery;
-/// use tantivy::schema::{Facet, Schema, TEXT};
-/// use tantivy::{doc, Index, Result};
+/// use tantivy::schema::{Facet, Schema, INDEXED, TEXT};
+/// use tantivy::{doc, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
+/// fn example() -> tantivy::Result<()> {
 ///     let mut schema_builder = Schema::builder();
 ///
 ///     // Facet have their own specific type.
 ///     // It is not a bad practise to put all of your
 ///     // facet information in the same field.
-///     let facet = schema_builder.add_facet_field("facet");
+///     let facet = schema_builder.add_facet_field("facet", INDEXED);
 ///     let title = schema_builder.add_text_field("title", TEXT);
 ///     let schema = schema_builder.build();
 ///     let index = Index::create_in_ram(schema);
@@ -127,7 +123,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///     let searcher = reader.searcher();
 ///
 ///     {
 ///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/lang");
 ///         facet_collector.add_facet("/category");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -143,7 +139,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///     }
 ///
 ///     {
 ///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/category/fiction");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
 ///
@@ -158,8 +154,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///         ]);
 ///     }
 ///
 ///     {
 ///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/category/fiction");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
 ///
@@ -172,6 +168,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///
 ///     Ok(())
 /// }
+/// # assert!(example().is_ok());
 /// ```
 pub struct FacetCollector {
     field: Field,
@@ -189,6 +186,11 @@ pub struct FacetSegmentCollector {
     collapse_facet_ords: Vec<u64>,
 }
 
+enum SkipResult {
+    Found,
+    NotFound,
+}
+
 fn skip<'a, I: Iterator<Item = &'a Facet>>(
     target: &[u8],
     collapse_it: &mut Peekable<I>,
@@ -198,14 +200,14 @@ fn skip<'a, I: Iterator<Item = &'a Facet>>(
         Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
             Ordering::Less => {}
             Ordering::Greater => {
-                return SkipResult::OverStep;
+                return SkipResult::NotFound;
             }
             Ordering::Equal => {
-                return SkipResult::Reached;
+                return SkipResult::Found;
             }
         },
         None => {
-            return SkipResult::End;
+            return SkipResult::NotFound;
         }
     }
     collapse_it.next();
@@ -262,11 +264,8 @@ impl Collector for FacetCollector {
         &self,
         _: SegmentLocalId,
         reader: &SegmentReader,
-    ) -> Result<FacetSegmentCollector> {
-        let field_name = reader.schema().get_field_name(self.field);
-        let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
-            TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
-        })?;
+    ) -> crate::Result<FacetSegmentCollector> {
+        let facet_reader = reader.facet_reader(self.field)?;
 
         let mut collapse_mapping = Vec::new();
         let mut counts = Vec::new();
@@ -275,14 +274,14 @@ impl Collector for FacetCollector {
         let mut collapse_facet_it = self.facets.iter().peekable();
         collapse_facet_ords.push(0);
         {
-            let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
+            let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
             if facet_streamer.advance() {
                 'outer: loop {
                     // at the begining of this loop, facet_streamer
                     // is positionned on a term that has not been processed yet.
                     let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
                     match skip_result {
-                        SkipResult::Reached => {
+                        SkipResult::Found => {
                             // we reach a facet we decided to collapse.
                             let collapse_depth = facet_depth(facet_streamer.key());
                             let mut collapsed_id = 0;
@@ -302,7 +301,7 @@ impl Collector for FacetCollector {
                             }
                             break;
                         }
-                        SkipResult::End | SkipResult::OverStep => {
+                        SkipResult::NotFound => {
                             collapse_mapping.push(0);
                             if !facet_streamer.advance() {
                                 break;
@@ -328,7 +327,7 @@ impl Collector for FacetCollector {
         false
     }
 
-    fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> Result<FacetCounts> {
+    fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> crate::Result<FacetCounts> {
         let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new();
         for segment_facet_counts in segments_facet_counts {
             for (facet, count) in segment_facet_counts.facet_counts {
@@ -369,9 +368,12 @@ impl SegmentCollector for FacetSegmentCollector {
         }
         let mut facet = vec![];
         let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
-        facet_dict.ord_to_term(facet_ord as u64, &mut facet);
-        // TODO
-        facet_counts.insert(Facet::from_encoded(facet).unwrap(), count);
+        // TODO handle errors.
+        if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
+            if let Ok(facet) = Facet::from_encoded(facet) {
+                facet_counts.insert(facet, count);
+            }
+        }
     }
     FacetCounts { facet_counts }
 }
@@ -396,6 +398,8 @@ impl<'a> Iterator for FacetChildIterator<'a> {
 }
 
 impl FacetCounts {
+    /// Returns an iterator over all of the facet count pairs inside this result.
+    /// See the documentation for `FacetCollector` for a usage example.
     pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
     where
         Facet: From<T>,
@@ -415,6 +419,8 @@ impl FacetCounts {
         FacetChildIterator { underlying }
     }
 
+    /// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts.
+    /// See the documentation for `FacetCollector` for a usage example.
    pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
     where
         Facet: From<T>,
@@ -452,9 +458,11 @@ impl FacetCounts {
 #[cfg(test)]
 mod tests {
     use super::{FacetCollector, FacetCounts};
+    use crate::collector::Count;
     use crate::core::Index;
-    use crate::query::AllQuery;
-    use crate::schema::{Document, Facet, Field, Schema};
+    use crate::query::{AllQuery, QueryParser, TermQuery};
+    use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema, INDEXED};
+    use crate::Term;
     use rand::distributions::Uniform;
     use rand::prelude::SliceRandom;
     use rand::{thread_rng, Rng};
@@ -463,11 +471,11 @@ mod tests {
     #[test]
     fn test_facet_collector_drilldown() {
         let mut schema_builder = Schema::builder();
-        let facet_field = schema_builder.add_facet_field("facet");
+        let facet_field = schema_builder.add_facet_field("facet", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
 
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let num_facets: usize = 3 * 4 * 5;
         let facets: Vec<Facet> = (0..num_facets)
             .map(|mut n| {
@@ -523,10 +531,10 @@ mod tests {
     #[test]
     fn test_doc_unsorted_multifacet() {
         let mut schema_builder = Schema::builder();
-        let facet_field = schema_builder.add_facet_field("facets");
+        let facet_field = schema_builder.add_facet_field("facets", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/subjects/A/a"),
             facet_field => Facet::from_text(&"/subjects/B/a"),
@@ -544,6 +552,56 @@ mod tests {
         assert_eq!(facets[0].1, 1);
     }
 
+    #[test]
+    fn test_doc_search_by_facet() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let facet_field = schema_builder.add_facet_field("facet", INDEXED);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/A/A"),
+        ));
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/A/B"),
+        ));
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/A/C/A"),
+        ));
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/D/C/A"),
+        ));
+        index_writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        assert_eq!(searcher.num_docs(), 4);
+
+        let count_facet = |facet_str: &str| {
+            let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
+            searcher
+                .search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
+                .unwrap()
+        };
+
+        assert_eq!(count_facet("/"), 4);
+        assert_eq!(count_facet("/A"), 3);
+        assert_eq!(count_facet("/A/B"), 1);
+        assert_eq!(count_facet("/A/C"), 1);
+        assert_eq!(count_facet("/A/C/A"), 1);
+        assert_eq!(count_facet("/C/A"), 0);
+
+        let query_parser = QueryParser::for_index(&index, vec![]);
+        {
+            let query = query_parser.parse_query("facet:/A/B")?;
+            assert_eq!(1, searcher.search(&query, &Count).unwrap());
+        }
+        {
+            let query = query_parser.parse_query("facet:/A")?;
+            assert_eq!(3, searcher.search(&query, &Count)?);
+        }
+        Ok(())
+    }
+
     #[test]
     fn test_non_used_facet_collector() {
         let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
@@ -554,7 +612,7 @@ mod tests {
     #[test]
     fn test_facet_collector_topk() {
         let mut schema_builder = Schema::builder();
-        let facet_field = schema_builder.add_facet_field("facet");
+        let facet_field = schema_builder.add_facet_field("facet", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
 
@@ -576,7 +634,7 @@ mod tests {
             .collect();
         docs[..].shuffle(&mut thread_rng());
 
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         for doc in docs {
             index_writer.add_document(doc);
         }
@@ -606,7 +664,7 @@ mod bench {
 
     use crate::collector::FacetCollector;
     use crate::query::AllQuery;
-    use crate::schema::{Facet, Schema};
+    use crate::schema::{Facet, Schema, INDEXED};
     use crate::Index;
     use rand::seq::SliceRandom;
     use rand::thread_rng;
@@ -615,7 +673,7 @@ mod bench {
     #[bench]
     fn bench_facet_collector(b: &mut Bencher) {
         let mut schema_builder = Schema::builder();
-        let facet_field = schema_builder.add_facet_field("facet");
+        let facet_field = schema_builder.add_facet_field("facet", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
 
@@ -629,7 +687,7 @@ mod bench {
         // 40425 docs
         docs[..].shuffle(&mut thread_rng());
 
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         for doc in docs {
             index_writer.add_document(doc);
         }
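For illustration, a hedged sketch of drilling one level into the facets indexed by `test_doc_search_by_facet` above (`/A/A`, `/A/B`, `/A/C/A`, `/D/C/A`), using the `FacetCollector` API shown in the doc comment. The expected counts in the comment are an assumption based on that documentation, not an output copied from the repository:

    // Collapse everything below "/A" to its immediate children and count them.
    let mut facet_collector = FacetCollector::for_field(facet_field);
    facet_collector.add_facet("/A");
    let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
    // Expected immediate children of "/A": ("/A/A", 1), ("/A/B", 1), ("/A/C", 1).
    for (facet, count) in facet_counts.get("/A") {
        println!("{}: {}", facet, count);
    }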
src/collector/filter_collector_wrapper.rs (new file, 183 lines)
@@ -0,0 +1,183 @@
+// # Custom collector example
+//
+// This example shows how you can implement your own
+// collector. As an example, we will compute a collector
+// that computes the standard deviation of a given fast field.
+//
+// Of course, you can have a look at the tantivy's built-in collectors
+// such as the `CountCollector` for more examples.
+
+// ---
+// Importing tantivy...
+use std::marker::PhantomData;
+
+use crate::collector::{Collector, SegmentCollector};
+use crate::fastfield::{FastFieldReader, FastValue};
+use crate::schema::Field;
+use crate::{Score, SegmentReader, TantivyError};
+
+/// The `FilterCollector` collector filters docs using a u64 fast field value and a predicate.
+/// Only the documents for which the predicate returned "true" will be passed on to the next collector.
+///
+/// ```rust
+/// use tantivy::collector::{TopDocs, FilterCollector};
+/// use tantivy::query::QueryParser;
+/// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
+/// use tantivy::{doc, DocAddress, Index};
+///
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
+///
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64));
+/// index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64));
+/// assert!(index_writer.commit().is_ok());
+///
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
+///
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
+/// let top_docs = searcher.search(&query, &no_filter_collector).unwrap();
+///
+/// assert_eq!(top_docs.len(), 1);
+/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
+///
+/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
+/// let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
+///
+/// assert_eq!(filtered_top_docs.len(), 0);
+/// ```
+pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue>
+where
+    TPredicate: 'static + Clone,
+{
+    field: Field,
+    collector: TCollector,
+    predicate: TPredicate,
+    t_predicate_value: PhantomData<TPredicateValue>,
+}
+
+impl<TCollector, TPredicate, TPredicateValue: FastValue>
+    FilterCollector<TCollector, TPredicate, TPredicateValue>
+where
+    TCollector: Collector + Send + Sync,
+    TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone,
+{
+    /// Create a new FilterCollector.
+    pub fn new(
+        field: Field,
+        predicate: TPredicate,
+        collector: TCollector,
+    ) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
+        FilterCollector {
+            field,
+            predicate,
+            collector,
+            t_predicate_value: PhantomData,
+        }
+    }
+}
+
+impl<TCollector, TPredicate, TPredicateValue: FastValue> Collector
+    for FilterCollector<TCollector, TPredicate, TPredicateValue>
+where
+    TCollector: Collector + Send + Sync,
+    TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync + Clone,
+    TPredicateValue: FastValue,
+{
+    // That's the type of our result.
+    // Our standard deviation will be a float.
+    type Fruit = TCollector::Fruit;
+
+    type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
+
+    fn for_segment(
+        &self,
+        segment_local_id: u32,
+        segment_reader: &SegmentReader,
+    ) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
+        let schema = segment_reader.schema();
+        let field_entry = schema.get_field_entry(self.field);
+        if !field_entry.is_fast() {
+            return Err(TantivyError::SchemaError(format!(
+                "Field {:?} is not a fast field.",
+                field_entry.name()
+            )));
+        }
+        let requested_type = TPredicateValue::to_type();
+        let field_schema_type = field_entry.field_type().value_type();
+        if requested_type != field_schema_type {
+            return Err(TantivyError::SchemaError(format!(
+                "Field {:?} is of type {:?}!={:?}",
+                field_entry.name(),
+                requested_type,
+                field_schema_type
+            )));
+        }
+
+        let fast_field_reader = segment_reader
+            .fast_fields()
+            .typed_fast_field_reader(self.field)?;
+
+        let segment_collector = self
+            .collector
+            .for_segment(segment_local_id, segment_reader)?;
+
+        Ok(FilterSegmentCollector {
+            fast_field_reader,
+            segment_collector,
+            predicate: self.predicate.clone(),
+            t_predicate_value: PhantomData,
+        })
+    }
+
+    fn requires_scoring(&self) -> bool {
+        self.collector.requires_scoring()
+    }
+
+    fn merge_fruits(
+        &self,
+        segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<TCollector::Fruit> {
+        self.collector.merge_fruits(segment_fruits)
+    }
+}
+
+pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
+where
+    TPredicate: 'static,
+    TPredicateValue: FastValue,
+{
+    fast_field_reader: FastFieldReader<TPredicateValue>,
+    segment_collector: TSegmentCollector,
+    predicate: TPredicate,
+    t_predicate_value: PhantomData<TPredicateValue>,
+}
+
+impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
+    for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
+where
+    TSegmentCollector: SegmentCollector,
+    TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
+    TPredicateValue: FastValue,
+{
+    type Fruit = TSegmentCollector::Fruit;
+
+    fn collect(&mut self, doc: u32, score: Score) {
+        let value = self.fast_field_reader.get(doc);
+        if (self.predicate)(value) {
+            self.segment_collector.collect(doc, score)
+        }
+    }
+
+    fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
+        self.segment_collector.harvest()
+    }
+}
src/collector/histogram_collector.rs (new file, 291 lines)
@@ -0,0 +1,291 @@
+use crate::collector::{Collector, SegmentCollector};
+use crate::fastfield::{FastFieldReader, FastValue};
+use crate::schema::{Field, Type};
+use crate::{DocId, Score};
+use fastdivide::DividerU64;
+
+/// Histogram builds an histogram of the values of a fastfield for the
+/// collected DocSet.
+///
+/// At construction, it is given parameters that define a partition of an interval
+/// [min_val, max_val) into N buckets with the same width.
+/// The ith bucket is then defined by `[min_val + i * bucket_width, min_val + (i+1) * bucket_width)`
+///
+/// An histogram is then defined as a `Vec<u64>` of length `num_buckets`, that contains a count of
+/// documents for each value bucket.
+///
+/// See also [`HistogramCollector::new()`].
+///
+/// # Warning
+///
+/// f64 field. are not supported.
+#[derive(Clone)]
+pub struct HistogramCollector {
+    min_value: u64,
+    num_buckets: usize,
+    divider: DividerU64,
+    field: Field,
+}
+
+impl HistogramCollector {
+    /// Builds a new HistogramCollector.
+    ///
+    /// The scale/range of the histogram is not dynamic. It is required to
+    /// define it by supplying following parameter:
+    /// - `min_value`: the minimum value that can be recorded in the histogram.
+    /// - `bucket_width`: the length of the interval that is associated to each buckets.
+    /// - `num_buckets`: The overall number of buckets.
+    ///
+    /// Together, this parameters define a partition of `[min_value, min_value + num_buckets * bucket_width)`
+    /// into `num_buckets` intervals of width bucket that we call `bucket`.
+    ///
+    /// # Disclaimer
+    /// This function panics if the field given is of type f64.
+    pub fn new<TFastValue: FastValue>(
+        field: Field,
+        min_value: TFastValue,
+        bucket_width: u64,
+        num_buckets: usize,
+    ) -> HistogramCollector {
+        let fast_type = TFastValue::to_type();
+        assert!(fast_type == Type::U64 || fast_type == Type::I64 || fast_type == Type::Date);
+        HistogramCollector {
+            min_value: min_value.to_u64(),
+            num_buckets,
+            field,
+            divider: DividerU64::divide_by(bucket_width),
+        }
+    }
+}
+
+struct HistogramComputer {
+    counts: Vec<u64>,
+    min_value: u64,
+    divider: DividerU64,
+}
+
+impl HistogramComputer {
+    #[inline]
+    pub(crate) fn add_value(&mut self, value: u64) {
+        if value < self.min_value {
+            return;
+        }
+        let delta = value - self.min_value;
+        let delta_u64 = delta.to_u64();
+        let bucket_id: usize = self.divider.divide(delta_u64) as usize;
+        if bucket_id < self.counts.len() {
+            self.counts[bucket_id] += 1;
+        }
+    }
+
+    fn harvest(self) -> Vec<u64> {
+        self.counts
+    }
+}
+pub struct SegmentHistogramCollector {
+    histogram_computer: HistogramComputer,
+    ff_reader: FastFieldReader<u64>,
+}
+
+impl SegmentCollector for SegmentHistogramCollector {
+    type Fruit = Vec<u64>;
+
+    fn collect(&mut self, doc: DocId, _score: Score) {
+        let value = self.ff_reader.get(doc);
+        self.histogram_computer.add_value(value);
+    }
+
+    fn harvest(self) -> Self::Fruit {
+        self.histogram_computer.harvest()
+    }
+}
+
+impl Collector for HistogramCollector {
+    type Fruit = Vec<u64>;
+    type Child = SegmentHistogramCollector;
+
+    fn for_segment(
+        &self,
+        _segment_local_id: crate::SegmentLocalId,
+        segment: &crate::SegmentReader,
+    ) -> crate::Result<Self::Child> {
+        let ff_reader = segment.fast_fields().u64_lenient(self.field)?;
+        Ok(SegmentHistogramCollector {
+            histogram_computer: HistogramComputer {
+                counts: vec![0; self.num_buckets],
+                min_value: self.min_value,
+                divider: self.divider,
+            },
+            ff_reader,
+        })
+    }
+
+    fn requires_scoring(&self) -> bool {
+        false
+    }
+
+    fn merge_fruits(&self, child_histograms: Vec<Vec<u64>>) -> crate::Result<Vec<u64>> {
+        Ok(add_vecs(child_histograms, self.num_buckets))
+    }
+}
+
+pub fn add_arrays_into(acc: &mut [u64], add: &[u64]) {
+    assert_eq!(acc.len(), add.len());
+    for (dest_bucket, bucket_count) in acc.iter_mut().zip(add) {
+        *dest_bucket += bucket_count;
+    }
+}
+
+fn add_vecs(mut vals_list: Vec<Vec<u64>>, len: usize) -> Vec<u64> {
+    let mut acc = vals_list.pop().unwrap_or_else(|| vec![0u64; len]);
+    assert_eq!(acc.len(), len);
+    for vals in vals_list {
+        add_arrays_into(&mut acc, &vals);
+    }
+    acc
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{add_vecs, HistogramCollector, HistogramComputer};
+    use crate::chrono::{TimeZone, Utc};
+    use crate::schema::{Schema, FAST};
+    use crate::{doc, query, Index};
+    use fastdivide::DividerU64;
+    use query::AllQuery;
+
+    #[test]
+    fn test_add_histograms_simple() {
+        assert_eq!(
+            add_vecs(vec![vec![1, 0, 3], vec![11, 2, 3], vec![0, 0, 1]], 3),
+            vec![12, 2, 7]
+        )
+    }
+
+    #[test]
+    fn test_add_histograms_empty() {
+        assert_eq!(add_vecs(vec![], 3), vec![0, 0, 0])
+    }
+
+    #[test]
+    fn test_histogram_builder_simple() {
+        // [1..3)
+        // [3..5)
+        // ..
+        // [9..11)
+        let mut histogram_computer = HistogramComputer {
+            counts: vec![0; 5],
+            min_value: 1,
+            divider: DividerU64::divide_by(2),
+        };
+        histogram_computer.add_value(1);
+        histogram_computer.add_value(7);
+        assert_eq!(histogram_computer.harvest(), vec![1, 0, 0, 1, 0]);
+    }
+
+    #[test]
+    fn test_histogram_too_low_is_ignored() {
+        let mut histogram_computer = HistogramComputer {
+            counts: vec![0; 5],
+            min_value: 2,
+            divider: DividerU64::divide_by(2),
+        };
+        histogram_computer.add_value(0);
+        assert_eq!(histogram_computer.harvest(), vec![0, 0, 0, 0, 0]);
+    }
+
+    #[test]
+    fn test_histogram_too_high_is_ignored() {
+        let mut histogram_computer = HistogramComputer {
+            counts: vec![0u64; 5],
+            min_value: 0,
+            divider: DividerU64::divide_by(2),
+        };
+        histogram_computer.add_value(10);
+        assert_eq!(histogram_computer.harvest(), vec![0, 0, 0, 0, 0]);
+    }
+    #[test]
+    fn test_no_segments() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let val_field = schema_builder.add_u64_field("val_field", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let all_query = AllQuery;
+        let histogram_collector = HistogramCollector::new(val_field, 0u64, 2, 5);
+        let histogram = searcher.search(&all_query, &histogram_collector)?;
+        assert_eq!(histogram, vec![0; 5]);
+        Ok(())
+    }
+
+    #[test]
+    fn test_histogram_i64() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let val_field = schema_builder.add_i64_field("val_field", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
+        writer.add_document(doc!(val_field=>12i64));
+        writer.add_document(doc!(val_field=>-30i64));
+        writer.add_document(doc!(val_field=>-12i64));
+        writer.add_document(doc!(val_field=>-10i64));
+        writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let all_query = AllQuery;
+        let histogram_collector = HistogramCollector::new(val_field, -20i64, 10u64, 4);
+        let histogram = searcher.search(&all_query, &histogram_collector)?;
+        assert_eq!(histogram, vec![1, 1, 0, 1]);
+        Ok(())
+    }
+
+    #[test]
+    fn test_histogram_merge() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let val_field = schema_builder.add_i64_field("val_field", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
+        writer.add_document(doc!(val_field=>12i64));
+        writer.commit()?;
+        writer.add_document(doc!(val_field=>-30i64));
+        writer.commit()?;
+        writer.add_document(doc!(val_field=>-12i64));
+        writer.commit()?;
+        writer.add_document(doc!(val_field=>-10i64));
+        writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let all_query = AllQuery;
+        let histogram_collector = HistogramCollector::new(val_field, -20i64, 10u64, 4);
+        let histogram = searcher.search(&all_query, &histogram_collector)?;
+        assert_eq!(histogram, vec![1, 1, 0, 1]);
+        Ok(())
+    }
+
+    #[test]
+    fn test_histogram_dates() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let date_field = schema_builder.add_date_field("date_field", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
+        writer.add_document(doc!(date_field=>Utc.ymd(1982, 9, 17).and_hms(0, 0,0)));
+        writer.add_document(doc!(date_field=>Utc.ymd(1986, 3, 9).and_hms(0, 0, 0)));
+        writer.add_document(doc!(date_field=>Utc.ymd(1983, 9, 27).and_hms(0, 0, 0)));
+        writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let all_query = AllQuery;
+        let week_histogram_collector = HistogramCollector::new(
+            date_field,
+            Utc.ymd(1980, 1, 1).and_hms(0, 0, 0),
+            3600 * 24 * 365, // it is just for a unit test... sorry leap years.
+            10,
+        );
+        let week_histogram = searcher.search(&all_query, &week_histogram_collector)?;
+        assert_eq!(week_histogram, vec![0, 0, 1, 1, 0, 0, 1, 0, 0, 0]);
+        Ok(())
+    }
+}
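A small worked sketch of the bucket arithmetic that `HistogramComputer::add_value` implements, using the values from `test_histogram_i64` above. It is illustrative only: the real code maps signed values through `to_u64` before dividing, but the resulting bucket ids are the same.

    // bucket_id = (value - min_value) / bucket_width, counted only when it
    // falls inside [0, num_buckets); values below min_value are ignored.
    let (min_value, bucket_width, num_buckets) = (-20i64, 10i64, 4usize);
    for value in [12i64, -30, -12, -10] {
        if value < min_value {
            continue; // -30 is below the histogram range and is ignored
        }
        let bucket_id = ((value - min_value) / bucket_width) as usize;
        if bucket_id < num_buckets {
            println!("{} falls into bucket {}", value, bucket_id);
        }
    }
    // -12 lands in bucket 0, -10 in bucket 1, 12 in bucket 3: hence vec![1, 1, 0, 1].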
@@ -1,127 +0,0 @@
-use std::cmp::Eq;
-use std::collections::HashMap;
-use std::hash::Hash;
-
-use collector::Collector;
-use fastfield::FastFieldReader;
-use schema::Field;
-
-use DocId;
-use Result;
-use Score;
-use SegmentReader;
-use SegmentLocalId;
-
-/// Facet collector for i64/u64 fast field
-pub struct IntFacetCollector<T>
-where
-    T: FastFieldReader,
-    T::ValueType: Eq + Hash,
-{
-    counters: HashMap<T::ValueType, u64>,
-    field: Field,
-    ff_reader: Option<T>,
-}
-
-impl<T> IntFacetCollector<T>
-where
-    T: FastFieldReader,
-    T::ValueType: Eq + Hash,
-{
-    /// Creates a new facet collector for aggregating a given field.
-    pub fn new(field: Field) -> IntFacetCollector<T> {
-        IntFacetCollector {
-            counters: HashMap::new(),
-            field: field,
-            ff_reader: None,
-        }
-    }
-}
-
-impl<T> Collector for IntFacetCollector<T>
-where
-    T: FastFieldReader,
-    T::ValueType: Eq + Hash,
-{
-    fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
-        self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
-        Ok(())
-    }
-
-    fn collect(&mut self, doc: DocId, _: Score) {
-        let val = self.ff_reader
-            .as_ref()
-            .expect(
-                "collect() was called before set_segment. \
-                 This should never happen.",
-            )
-            .get(doc);
-        *(self.counters.entry(val).or_insert(0)) += 1;
-    }
-}
-
-#[cfg(test)]
-mod tests {
-
-    use collector::{chain, IntFacetCollector};
-    use query::QueryParser;
-    use fastfield::{I64FastFieldReader, U64FastFieldReader};
-    use schema::{self, FAST, STRING};
-    use Index;
-
-    #[test]
-    // create 10 documents, set num field value to 0 or 1 for even/odd ones
-    // make sure we have facet counters correctly filled
-    fn test_facet_collector_results() {
-
-        let mut schema_builder = schema::Schema::builder();
-        let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
-        let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
-        let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
-        let text_field = schema_builder.add_text_field("text", STRING);
-        let schema = schema_builder.build();
-
-        let index = Index::create_in_ram(schema.clone());
-
-        {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            {
-                for i in 0u64..10u64 {
-                    index_writer.add_document(doc!(
-                        num_field_i64 => ((i as i64) % 3i64) as i64,
-                        num_field_u64 => (i % 2u64) as u64,
-                        num_field_f64 => (i % 4u64) as f64,
-                        text_field => "text"
-                    ));
-                }
-            }
-            assert_eq!(index_writer.commit().unwrap(), 10u64);
-        }
-
-        let searcher = index.reader().searcher();
-        let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
-        let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
-        let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
-
-        {
-            // perform the query
-            let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
-            let mut query_parser = QueryParser::for_index(index, vec![text_field]);
-            let query = query_parser.parse_query("text:text").unwrap();
-            query.search(&searcher, &mut facet_collectors).unwrap();
-        }
-
-        assert_eq!(ffvf_u64.counters[&0], 5);
-        assert_eq!(ffvf_u64.counters[&1], 5);
-        assert_eq!(ffvf_i64.counters[&0], 4);
-        assert_eq!(ffvf_i64.counters[&1], 3);
-        assert_eq!(ffvf_f64.counters[&0.0], 3);
-        assert_eq!(ffvf_f64.counters[&2.0], 2);
-
-    }
-}
@@ -85,7 +85,6 @@ See the `custom_collector` example.
 */

 use crate::DocId;
-use crate::Result;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
@@ -94,6 +93,9 @@ use downcast_rs::impl_downcast;
 mod count_collector;
 pub use self::count_collector::Count;

+mod histogram_collector;
+pub use histogram_collector::HistogramCollector;
+
 mod multi_collector;
 pub use self::multi_collector::MultiCollector;

@@ -110,6 +112,14 @@ pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};

 mod facet_collector;
 pub use self::facet_collector::FacetCollector;
+pub use self::facet_collector::FacetCounts;
+use crate::query::Weight;
+
+mod docset_collector;
+pub use self::docset_collector::DocSetCollector;
+
+mod filter_collector_wrapper;
+pub use self::filter_collector_wrapper::FilterCollector;

 /// `Fruit` is the type for the result of our collection.
 /// e.g. `usize` for the `Count` collector.
@@ -133,13 +143,13 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
 /// The collection logic itself is in the `SegmentCollector`.
 ///
 /// Segments are not guaranteed to be visited in any specific order.
-pub trait Collector: Sync {
+pub trait Collector: Sync + Send {
     /// `Fruit` is the type for the result of our collection.
     /// e.g. `usize` for the `Count` collector.
     type Fruit: Fruit;

     /// Type of the `SegmentCollector` associated to this collector.
-    type Child: SegmentCollector<Fruit = Self::Fruit>;
+    type Child: SegmentCollector;

     /// `set_segment` is called before beginning to enumerate
     /// on this segment.
@@ -147,14 +157,95 @@ pub trait Collector: Sync {
         &self,
         segment_local_id: SegmentLocalId,
         segment: &SegmentReader,
-    ) -> Result<Self::Child>;
+    ) -> crate::Result<Self::Child>;

     /// Returns true iff the collector requires to compute scores for documents.
     fn requires_scoring(&self) -> bool;

     /// Combines the fruit associated to the collection of each segments
     /// into one fruit.
-    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit>;
+    fn merge_fruits(
+        &self,
+        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<Self::Fruit>;
+
+    /// Created a segment collector and
+    fn collect_segment(
+        &self,
+        weight: &dyn Weight,
+        segment_ord: u32,
+        reader: &SegmentReader,
+    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
+        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
+
+        if let Some(delete_bitset) = reader.delete_bitset() {
+            weight.for_each(reader, &mut |doc, score| {
+                if delete_bitset.is_alive(doc) {
+                    segment_collector.collect(doc, score);
+                }
+            })?;
+        } else {
+            weight.for_each(reader, &mut |doc, score| {
+                segment_collector.collect(doc, score);
+            })?;
+        }
+        Ok(segment_collector.harvest())
+    }
+}
+
+impl<TSegmentCollector: SegmentCollector> SegmentCollector for Option<TSegmentCollector> {
+    type Fruit = Option<TSegmentCollector::Fruit>;
+
+    fn collect(&mut self, doc: DocId, score: Score) {
+        if let Some(segment_collector) = self {
+            segment_collector.collect(doc, score);
+        }
+    }
+
+    fn harvest(self) -> Self::Fruit {
+        self.map(|segment_collector| segment_collector.harvest())
+    }
+}
+
+impl<TCollector: Collector> Collector for Option<TCollector> {
+    type Fruit = Option<TCollector::Fruit>;
+
+    type Child = Option<<TCollector as Collector>::Child>;
+
+    fn for_segment(
+        &self,
+        segment_local_id: SegmentLocalId,
+        segment: &SegmentReader,
+    ) -> crate::Result<Self::Child> {
+        Ok(if let Some(inner) = self {
+            let inner_segment_collector = inner.for_segment(segment_local_id, segment)?;
+            Some(inner_segment_collector)
+        } else {
+            None
+        })
+    }
+
+    fn requires_scoring(&self) -> bool {
+        self.as_ref()
+            .map(|inner| inner.requires_scoring())
+            .unwrap_or(false)
+    }
+
+    fn merge_fruits(
+        &self,
+        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<Self::Fruit> {
+        if let Some(inner) = self.as_ref() {
+            let inner_segment_fruits: Vec<_> = segment_fruits
+                .into_iter()
+                .flat_map(|fruit_opt| fruit_opt.into_iter())
+                .collect();
+            let fruit = inner.merge_fruits(inner_segment_fruits)?;
+            Ok(Some(fruit))
+        } else {
+            Ok(None)
+        }
+    }
 }

 /// The `SegmentCollector` is the trait in charge of defining the
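To make the reshaped `Collector` trait above concrete, here is a minimal sketch of a custom collector written against these signatures. The names (e.g. `DocCountCollector`) are invented for the illustration, `crate::Result` is used as in the diff, and the sketch simply mirrors what the built-in `Count` collector already does:

// Sketch only: a collector that counts matching documents, per the trait above.
struct DocCountCollector;

struct DocCountSegmentCollector {
    count: usize,
}

impl SegmentCollector for DocCountSegmentCollector {
    type Fruit = usize;

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.count += 1;
    }

    fn harvest(self) -> usize {
        self.count
    }
}

impl Collector for DocCountCollector {
    // Here the merged fruit and the per-segment fruit happen to be the same type.
    type Fruit = usize;
    type Child = DocCountSegmentCollector;

    fn for_segment(
        &self,
        _segment_local_id: SegmentLocalId,
        _segment: &SegmentReader,
    ) -> crate::Result<DocCountSegmentCollector> {
        Ok(DocCountSegmentCollector { count: 0 })
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    // `merge_fruits` now receives the segment collectors' fruits.
    fn merge_fruits(&self, segment_fruits: Vec<usize>) -> crate::Result<usize> {
        Ok(segment_fruits.into_iter().sum())
    }
}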
@@ -185,7 +276,11 @@ where
     type Fruit = (Left::Fruit, Right::Fruit);
     type Child = (Left::Child, Right::Child);

-    fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
+    fn for_segment(
+        &self,
+        segment_local_id: u32,
+        segment: &SegmentReader,
+    ) -> crate::Result<Self::Child> {
         let left = self.0.for_segment(segment_local_id, segment)?;
         let right = self.1.for_segment(segment_local_id, segment)?;
         Ok((left, right))
@@ -197,11 +292,11 @@ where

     fn merge_fruits(
         &self,
-        children: Vec<(Left::Fruit, Right::Fruit)>,
-    ) -> Result<(Left::Fruit, Right::Fruit)> {
+        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<(Left::Fruit, Right::Fruit)> {
         let mut left_fruits = vec![];
         let mut right_fruits = vec![];
-        for (left_fruit, right_fruit) in children {
+        for (left_fruit, right_fruit) in segment_fruits {
             left_fruits.push(left_fruit);
             right_fruits.push(right_fruit);
         }
@@ -240,7 +335,11 @@ where
     type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
     type Child = (One::Child, Two::Child, Three::Child);

-    fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
+    fn for_segment(
+        &self,
+        segment_local_id: u32,
+        segment: &SegmentReader,
+    ) -> crate::Result<Self::Child> {
         let one = self.0.for_segment(segment_local_id, segment)?;
         let two = self.1.for_segment(segment_local_id, segment)?;
         let three = self.2.for_segment(segment_local_id, segment)?;
@@ -251,7 +350,10 @@ where
         self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
     }

-    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
+    fn merge_fruits(
+        &self,
+        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<Self::Fruit> {
         let mut one_fruits = vec![];
         let mut two_fruits = vec![];
         let mut three_fruits = vec![];
@@ -299,7 +401,11 @@ where
     type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
     type Child = (One::Child, Two::Child, Three::Child, Four::Child);

-    fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
+    fn for_segment(
+        &self,
+        segment_local_id: u32,
+        segment: &SegmentReader,
+    ) -> crate::Result<Self::Child> {
         let one = self.0.for_segment(segment_local_id, segment)?;
         let two = self.1.for_segment(segment_local_id, segment)?;
         let three = self.2.for_segment(segment_local_id, segment)?;
@@ -314,7 +420,10 @@ where
         || self.3.requires_scoring()
     }

-    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
+    fn merge_fruits(
+        &self,
+        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<Self::Fruit> {
         let mut one_fruits = vec![];
         let mut two_fruits = vec![];
         let mut three_fruits = vec![];
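These tuple implementations are what let several collectors run in a single search pass. A hedged usage sketch, assuming the usual `Searcher`, `Query`, `Count`, `TopDocs`, `Score`, and `DocAddress` imports:

// Sketch only: a 2-tuple of collectors yields a 2-tuple of fruits.
fn count_and_top_docs(
    searcher: &Searcher,
    query: &dyn Query,
) -> crate::Result<(usize, Vec<(Score, DocAddress)>)> {
    searcher.search(query, &(Count, TopDocs::with_limit(3)))
}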
@@ -2,7 +2,6 @@ use super::Collector;
 use super::SegmentCollector;
 use crate::collector::Fruit;
 use crate::DocId;
-use crate::Result;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
@@ -24,7 +23,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
         &self,
         segment_local_id: u32,
         reader: &SegmentReader,
-    ) -> Result<Box<dyn BoxableSegmentCollector>> {
+    ) -> crate::Result<Box<dyn BoxableSegmentCollector>> {
         let child = self.0.for_segment(segment_local_id, reader)?;
         Ok(Box::new(SegmentCollectorWrapper(child)))
     }
@@ -33,18 +32,21 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
         self.0.requires_scoring()
     }

-    fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<dyn Fruit>> {
-        let typed_fruit: Vec<TCollector::Fruit> = children
+    fn merge_fruits(
+        &self,
+        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<Box<dyn Fruit>> {
+        let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
             .into_iter()
             .map(|untyped_fruit| {
                 untyped_fruit
-                    .downcast::<TCollector::Fruit>()
+                    .downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
                     .map(|boxed_but_typed| *boxed_but_typed)
                     .map_err(|_| {
                         TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
                     })
             })
-            .collect::<Result<_>>()?;
+            .collect::<crate::Result<_>>()?;
         let merged_fruit = self.0.merge_fruits(typed_fruit)?;
         Ok(Box::new(merged_fruit))
     }
@@ -53,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
     type Fruit = Box<dyn Fruit>;

-    fn collect(&mut self, doc: u32, score: f32) {
+    fn collect(&mut self, doc: u32, score: Score) {
         self.as_mut().collect(doc, score);
     }

@@ -63,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
 }

 pub trait BoxableSegmentCollector {
-    fn collect(&mut self, doc: u32, score: f32);
+    fn collect(&mut self, doc: u32, score: Score);
     fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
 }

@@ -72,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
 impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
     for SegmentCollectorWrapper<TSegmentCollector>
 {
-    fn collect(&mut self, doc: u32, score: f32) {
+    fn collect(&mut self, doc: u32, score: Score) {
         self.0.collect(doc, score);
     }

@@ -108,49 +110,35 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
 /// use tantivy::collector::{Count, TopDocs, MultiCollector};
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index, Result};
+/// use tantivy::{doc, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = Schema::builder();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer(3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
 ///
-///     let reader = index.reader()?;
-///     let searcher = reader.searcher();
+/// let mut index_writer = index.writer(3_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind"));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+/// index_writer.add_document(doc!(title => "A Dairy Cow"));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+/// assert!(index_writer.commit().is_ok());
 ///
-///     let mut collectors = MultiCollector::new();
-///     let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
-///     let count_handle = collectors.add_collector(Count);
-///     let query_parser = QueryParser::for_index(&index, vec![title]);
-///     let query = query_parser.parse_query("diary")?;
-///     let mut multi_fruit = searcher.search(&query, &collectors)?;
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
 ///
-///     let count = count_handle.extract(&mut multi_fruit);
-///     let top_docs = top_docs_handle.extract(&mut multi_fruit);
+/// let mut collectors = MultiCollector::new();
+/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
+/// let count_handle = collectors.add_collector(Count);
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
 ///
-/// # assert_eq!(count, 2);
-/// # assert_eq!(top_docs.len(), 2);
+/// let count = count_handle.extract(&mut multi_fruit);
+/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
 ///
-///     Ok(())
-/// }
+/// assert_eq!(count, 2);
+/// assert_eq!(top_docs.len(), 2);
 /// ```
 #[allow(clippy::type_complexity)]
 #[derive(Default)]
@@ -189,12 +177,12 @@ impl<'a> Collector for MultiCollector<'a> {
         &self,
         segment_local_id: SegmentLocalId,
         segment: &SegmentReader,
-    ) -> Result<MultiCollectorChild> {
+    ) -> crate::Result<MultiCollectorChild> {
         let children = self
             .collector_wrappers
             .iter()
             .map(|collector_wrapper| collector_wrapper.for_segment(segment_local_id, segment))
-            .collect::<Result<Vec<_>>>()?;
+            .collect::<crate::Result<Vec<_>>>()?;
         Ok(MultiCollectorChild { children })
     }

@@ -205,7 +193,7 @@ impl<'a> Collector for MultiCollector<'a> {
             .any(Collector::requires_scoring)
     }

-    fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
+    fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> crate::Result<MultiFruit> {
         let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len())
             .map(|_| Vec::with_capacity(segments_multifruits.len()))
             .collect::<Vec<_>>();
@@ -223,7 +211,7 @@ impl<'a> Collector for MultiCollector<'a> {
             .map(|(child_collector, segment_fruits)| {
                 Ok(Some(child_collector.merge_fruits(segment_fruits)?))
             })
-            .collect::<Result<_>>()?;
+            .collect::<crate::Result<_>>()?;
         Ok(MultiFruit { sub_fruits })
     }
 }
@@ -271,7 +259,7 @@ mod tests {

         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text=>"abc"));
             index_writer.add_document(doc!(text=>"abc abc abc"));
             index_writer.add_document(doc!(text=>"abc abc"));
@@ -3,10 +3,17 @@ use crate::core::SegmentReader;
 use crate::fastfield::BytesFastFieldReader;
 use crate::fastfield::FastFieldReader;
 use crate::schema::Field;
-use crate::DocAddress;
 use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
+use crate::{DocAddress, Document, Searcher};
+
+use crate::collector::{Count, FilterCollector, TopDocs};
+use crate::query::{AllQuery, QueryParser};
+use crate::schema::{Schema, FAST, TEXT};
+use crate::DateTime;
+use crate::{doc, Index};
+use std::str::FromStr;

 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
     compute_score: true,
@@ -16,6 +23,54 @@ pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
     compute_score: true,
 };

+#[test]
+pub fn test_filter_collector() {
+    let mut schema_builder = Schema::builder();
+    let title = schema_builder.add_text_field("title", TEXT);
+    let price = schema_builder.add_u64_field("price", FAST);
+    let date = schema_builder.add_date_field("date", FAST);
+    let schema = schema_builder.build();
+    let index = Index::create_in_ram(schema);
+
+    let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+    index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_str("1898-04-09T00:00:00+00:00").unwrap()));
+    index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_str("2020-04-09T00:00:00+00:00").unwrap()));
+    index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_str("2019-04-20T00:00:00+00:00").unwrap()));
+    index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()));
+    index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_str("2018-04-09T00:00:00+00:00").unwrap()));
+    assert!(index_writer.commit().is_ok());
+
+    let reader = index.reader().unwrap();
+    let searcher = reader.searcher();
+
+    let query_parser = QueryParser::for_index(&index, vec![title]);
+    let query = query_parser.parse_query("diary").unwrap();
+    let filter_some_collector = FilterCollector::new(
+        price,
+        &|value: u64| value > 20_120u64,
+        TopDocs::with_limit(2),
+    );
+    let top_docs = searcher.search(&query, &filter_some_collector).unwrap();
+
+    assert_eq!(top_docs.len(), 1);
+    assert_eq!(top_docs[0].1, DocAddress(0, 1));
+
+    let filter_all_collector: FilterCollector<_, _, u64> =
+        FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
+    let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
+
+    assert_eq!(filtered_top_docs.len(), 0);
+
+    fn date_filter(value: DateTime) -> bool {
+        (value - DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()).num_weeks() > 0
+    }
+
+    let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
+    let filtered_date_docs = searcher.search(&query, &filter_dates_collector).unwrap();
+
+    assert_eq!(filtered_date_docs.len(), 2);
+}
+
 /// Stores all of the doc ids.
 /// This collector is only used for tests.
 /// It is unusable in pr
@@ -55,7 +110,7 @@ impl Collector for TestCollector {
         &self,
         segment_id: SegmentLocalId,
         _reader: &SegmentReader,
-    ) -> Result<TestSegmentCollector> {
+    ) -> crate::Result<TestSegmentCollector> {
         Ok(TestSegmentCollector {
             segment_id,
             fruit: TestFruit::default(),
@@ -66,7 +121,7 @@ impl Collector for TestCollector {
         self.compute_score
     }

-    fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> {
+    fn merge_fruits(&self, mut children: Vec<TestFruit>) -> crate::Result<TestFruit> {
         children.sort_by_key(|fruit| {
             if fruit.docs().is_empty() {
                 0
@@ -124,7 +179,7 @@ impl Collector for FastFieldTestCollector {
         &self,
         _: SegmentLocalId,
         segment_reader: &SegmentReader,
-    ) -> Result<FastFieldSegmentCollector> {
+    ) -> crate::Result<FastFieldSegmentCollector> {
         let reader = segment_reader
             .fast_fields()
             .u64(self.field)
@@ -139,7 +194,7 @@ impl Collector for FastFieldTestCollector {
         false
     }

-    fn merge_fruits(&self, children: Vec<Vec<u64>>) -> Result<Vec<u64>> {
+    fn merge_fruits(&self, children: Vec<Vec<u64>>) -> crate::Result<Vec<u64>> {
         Ok(children.into_iter().flat_map(|v| v.into_iter()).collect())
     }
 }
@@ -184,13 +239,11 @@ impl Collector for BytesFastFieldTestCollector {
         &self,
         _segment_local_id: u32,
         segment_reader: &SegmentReader,
-    ) -> Result<BytesFastFieldSegmentCollector> {
+    ) -> crate::Result<BytesFastFieldSegmentCollector> {
+        let reader = segment_reader.fast_fields().bytes(self.field)?;
         Ok(BytesFastFieldSegmentCollector {
             vals: Vec::new(),
-            reader: segment_reader
-                .fast_fields()
-                .bytes(self.field)
-                .expect("Field is not a bytes fast field."),
+            reader,
         })
     }

@@ -198,7 +251,7 @@ impl Collector for BytesFastFieldTestCollector {
         false
     }

-    fn merge_fruits(&self, children: Vec<Vec<u8>>) -> Result<Vec<u8>> {
+    fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
         Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
     }
 }
@@ -206,7 +259,7 @@ impl Collector for BytesFastFieldTestCollector {
 impl SegmentCollector for BytesFastFieldSegmentCollector {
     type Fruit = Vec<u8>;

-    fn collect(&mut self, doc: u32, _score: f32) {
+    fn collect(&mut self, doc: u32, _score: Score) {
         let data = self.reader.get_bytes(doc);
         self.vals.extend(data);
     }
@@ -215,3 +268,30 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
         self.vals
     }
 }
+
+fn make_test_searcher() -> crate::Result<crate::LeasedItem<Searcher>> {
+    let schema = Schema::builder().build();
+    let index = Index::create_in_ram(schema);
+    let mut index_writer = index.writer_for_tests()?;
+    index_writer.add_document(Document::default());
+    index_writer.add_document(Document::default());
+    index_writer.commit()?;
+    Ok(index.reader()?.searcher())
+}
+
+#[test]
+fn test_option_collector_some() -> crate::Result<()> {
+    let searcher = make_test_searcher()?;
+    let counts = searcher.search(&AllQuery, &Some(Count))?;
+    assert_eq!(counts, Some(2));
+    Ok(())
+}
+
+#[test]
+fn test_option_collector_none() -> crate::Result<()> {
+    let searcher = make_test_searcher()?;
+    let none_collector: Option<Count> = None;
+    let counts = searcher.search(&AllQuery, &none_collector)?;
+    assert_eq!(counts, None);
+    Ok(())
+}
@@ -1,52 +1,63 @@
 use crate::DocAddress;
 use crate::DocId;
-use crate::Result;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use serde::export::PhantomData;
 use std::cmp::Ordering;
 use std::collections::BinaryHeap;
+use std::marker::PhantomData;

 /// Contains a feature (field, score, etc.) of a document along with the document address.
 ///
 /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
 /// default Rust heap is a max heap, whereas a min heap is needed.
 ///
+/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
+/// address is used.
+///
 /// WARNING: equality is not what you would expect here.
 /// Two elements are equal if their feature is equal, and regardless of whether `doc`
 /// is equal. This should be perfectly fine for this usage, but let's make sure this
 /// struct is never public.
-struct ComparableDoc<T, D> {
-    feature: T,
-    doc: D,
+pub(crate) struct ComparableDoc<T, D> {
+    pub feature: T,
+    pub doc: D,
 }

-impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
+impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         Some(self.cmp(other))
     }
 }

-impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
+impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
     #[inline]
     fn cmp(&self, other: &Self) -> Ordering {
-        other
+        // Reversed to make BinaryHeap work as a min-heap
+        let by_feature = other
             .feature
             .partial_cmp(&self.feature)
-            .unwrap_or_else(|| Ordering::Equal)
+            .unwrap_or(Ordering::Equal);
+
+        let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
+
+        // In case of a tie on the feature, we sort by ascending
+        // `DocAddress` in order to ensure a stable sorting of the
+        // documents.
+        by_feature.then_with(lazy_by_doc_address)
     }
 }

-impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
+impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
     fn eq(&self, other: &Self) -> bool {
         self.cmp(other) == Ordering::Equal
     }
 }

-impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
+impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}

 pub(crate) struct TopCollector<T> {
-    limit: usize,
+    pub limit: usize,
+    pub offset: usize,
     _marker: PhantomData<T>,
 }

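As a side note on the reversed `Ord` above: the self-contained sketch below (not tantivy code; the `Entry` type is invented for the illustration) shows how reversing the feature comparison and tie-breaking on ascending document id produces the stable ordering that `into_sorted_vec()` relies on:

use std::cmp::Ordering;
use std::collections::BinaryHeap;

#[derive(PartialEq)]
struct Entry {
    feature: u32,
    doc: u32,
}

impl Eq for Entry {}

impl PartialOrd for Entry {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Entry {
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed on the feature, then ascending doc id as a stable tie-break.
        other
            .feature
            .cmp(&self.feature)
            .then_with(|| self.doc.cmp(&other.doc))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.push(Entry { feature: 3, doc: 0 });
    heap.push(Entry { feature: 1, doc: 7 });
    heap.push(Entry { feature: 1, doc: 2 });
    let sorted: Vec<(u32, u32)> = heap
        .into_sorted_vec()
        .into_iter()
        .map(|e| (e.feature, e.doc))
        .collect();
    // Best feature first; within the tie on feature 1, ascending doc id.
    assert_eq!(sorted, vec![(3, 0), (1, 2), (1, 7)]);
}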
@@ -62,27 +73,33 @@ where
         if limit < 1 {
             panic!("Limit must be strictly greater than 0.");
         }
-        TopCollector {
+        Self {
             limit,
+            offset: 0,
             _marker: PhantomData,
         }
     }

-    pub fn limit(&self) -> usize {
-        self.limit
+    /// Skip the first "offset" documents when collecting.
+    ///
+    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
+    /// Lucene's TopDocsCollector.
+    pub fn and_offset(mut self, offset: usize) -> TopCollector<T> {
+        self.offset = offset;
+        self
     }

     pub fn merge_fruits(
         &self,
         children: Vec<Vec<(T, DocAddress)>>,
-    ) -> Result<Vec<(T, DocAddress)>> {
+    ) -> crate::Result<Vec<(T, DocAddress)>> {
         if self.limit == 0 {
             return Ok(Vec::new());
         }
         let mut top_collector = BinaryHeap::new();
         for child_fruit in children {
             for (feature, doc) in child_fruit {
-                if top_collector.len() < self.limit {
+                if top_collector.len() < (self.limit + self.offset) {
                     top_collector.push(ComparableDoc { feature, doc });
                 } else if let Some(mut head) = top_collector.peek_mut() {
                     if head.feature < feature {
@@ -94,6 +111,7 @@ where
         Ok(top_collector
             .into_sorted_vec()
             .into_iter()
+            .skip(self.offset)
            .map(|cdoc| (cdoc.feature, cdoc.doc))
            .collect())
     }
@@ -102,8 +120,21 @@ where
         &self,
         segment_id: SegmentLocalId,
         _: &SegmentReader,
-    ) -> Result<TopSegmentCollector<F>> {
-        Ok(TopSegmentCollector::new(segment_id, self.limit))
+    ) -> TopSegmentCollector<F> {
+        TopSegmentCollector::new(segment_id, self.limit + self.offset)
+    }
+
+    /// Create a new TopCollector with the same limit and offset.
+    ///
+    /// Ideally we would use Into but the blanket implementation seems to cause the Scorer traits
+    /// to fail.
+    #[doc(hidden)]
+    pub(crate) fn into_tscore<TScore: PartialOrd + Clone>(self) -> TopCollector<TScore> {
+        TopCollector {
+            limit: self.limit,
+            offset: self.offset,
+            _marker: PhantomData,
+        }
     }
 }

@@ -177,7 +208,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {

 #[cfg(test)]
 mod tests {
-    use super::TopSegmentCollector;
+    use super::{TopCollector, TopSegmentCollector};
     use crate::DocAddress;

     #[test]
@@ -214,4 +245,136 @@ mod tests {
             ]
         );
     }
+
+    #[test]
+    fn test_top_segment_collector_stable_ordering_for_equal_feature() {
+        // given that the documents are collected in ascending doc id order,
+        // when harvesting we have to guarantee stable sorting in case of a tie
+        // on the score
+        let doc_ids_collection = [4, 5, 6];
+        let score = 3.14;
+
+        let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
+        for id in &doc_ids_collection {
+            top_collector_limit_2.collect(*id, score);
+        }
+
+        let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
+        for id in &doc_ids_collection {
+            top_collector_limit_3.collect(*id, score);
+        }
+
+        assert_eq!(
+            top_collector_limit_2.harvest(),
+            top_collector_limit_3.harvest()[..2].to_vec(),
+        );
+    }
+
+    #[test]
+    fn test_top_collector_with_limit_and_offset() {
+        let collector = TopCollector::with_limit(2).and_offset(1);
+
+        let results = collector
+            .merge_fruits(vec![vec![
+                (0.9, DocAddress(0, 1)),
+                (0.8, DocAddress(0, 2)),
+                (0.7, DocAddress(0, 3)),
+                (0.6, DocAddress(0, 4)),
+                (0.5, DocAddress(0, 5)),
+            ]])
+            .unwrap();
+
+        assert_eq!(
+            results,
+            vec![(0.8, DocAddress(0, 2)), (0.7, DocAddress(0, 3)),]
+        );
+    }
+
+    #[test]
+    fn test_top_collector_with_limit_larger_than_set_and_offset() {
+        let collector = TopCollector::with_limit(2).and_offset(1);
+
+        let results = collector
+            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
+            .unwrap();
+
+        assert_eq!(results, vec![(0.8, DocAddress(0, 2)),]);
+    }
+
+    #[test]
+    fn test_top_collector_with_limit_and_offset_larger_than_set() {
+        let collector = TopCollector::with_limit(2).and_offset(20);
+
+        let results = collector
+            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
+            .unwrap();
+
+        assert_eq!(results, vec![]);
+    }
+}
+
+#[cfg(all(test, feature = "unstable"))]
+mod bench {
+    use super::TopSegmentCollector;
+    use test::Bencher;
+
+    #[bench]
+    fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
+        let mut top_collector = TopSegmentCollector::new(0, 400);
+
+        b.iter(|| {
+            for i in 0..100 {
+                top_collector.collect(i, 0.8);
+            }
+        });
+    }
+
+    #[bench]
+    fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
+        let mut top_collector = TopSegmentCollector::new(0, 100);
+
+        for i in 0..100 {
+            top_collector.collect(i, 0.8);
+        }
+
+        b.iter(|| {
+            for i in 0..100 {
+                top_collector.collect(i, 0.8);
+            }
+        });
+    }
+
+    #[bench]
+    fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
+        b.iter(|| {
+            let mut top_collector = TopSegmentCollector::new(0, 100);
+
+            for i in 0..100 {
+                top_collector.collect(i, 0.8);
+            }
+
+            // it would be nice to be able to do the setup N times but still
+            // measure only harvest(). We can't since harvest() consumes
+            // the top_collector.
+            top_collector.harvest()
+        });
+    }
+
+    #[bench]
+    fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
+        b.iter(|| {
+            let mut top_collector = TopSegmentCollector::new(0, 100);
+            let mut score = 1.0;
+
+            for i in 0..100 {
+                score += 1.0;
+                top_collector.collect(i, score);
+            }
+
+            // it would be nice to be able to do the setup N times but still
+            // measure only harvest(). We can't since harvest() consumes
+            // the top_collector.
+            top_collector.harvest()
+        });
+    }
 }
@@ -1,74 +1,160 @@
 use super::Collector;
-use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
-use crate::collector::top_collector::TopCollector;
-use crate::collector::top_collector::TopSegmentCollector;
+use crate::collector::top_collector::{ComparableDoc, TopCollector};
 use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
     CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
 };
+use crate::fastfield::FastFieldReader;
+use crate::query::Weight;
 use crate::schema::Field;
 use crate::DocAddress;
 use crate::DocId;
-use crate::Result;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
+use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
+use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
 use std::fmt;
+use std::{collections::BinaryHeap, marker::PhantomData};

-/// The Top Score Collector keeps track of the K documents
+struct FastFieldConvertCollector<
+    TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
+    TFastValue: FastValue,
+> {
+    pub collector: TCollector,
+    pub field: Field,
+    pub fast_value: std::marker::PhantomData<TFastValue>,
+}
+
+impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
+where
+    TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
+    TFastValue: FastValue,
+{
+    type Fruit = Vec<(TFastValue, DocAddress)>;
+
+    type Child = TCollector::Child;
+
+    fn for_segment(
+        &self,
+        segment_local_id: crate::SegmentLocalId,
+        segment: &SegmentReader,
+    ) -> crate::Result<Self::Child> {
+        let schema = segment.schema();
+        let field_entry = schema.get_field_entry(self.field);
+        if !field_entry.is_fast() {
+            return Err(TantivyError::SchemaError(format!(
+                "Field {:?} is not a fast field.",
+                field_entry.name()
+            )));
+        }
+        let schema_type = TFastValue::to_type();
+        let requested_type = field_entry.field_type().value_type();
+        if schema_type != requested_type {
+            return Err(TantivyError::SchemaError(format!(
+                "Field {:?} is of type {:?}!={:?}",
+                field_entry.name(),
+                schema_type,
+                requested_type
+            )));
+        }
+        self.collector.for_segment(segment_local_id, segment)
+    }
+
+    fn requires_scoring(&self) -> bool {
+        self.collector.requires_scoring()
+    }
+
+    fn merge_fruits(
+        &self,
+        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    ) -> crate::Result<Self::Fruit> {
+        let raw_result = self.collector.merge_fruits(segment_fruits)?;
+        let transformed_result = raw_result
+            .into_iter()
+            .map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
+            .collect::<Vec<_>>();
+        Ok(transformed_result)
+    }
+}
+
+/// The `TopDocs` collector keeps track of the top `K` documents
 /// sorted by their score.
 ///
 /// The implementation is based on a `BinaryHeap`.
 /// The theorical complexity for collecting the top `K` out of `n` documents
 /// is `O(n log K)`.
 ///
+/// This collector guarantees a stable sorting in case of a tie on the
+/// document score. As such, it is suitable to implement pagination.
+///
 /// ```rust
 /// use tantivy::collector::TopDocs;
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, DocAddress, Index, Result};
+/// use tantivy::{doc, DocAddress, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = Schema::builder();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
 ///
-///     let reader = index.reader()?;
-///     let searcher = reader.searcher();
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind"));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+/// index_writer.add_document(doc!(title => "A Dairy Cow"));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+/// assert!(index_writer.commit().is_ok());
 ///
-///     let query_parser = QueryParser::for_index(&index, vec![title]);
-///     let query = query_parser.parse_query("diary")?;
-///     let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
 ///
-///     assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
-///     assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
 ///
-///     Ok(())
-/// }
+/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
+/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
 /// ```
 pub struct TopDocs(TopCollector<Score>);

 impl fmt::Debug for TopDocs {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "TopDocs({})", self.0.limit())
+        write!(
+            f,
+            "TopDocs(limit={}, offset={})",
+            self.0.limit, self.0.offset
+        )
+    }
+}
+
+struct ScorerByFastFieldReader {
+    ff_reader: FastFieldReader<u64>,
+}
+
+impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
+    fn score(&mut self, doc: DocId) -> u64 {
+        self.ff_reader.get(doc)
+    }
+}
+
+struct ScorerByField {
+    field: Field,
+}
+
+impl CustomScorer<u64> for ScorerByField {
+    type Child = ScorerByFastFieldReader;
+
+    fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
+        // We interpret this field as u64, regardless of its type, that way,
+        // we avoid needless conversion. Regardless of the fast field type, the
+        // mapping is monotonic, so it is sufficient to compute our top-K docs.
+        //
+        // The conversion will then happen only on the top-K docs.
+        let ff_reader: FastFieldReader<u64> = segment_reader
+            .fast_fields()
+            .typed_fast_field_reader(self.field)?;
+        Ok(ScorerByFastFieldReader { ff_reader })
     }
 }

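Regarding the "monotonic mapping" comment in `segment_scorer` above: the exact encoding is not shown in this hunk, but the idea can be illustrated with an assumed i64-to-u64 mapping that preserves order, so ranking by the u64 image ranks the original values correctly (this specific mapping is an assumption made for the sketch, not taken from the diff):

// Illustration only: an order-preserving i64 -> u64 mapping.
fn i64_to_monotonic_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63) // flip the sign bit so negatives sort first
}

fn main() {
    let mut values = vec![-30i64, 12, -12, -10];
    let mut by_mapped = values.clone();
    values.sort();
    by_mapped.sort_by_key(|&v| i64_to_monotonic_u64(v));
    // Sorting by the u64 image yields the same order as sorting the i64 values.
    assert_eq!(values, by_mapped);
}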
@@ -81,11 +167,60 @@ impl TopDocs {
         TopDocs(TopCollector::with_limit(limit))
     }

+    /// Skip the first "offset" documents when collecting.
+    ///
+    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
+    /// Lucene's TopDocsCollector.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use tantivy::collector::TopDocs;
+    /// use tantivy::query::QueryParser;
+    /// use tantivy::schema::{Schema, TEXT};
+    /// use tantivy::{doc, DocAddress, Index};
+    ///
+    /// let mut schema_builder = Schema::builder();
+    /// let title = schema_builder.add_text_field("title", TEXT);
+    /// let schema = schema_builder.build();
+    /// let index = Index::create_in_ram(schema);
+    ///
+    /// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+    /// index_writer.add_document(doc!(title => "The Name of the Wind"));
+    /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+    /// index_writer.add_document(doc!(title => "A Dairy Cow"));
+    /// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+    /// index_writer.add_document(doc!(title => "The Diary of Lena Mukhina"));
+    /// assert!(index_writer.commit().is_ok());
+    ///
+    /// let reader = index.reader().unwrap();
+    /// let searcher = reader.searcher();
+    ///
+    /// let query_parser = QueryParser::for_index(&index, vec![title]);
+    /// let query = query_parser.parse_query("diary").unwrap();
+    /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
+    ///
+    /// assert_eq!(top_docs.len(), 2);
+    /// assert_eq!(top_docs[0].1, DocAddress(0, 4));
+    /// assert_eq!(top_docs[1].1, DocAddress(0, 3));
+    /// ```
+    pub fn and_offset(self, offset: usize) -> TopDocs {
+        TopDocs(self.0.and_offset(offset))
+    }

     /// Set top-K to rank documents by a given fast field.
     ///
+    /// If the field is not a fast field or does not exist, this method still returns successfully
+    /// (it is not aware of any schema); an error will be returned at the moment of search.
+    ///
+    /// If the field is a FAST field but not a u64 field, search will return successfully, but it
+    /// will return a monotonic u64 representation (i.e. the order is still correct) of the
+    /// requested field type.
+    ///
+    /// # Example
+    ///
     /// ```rust
     /// # use tantivy::schema::{Schema, FAST, TEXT};
-    /// # use tantivy::{doc, Index, Result, DocAddress};
+    /// # use tantivy::{doc, Index, DocAddress};
     /// # use tantivy::query::{Query, QueryParser};
     /// use tantivy::Searcher;
     /// use tantivy::collector::TopDocs;
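Combined with `with_limit`, the `and_offset` method added above gives simple result paging. A hypothetical paging helper could look like this (the function name and the page arithmetic are illustrative, not part of tantivy's API):

use tantivy::collector::TopDocs;
use tantivy::query::Query;
use tantivy::{DocAddress, Score, Searcher};

/// Returns one page of results; `page` is zero-based and `page_size` is the
/// number of hits per page. (Illustrative helper built on `and_offset`.)
fn search_page(
    searcher: &Searcher,
    query: &dyn Query,
    page: usize,
    page_size: usize,
) -> tantivy::Result<Vec<(Score, DocAddress)>> {
    let collector = TopDocs::with_limit(page_size).and_offset(page * page_size);
    searcher.search(query, &collector)
}

Note that deep pages still require collecting `limit + offset` candidates per segment, so very large offsets get progressively more expensive.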
@@ -96,17 +231,14 @@ impl TopDocs {
     /// # let title = schema_builder.add_text_field("title", TEXT);
     /// # let rating = schema_builder.add_u64_field("rating", FAST);
     /// # let schema = schema_builder.build();
     /// #
     /// # let index = Index::create_in_ram(schema);
-    /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-    /// # index_writer.add_document(doc!(
-    /// #     title => "The Name of the Wind",
-    /// #     rating => 92u64,
-    /// # ));
+    /// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
+    /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
     /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
     /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
     /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
-    /// # index_writer.commit()?;
+    /// # assert!(index_writer.commit().is_ok());
     /// # let reader = index.reader()?;
     /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
     /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
@@ -115,26 +247,21 @@ impl TopDocs {
     /// #     (80u64, DocAddress(0u32, 3))]);
     /// # Ok(())
     /// # }
-    ///
-    ///
     /// /// Searches the document matching the given query, and
     /// /// collects the top 10 documents, ordered by the u64-`field`
     /// /// given in argument.
-    /// ///
-    /// /// `field` is required to be a FAST field.
     /// fn docs_sorted_by_rating(searcher: &Searcher,
     ///                          query: &dyn Query,
-    ///                          sort_by_field: Field)
-    ///     -> Result<Vec<(u64, DocAddress)>> {
+    ///                          rating_field: Field)
+    ///     -> tantivy::Result<Vec<(u64, DocAddress)>> {
     ///
     ///     // This is where we build our topdocs collector
     ///     //
-    ///     // Note the generics parameter that needs to match the
-    ///     // type `sort_by_field`.
-    ///     let top_docs_by_rating = TopDocs
+    ///     // Note the `rating_field` needs to be a FAST field here.
+    ///     let top_books_by_rating = TopDocs
     ///             ::with_limit(10)
-    ///             .order_by_u64_field(sort_by_field);
+    ///             .order_by_u64_field(rating_field);
     ///
     ///     // ... and here are our documents. Note this is a simple vec.
     ///     // The `u64` in the pair is the value of our fast field for
     ///     // each document.
@@ -143,28 +270,105 @@ impl TopDocs {
     ///     // length of 10, or less if not enough documents matched the
     ///     // query.
     ///     let resulting_docs: Vec<(u64, DocAddress)> =
-    ///         searcher.search(query, &top_docs_by_rating)?;
+    ///         searcher.search(query, &top_books_by_rating)?;
     ///
     ///     Ok(resulting_docs)
     /// }
     /// ```
     ///
-    /// # Panics
-    ///
-    /// May panic if the field requested is not a fast field.
+    /// # See also
     ///
+    /// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to the
+    /// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
     pub fn order_by_u64_field(
         self,
         field: Field,
     ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
-        self.custom_score(move |segment_reader: &SegmentReader| {
-            let ff_reader = segment_reader
-                .fast_fields()
-                .u64(field)
-                .expect("Field requested is not a i64/u64 fast field.");
-            //TODO error message missmatch actual behavior for i64
-            move |doc: DocId| ff_reader.get(doc)
-        })
+        CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore())
+    }
+
+    /// Set top-K to rank documents by a given fast field.
+    ///
+    /// If the field is not a fast field, or its field type does not match the generic type, this
+    /// method does not panic, but an explicit error will be returned at the moment of collection.
+    ///
+    /// Note that this method is generic; the requested fast field type will often be inferred by
+    /// the Rust compiler.
+    ///
+    /// Implementation-wise, for performance reasons, tantivy will manipulate the u64
+    /// representation of your fast field until the last moment.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// # use tantivy::schema::{Schema, FAST, TEXT};
+    /// # use tantivy::{doc, Index, DocAddress};
+    /// # use tantivy::query::{Query, AllQuery};
+    /// use tantivy::Searcher;
+    /// use tantivy::collector::TopDocs;
+    /// use tantivy::schema::Field;
+    ///
+    /// # fn main() -> tantivy::Result<()> {
+    /// # let mut schema_builder = Schema::builder();
+    /// # let title = schema_builder.add_text_field("company", TEXT);
+    /// # let rating = schema_builder.add_i64_field("revenue", FAST);
+    /// # let schema = schema_builder.build();
+    /// #
+    /// # let index = Index::create_in_ram(schema);
+    /// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
+    /// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
+    /// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
+    /// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
+    /// # assert!(index_writer.commit().is_ok());
+    /// # let reader = index.reader()?;
+    /// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
+    /// # assert_eq!(top_docs,
+    /// #            vec![(119_000_000i64, DocAddress(0, 1)),
+    /// #                 (92_000_000i64, DocAddress(0, 0))]);
+    /// # Ok(())
+    /// # }
+    /// /// Searches the document matching the given query, and
+    /// /// collects the top 10 documents, ordered by the u64-`field`
+    /// /// given in argument.
+    /// fn docs_sorted_by_revenue(searcher: &Searcher,
+    ///                           query: &dyn Query,
+    ///                           revenue_field: Field)
+    ///     -> tantivy::Result<Vec<(i64, DocAddress)>> {
+    ///
+    ///     // This is where we build our topdocs collector
+    ///     //
+    ///     // Note the generics parameter that needs to match the
+    ///     // type `sort_by_field`. revenue_field here is a FAST i64 field.
+    ///     let top_company_by_revenue = TopDocs
+    ///             ::with_limit(2)
+    ///             .order_by_fast_field(revenue_field);
+    ///
+    ///     // ... and here are our documents. Note this is a simple vec.
+    ///     // The `i64` in the pair is the value of our fast field for
+    ///     // each document.
+    ///     //
+    ///     // The vec is sorted decreasingly by `sort_by_field`, and has a
+    ///     // length of 10, or less if not enough documents matched the
+    ///     // query.
+    ///     let resulting_docs: Vec<(i64, DocAddress)> =
+    ///         searcher.search(query, &top_company_by_revenue)?;
+    ///
+    ///     Ok(resulting_docs)
+    /// }
+    /// ```
+    pub fn order_by_fast_field<TFastValue>(
+        self,
+        fast_field: Field,
+    ) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
+    where
+        TFastValue: FastValue,
+    {
+        let u64_collector = self.order_by_u64_field(fast_field);
+        FastFieldConvertCollector {
+            collector: u64_collector,
+            field: fast_field,
+            fast_value: PhantomData,
+        }
     }

     /// Ranks the documents using a custom score.
@@ -187,7 +391,7 @@ impl TopDocs {
     ///
     /// In the following example we will tweak our ranking a bit by
    /// boosting popular products a notch.
     ///
     /// In a more serious application, this tweaking could involve running a
     /// learning-to-rank model over various features
     ///
@@ -199,27 +403,33 @@ impl TopDocs {
     /// use tantivy::collector::TopDocs;
     /// use tantivy::schema::Field;
     ///
-    /// # fn create_schema() -> Schema {
-    /// # let mut schema_builder = Schema::builder();
-    /// # schema_builder.add_text_field("product_name", TEXT);
-    /// # schema_builder.add_u64_field("popularity", FAST);
-    /// # schema_builder.build()
-    /// # }
-    /// #
-    /// # fn main() -> tantivy::Result<()> {
-    /// # let schema = create_schema();
-    /// # let index = Index::create_in_ram(schema);
-    /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-    /// # let product_name = index.schema().get_field("product_name").unwrap();
-    /// #
+    /// fn create_schema() -> Schema {
+    ///     let mut schema_builder = Schema::builder();
+    ///     schema_builder.add_text_field("product_name", TEXT);
+    ///     schema_builder.add_u64_field("popularity", FAST);
+    ///     schema_builder.build()
+    /// }
+    ///
+    /// fn create_index() -> tantivy::Result<Index> {
+    ///     let schema = create_schema();
+    ///     let index = Index::create_in_ram(schema);
+    ///     let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
+    ///     let product_name = index.schema().get_field("product_name").unwrap();
+    ///     let popularity: Field = index.schema().get_field("popularity").unwrap();
+    ///     index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
+    ///     index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
+    ///     index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
+    ///     index_writer.commit()?;
+    ///     Ok(index)
+    /// }
+    ///
+    /// let index = create_index().unwrap();
+    /// let product_name = index.schema().get_field("product_name").unwrap();
     /// let popularity: Field = index.schema().get_field("popularity").unwrap();
-    /// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
-    /// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
-    /// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
-    /// # index_writer.commit()?;
-    /// // ...
-    /// # let user_query = "diary";
-    /// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
+    ///
+    /// let user_query_str = "diary";
+    /// let query_parser = QueryParser::for_index(&index, vec![product_name]);
+    /// let query = query_parser.parse_query(user_query_str).unwrap();
     ///
     /// // This is where we build our collector with our custom score.
     /// let top_docs_by_custom_score = TopDocs
@@ -242,19 +452,16 @@ impl TopDocs {
     ///         let popularity: u64 = popularity_reader.get(doc);
     ///         // Well.. For the sake of the example we use a simple logarithm
     ///         // function.
-    ///         let popularity_boost_score = ((2u64 + popularity) as f32).log2();
+    ///         let popularity_boost_score = ((2u64 + popularity) as Score).log2();
     ///         popularity_boost_score * original_score
     ///     }
     /// });
-    /// # let reader = index.reader()?;
-    /// # let searcher = reader.searcher();
+    /// let reader = index.reader().unwrap();
+    /// let searcher = reader.searcher();
     /// // ... and here are our documents. Note this is a simple vec.
     /// // The `Score` in the pair is our tweaked score.
     /// let resulting_docs: Vec<(Score, DocAddress)> =
-    ///     searcher.search(&*query, &top_docs_by_custom_score)?;
-    ///
-    /// # Ok(())
-    /// # }
+    ///     searcher.search(&query, &top_docs_by_custom_score).unwrap();
     /// ```
     ///
     /// # See also
@@ -266,9 +473,9 @@ impl TopDocs {
     where
         TScore: 'static + Send + Sync + Clone + PartialOrd,
         TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
-        TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
+        TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
     {
-        TweakedScoreTopCollector::new(score_tweaker, self.0.limit())
+        TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
     }

     /// Ranks the documents using a custom score.
@@ -313,9 +520,9 @@ impl TopDocs {
     /// # fn main() -> tantivy::Result<()> {
     /// # let schema = create_schema();
     /// # let index = Index::create_in_ram(schema);
-    /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+    /// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
     /// # let product_name = index.schema().get_field("product_name").unwrap();
     /// #
     /// let popularity: Field = index.schema().get_field("popularity").unwrap();
     /// let boosted: Field = index.schema().get_field("boosted").unwrap();
     /// # index_writer.add_document(doc!(boosted=>1u64, product_name => "The Diary of Muadib", popularity => 1u64));
@@ -349,7 +556,7 @@ impl TopDocs {
     ///         segment_reader.fast_fields().u64(popularity).unwrap();
     ///     let boosted_reader =
     ///         segment_reader.fast_fields().u64(boosted).unwrap();
     ///
     ///     // We can now define our actual scoring function
     ///     move |doc: DocId| {
     ///         let popularity: u64 = popularity_reader.get(doc);
@@ -380,9 +587,9 @@ impl TopDocs {
     where
         TScore: 'static + Send + Sync + Clone + PartialOrd,
         TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
-        TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
+        TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
     {
-        CustomScoreTopCollector::new(custom_score, self.0.limit())
+        CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
     }
 }
@@ -395,8 +602,8 @@ impl Collector for TopDocs {
         &self,
         segment_local_id: SegmentLocalId,
         reader: &SegmentReader,
-    ) -> Result<Self::Child> {
-        let collector = self.0.for_segment(segment_local_id, reader)?;
+    ) -> crate::Result<Self::Child> {
+        let collector = self.0.for_segment(segment_local_id, reader);
         Ok(TopScoreSegmentCollector(collector))
     }
@@ -404,9 +611,70 @@ impl Collector for TopDocs {
         true
     }

-    fn merge_fruits(&self, child_fruits: Vec<Vec<(Score, DocAddress)>>) -> Result<Self::Fruit> {
+    fn merge_fruits(
+        &self,
+        child_fruits: Vec<Vec<(Score, DocAddress)>>,
+    ) -> crate::Result<Self::Fruit> {
         self.0.merge_fruits(child_fruits)
     }
+
+    fn collect_segment(
+        &self,
+        weight: &dyn Weight,
+        segment_ord: u32,
+        reader: &SegmentReader,
+    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
+        let heap_len = self.0.limit + self.0.offset;
+        let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
+
+        if let Some(delete_bitset) = reader.delete_bitset() {
+            let mut threshold = Score::MIN;
+            weight.for_each_pruning(threshold, reader, &mut |doc, score| {
+                if delete_bitset.is_deleted(doc) {
+                    return threshold;
+                }
+                let heap_item = ComparableDoc {
+                    feature: score,
+                    doc,
+                };
+                if heap.len() < heap_len {
+                    heap.push(heap_item);
+                    if heap.len() == heap_len {
+                        threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
+                    }
+                    return threshold;
+                }
+                *heap.peek_mut().unwrap() = heap_item;
+                threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
+                threshold
+            })?;
+        } else {
+            weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
+                let heap_item = ComparableDoc {
+                    feature: score,
+                    doc,
+                };
+                if heap.len() < heap_len {
+                    heap.push(heap_item);
+                    // TODO the threshold is suboptimal for heap.len == heap_len
+                    if heap.len() == heap_len {
+                        return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
+                    } else {
+                        return Score::MIN;
+                    }
+                }
+                *heap.peek_mut().unwrap() = heap_item;
+                heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
+            })?;
+        }
+
+        let fruit = heap
+            .into_sorted_vec()
+            .into_iter()
+            .map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))
+            .collect();
+        Ok(fruit)
+    }
 }

 /// Segment Collector associated to `TopDocs`.
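The `collect_segment` implementation above boils down to a classic bounded-heap selection: keep the best `limit + offset` candidates in a heap and, once the heap is full, use its minimum as a pruning threshold for later candidates. A minimal standalone sketch of that idea (illustration only; the real collector stores `ComparableDoc` entries, skips deleted documents, and is driven by `Weight::for_each_pruning`):

use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Keep the k largest scores; once k items are held, the heap's minimum acts as
// a threshold below which new candidates are skipped outright.
fn top_k(scores: &[u64], k: usize) -> Vec<u64> {
    assert!(k > 0, "k must be positive");
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(k);
    let mut threshold = u64::MIN;
    for &score in scores {
        if score < threshold {
            continue; // pruned: cannot make it into the top k
        }
        if heap.len() < k {
            heap.push(Reverse(score));
        } else {
            // evict the current minimum of the top-k candidates
            *heap.peek_mut().unwrap() = Reverse(score);
        }
        if heap.len() == k {
            threshold = heap.peek().map(|r| r.0).unwrap_or(u64::MIN);
        }
    }
    // `Reverse` makes ascending heap order correspond to descending scores.
    heap.into_sorted_vec().into_iter().map(|r| r.0).collect()
}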
@@ -416,7 +684,7 @@ impl SegmentCollector for TopScoreSegmentCollector {
     type Fruit = Vec<(Score, DocAddress)>;

     fn collect(&mut self, doc: DocId, score: Score) {
-        self.0.collect(doc, score)
+        self.0.collect(doc, score);
     }

     fn harvest(self) -> Vec<(Score, DocAddress)> {
@@ -428,12 +696,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
 mod tests {
     use super::TopDocs;
     use crate::collector::Collector;
-    use crate::query::{Query, QueryParser};
+    use crate::query::{AllQuery, Query, QueryParser};
     use crate::schema::{Field, Schema, FAST, STORED, TEXT};
-    use crate::DocAddress;
     use crate::Index;
     use crate::IndexWriter;
     use crate::Score;
+    use crate::{DocAddress, DocId, SegmentReader};

     fn make_index() -> Index {
         let mut schema_builder = Schema::builder();
@@ -442,7 +710,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
             index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
             index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
             index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -451,8 +719,15 @@ mod tests {
         index
     }

+    fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
+        for (result, expected) in results.iter().zip(expected.iter()) {
+            assert_eq!(result.1, expected.1);
+            crate::assert_nearly_equals!(result.0, expected.0);
+        }
+    }
+
     #[test]
-    fn test_top_collector_not_at_capacity() {
+    fn test_top_collector_not_at_capacity_without_offset() {
         let index = make_index();
         let field = index.schema().get_field("text").unwrap();
         let query_parser = QueryParser::for_index(&index, vec![field]);
@@ -463,16 +738,31 @@ mod tests {
             .searcher()
             .search(&text_query, &TopDocs::with_limit(4))
             .unwrap();
-        assert_eq!(
-            score_docs,
-            vec![
+        assert_results_equals(
+            &score_docs,
+            &[
                 (0.81221175, DocAddress(0u32, 1)),
                 (0.5376842, DocAddress(0u32, 2)),
-                (0.48527452, DocAddress(0, 0))
-            ]
+                (0.48527452, DocAddress(0, 0)),
+            ],
         );
     }

+    #[test]
+    fn test_top_collector_not_at_capacity_with_offset() {
+        let index = make_index();
+        let field = index.schema().get_field("text").unwrap();
+        let query_parser = QueryParser::for_index(&index, vec![field]);
+        let text_query = query_parser.parse_query("droopy tax").unwrap();
+        let score_docs: Vec<(Score, DocAddress)> = index
+            .reader()
+            .unwrap()
+            .searcher()
+            .search(&text_query, &TopDocs::with_limit(4).and_offset(2))
+            .unwrap();
+        assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
+    }
+
     #[test]
     fn test_top_collector_at_capacity() {
         let index = make_index();
@@ -485,15 +775,59 @@ mod tests {
             .searcher()
             .search(&text_query, &TopDocs::with_limit(2))
             .unwrap();
-        assert_eq!(
-            score_docs,
-            vec![
+        assert_results_equals(
+            &score_docs,
+            &[
                 (0.81221175, DocAddress(0u32, 1)),
                 (0.5376842, DocAddress(0u32, 2)),
-            ]
+            ],
         );
     }

+    #[test]
+    fn test_top_collector_at_capacity_with_offset() {
+        let index = make_index();
+        let field = index.schema().get_field("text").unwrap();
+        let query_parser = QueryParser::for_index(&index, vec![field]);
+        let text_query = query_parser.parse_query("droopy tax").unwrap();
+        let score_docs: Vec<(Score, DocAddress)> = index
+            .reader()
+            .unwrap()
+            .searcher()
+            .search(&text_query, &TopDocs::with_limit(2).and_offset(1))
+            .unwrap();
+        assert_results_equals(
+            &score_docs[..],
+            &[
+                (0.5376842, DocAddress(0u32, 2)),
+                (0.48527452, DocAddress(0, 0)),
+            ],
+        );
+    }
+
+    #[test]
+    fn test_top_collector_stable_sorting() {
+        let index = make_index();
+
+        // using AllQuery to get a constant score
+        let searcher = index.reader().unwrap().searcher();
+
+        let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
+
+        let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
+
+        // precondition for the test to be meaningful: we did get documents
+        // with the same score
+        assert!(page_1.iter().all(|result| result.0 == page_1[0].0));
+        assert!(page_2.iter().all(|result| result.0 == page_2[0].0));
+
+        // sanity check since we're relying on make_index()
+        assert_eq!(page_1.len(), 2);
+        assert_eq!(page_2.len(), 3);
+
+        assert_eq!(page_1, &page_2[..page_1.len()]);
+    }
+
     #[test]
     #[should_panic]
     fn test_top_0() {
@@ -528,8 +862,8 @@ mod tests {
         let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
         let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
         assert_eq!(
-            top_docs,
-            vec![
+            &top_docs[..],
+            &[
                 (64, DocAddress(0, 1)),
                 (16, DocAddress(0, 2)),
                 (12, DocAddress(0, 0))
@@ -537,6 +871,94 @@ mod tests {
         );
     }

+    #[test]
+    fn test_top_field_collector_datetime() -> crate::Result<()> {
+        use std::str::FromStr;
+        let mut schema_builder = Schema::builder();
+        let name = schema_builder.add_text_field("name", TEXT);
+        let birthday = schema_builder.add_date_field("birthday", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
+        index_writer.add_document(doc!(
+            name => "Paul Robeson",
+            birthday => pr_birthday
+        ));
+        let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
+        index_writer.add_document(doc!(
+            name => "Minnie Riperton",
+            birthday => mr_birthday
+        ));
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
+        let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
+        let top_docs: Vec<(crate::DateTime, DocAddress)> =
+            searcher.search(&AllQuery, &top_collector)?;
+        assert_eq!(
+            &top_docs[..],
+            &[
+                (mr_birthday, DocAddress(0, 1)),
+                (pr_birthday, DocAddress(0, 0)),
+            ]
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_top_field_collector_i64() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let city = schema_builder.add_text_field("city", TEXT);
+        let altitude = schema_builder.add_i64_field("altitude", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(
+            city => "georgetown",
+            altitude => -1i64,
+        ));
+        index_writer.add_document(doc!(
+            city => "tokyo",
+            altitude => 40i64,
+        ));
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
+        let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
+        let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
+        assert_eq!(
+            &top_docs[..],
+            &[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_top_field_collector_f64() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let city = schema_builder.add_text_field("city", TEXT);
+        let altitude = schema_builder.add_f64_field("altitude", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(
+            city => "georgetown",
+            altitude => -1.0f64,
+        ));
+        index_writer.add_document(doc!(
+            city => "tokyo",
+            altitude => 40f64,
+        ));
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
+        let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
+        let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
+        assert_eq!(
+            &top_docs[..],
+            &[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
+        );
+        Ok(())
+    }
+
     #[test]
     #[should_panic]
     fn test_field_does_not_exist() {
@@ -559,22 +981,83 @@ mod tests {
     }

     #[test]
-    #[should_panic(expected = "Field requested is not a i64/u64 fast field")]
-    fn test_field_not_fast_field() {
+    fn test_field_not_fast_field() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
-        let title = schema_builder.add_text_field(TITLE, TEXT);
         let size = schema_builder.add_u64_field(SIZE, STORED);
         let schema = schema_builder.build();
-        let (index, _) = index("beer", title, schema, |index_writer| {
-            index_writer.add_document(doc!(
-                title => "bottle of beer",
-                size => 12u64,
-            ));
-        });
-        let searcher = index.reader().unwrap().searcher();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(size=>1u64));
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
         let segment = searcher.segment_reader(0);
         let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
-        assert!(top_collector.for_segment(0, segment).is_ok());
+        let err = top_collector.for_segment(0, segment).err().unwrap();
+        assert!(matches!(err, crate::TantivyError::SchemaError(_)));
+        Ok(())
+    }
+
+    #[test]
+    fn test_field_wrong_type() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let size = schema_builder.add_u64_field(SIZE, STORED);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(size=>1u64));
+        index_writer.commit()?;
+        let searcher = index.reader()?.searcher();
+        let segment = searcher.segment_reader(0);
+        let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
+        let err = top_collector.for_segment(0, segment).err().unwrap();
+        assert!(
+            matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn test_tweak_score_top_collector_with_offset() {
+        let index = make_index();
+        let field = index.schema().get_field("text").unwrap();
+        let query_parser = QueryParser::for_index(&index, vec![field]);
+        let text_query = query_parser.parse_query("droopy tax").unwrap();
+        let collector = TopDocs::with_limit(2).and_offset(1).tweak_score(
+            move |_segment_reader: &SegmentReader| move |doc: DocId, _original_score: Score| doc,
+        );
+        let score_docs: Vec<(u32, DocAddress)> = index
+            .reader()
+            .unwrap()
+            .searcher()
+            .search(&text_query, &collector)
+            .unwrap();
+
+        assert_eq!(
+            score_docs,
+            vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
+        );
+    }
+
+    #[test]
+    fn test_custom_score_top_collector_with_offset() {
+        let index = make_index();
+        let field = index.schema().get_field("text").unwrap();
+        let query_parser = QueryParser::for_index(&index, vec![field]);
+        let text_query = query_parser.parse_query("droopy tax").unwrap();
+        let collector = TopDocs::with_limit(2)
+            .and_offset(1)
+            .custom_score(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
+        let score_docs: Vec<(u32, DocAddress)> = index
+            .reader()
+            .unwrap()
+            .searcher()
+            .search(&text_query, &collector)
+            .unwrap();
+
+        assert_eq!(
+            score_docs,
+            vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
+        );
     }

     fn index(
@@ -584,8 +1067,7 @@ mod tests {
|
|||||||
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
||||||
) -> (Index, Box<dyn Query>) {
|
) -> (Index, Box<dyn Query>) {
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
|
||||||
doc_adder(&mut index_writer);
|
doc_adder(&mut index_writer);
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit().unwrap();
|
||||||
let query_parser = QueryParser::for_index(&index, vec![query_field]);
|
let query_parser = QueryParser::for_index(&index, vec![query_field]);
|
||||||
|
|||||||
@@ -14,11 +14,11 @@ where
|
|||||||
{
|
{
|
||||||
pub fn new(
|
pub fn new(
|
||||||
score_tweaker: TScoreTweaker,
|
score_tweaker: TScoreTweaker,
|
||||||
limit: usize,
|
collector: TopCollector<TScore>,
|
||||||
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
|
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
|
||||||
TweakedScoreTopCollector {
|
TweakedScoreTopCollector {
|
||||||
score_tweaker,
|
score_tweaker,
|
||||||
collector: TopCollector::with_limit(limit),
|
collector,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -29,7 +29,7 @@ where
|
|||||||
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
|
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
|
||||||
pub trait ScoreSegmentTweaker<TScore>: 'static {
|
pub trait ScoreSegmentTweaker<TScore>: 'static {
|
||||||
/// Tweak the given `score` for the document `doc`.
|
/// Tweak the given `score` for the document `doc`.
|
||||||
fn score(&self, doc: DocId, score: Score) -> TScore;
|
fn score(&mut self, doc: DocId, score: Score) -> TScore;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// `ScoreTweaker` makes it possible to tweak the score
|
/// `ScoreTweaker` makes it possible to tweak the score
|
||||||
@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
|
|||||||
|
|
||||||
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
|
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
|
||||||
where
|
where
|
||||||
TScoreTweaker: ScoreTweaker<TScore>,
|
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
|
||||||
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
||||||
{
|
{
|
||||||
type Fruit = Vec<(TScore, DocAddress)>;
|
type Fruit = Vec<(TScore, DocAddress)>;
|
||||||
@@ -62,9 +62,7 @@ where
|
|||||||
segment_reader: &SegmentReader,
|
segment_reader: &SegmentReader,
|
||||||
) -> Result<Self::Child> {
|
) -> Result<Self::Child> {
|
||||||
let segment_scorer = self.score_tweaker.segment_tweaker(segment_reader)?;
|
let segment_scorer = self.score_tweaker.segment_tweaker(segment_reader)?;
|
||||||
let segment_collector = self
|
let segment_collector = self.collector.for_segment(segment_local_id, segment_reader);
|
||||||
.collector
|
|
||||||
.for_segment(segment_local_id, segment_reader)?;
|
|
||||||
Ok(TopTweakedScoreSegmentCollector {
|
Ok(TopTweakedScoreSegmentCollector {
|
||||||
segment_collector,
|
segment_collector,
|
||||||
segment_scorer,
|
segment_scorer,
|
||||||
@@ -121,9 +119,9 @@ where
|
|||||||
|
|
||||||
impl<F, TScore> ScoreSegmentTweaker<TScore> for F
|
impl<F, TScore> ScoreSegmentTweaker<TScore> for F
|
||||||
where
|
where
|
||||||
F: 'static + Sync + Send + Fn(DocId, Score) -> TScore,
|
F: 'static + FnMut(DocId, Score) -> TScore,
|
||||||
{
|
{
|
||||||
fn score(&self, doc: DocId, score: Score) -> TScore {
|
fn score(&mut self, doc: DocId, score: Score) -> TScore {
|
||||||
(self)(doc, score)
|
(self)(doc, score)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::ops::Deref;
|
|
||||||
|
use crate::directory::OwnedBytes;
|
||||||
|
|
||||||
pub(crate) struct BitPacker {
|
pub(crate) struct BitPacker {
|
||||||
mini_buffer: u64,
|
mini_buffer: u64,
|
||||||
@@ -60,20 +61,14 @@ impl BitPacker {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BitUnpacker<Data>
|
pub struct BitUnpacker {
|
||||||
where
|
|
||||||
Data: Deref<Target = [u8]>,
|
|
||||||
{
|
|
||||||
num_bits: u64,
|
num_bits: u64,
|
||||||
mask: u64,
|
mask: u64,
|
||||||
data: Data,
|
data: OwnedBytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Data> BitUnpacker<Data>
|
impl BitUnpacker {
|
||||||
where
|
pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker {
|
||||||
Data: Deref<Target = [u8]>,
|
|
||||||
{
|
|
||||||
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
|
|
||||||
let mask: u64 = if num_bits == 64 {
|
let mask: u64 = if num_bits == 64 {
|
||||||
!0u64
|
!0u64
|
||||||
} else {
|
} else {
|
||||||
@@ -90,7 +85,7 @@ where
|
|||||||
if self.num_bits == 0 {
|
if self.num_bits == 0 {
|
||||||
return 0u64;
|
return 0u64;
|
||||||
}
|
}
|
||||||
let data: &[u8] = &*self.data;
|
let data: &[u8] = self.data.as_slice();
|
||||||
let num_bits = self.num_bits;
|
let num_bits = self.num_bits;
|
||||||
let mask = self.mask;
|
let mask = self.mask;
|
||||||
let addr_in_bits = idx * num_bits;
|
let addr_in_bits = idx * num_bits;
|
||||||
@@ -109,8 +104,9 @@ where
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use super::{BitPacker, BitUnpacker};
|
use super::{BitPacker, BitUnpacker};
|
||||||
|
use crate::directory::OwnedBytes;
|
||||||
|
|
||||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) {
|
||||||
let mut data = Vec::new();
|
let mut data = Vec::new();
|
||||||
let mut bitpacker = BitPacker::new();
|
let mut bitpacker = BitPacker::new();
|
||||||
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
||||||
@@ -122,7 +118,7 @@ mod test {
|
|||||||
}
|
}
|
||||||
bitpacker.close(&mut data).unwrap();
|
bitpacker.close(&mut data).unwrap();
|
||||||
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
||||||
let bitunpacker = BitUnpacker::new(data, num_bits);
|
let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits);
|
||||||
(bitunpacker, vals)
|
(bitunpacker, vals)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -33,6 +33,10 @@ impl TinySet {
|
|||||||
TinySet(0u64)
|
TinySet(0u64)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.0 = 0u64;
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns the complement of the set in `[0, 64[`.
|
/// Returns the complement of the set in `[0, 64[`.
|
||||||
fn complement(self) -> TinySet {
|
fn complement(self) -> TinySet {
|
||||||
TinySet(!self.0)
|
TinySet(!self.0)
|
||||||
@@ -43,6 +47,11 @@ impl TinySet {
|
|||||||
!self.intersect(TinySet::singleton(el)).is_empty()
|
!self.intersect(TinySet::singleton(el)).is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the number of elements in the TinySet.
|
||||||
|
pub fn len(self) -> u32 {
|
||||||
|
self.0.count_ones()
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns the intersection of `self` and `other`
|
/// Returns the intersection of `self` and `other`
|
||||||
pub fn intersect(self, other: TinySet) -> TinySet {
|
pub fn intersect(self, other: TinySet) -> TinySet {
|
||||||
TinySet(self.0 & other.0)
|
TinySet(self.0 & other.0)
|
||||||
@@ -109,22 +118,12 @@ impl TinySet {
|
|||||||
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
||||||
TinySet::range_lower(from_included).complement()
|
TinySet::range_lower(from_included).complement()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn clear(&mut self) {
|
|
||||||
self.0 = 0u64;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn len(self) -> u32 {
|
|
||||||
self.0.count_ones()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BitSet {
|
pub struct BitSet {
|
||||||
tinysets: Box<[TinySet]>,
|
tinysets: Box<[TinySet]>,
|
||||||
len: usize, //< Technically it should be u32, but we
|
len: usize,
|
||||||
// count multiple inserts.
|
|
||||||
// `usize` guards us from overflow.
|
|
||||||
max_value: u32,
|
max_value: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -204,7 +203,7 @@ mod tests {
|
|||||||
|
|
||||||
use super::BitSet;
|
use super::BitSet;
|
||||||
use super::TinySet;
|
use super::TinySet;
|
||||||
use crate::docset::DocSet;
|
use crate::docset::{DocSet, TERMINATED};
|
||||||
use crate::query::BitSetDocSet;
|
use crate::query::BitSetDocSet;
|
||||||
use crate::tests;
|
use crate::tests;
|
||||||
use crate::tests::generate_nonunique_unsorted;
|
use crate::tests::generate_nonunique_unsorted;
|
||||||
@@ -278,11 +277,13 @@ mod tests {
|
|||||||
}
|
}
|
||||||
assert_eq!(btreeset.len(), bitset.len());
|
assert_eq!(btreeset.len(), bitset.len());
|
||||||
let mut bitset_docset = BitSetDocSet::from(bitset);
|
let mut bitset_docset = BitSetDocSet::from(bitset);
|
||||||
|
let mut remaining = true;
|
||||||
for el in btreeset.into_iter() {
|
for el in btreeset.into_iter() {
|
||||||
bitset_docset.advance();
|
assert!(remaining);
|
||||||
assert_eq!(bitset_docset.doc(), el);
|
assert_eq!(bitset_docset.doc(), el);
|
||||||
|
remaining = bitset_docset.advance() != TERMINATED;
|
||||||
}
|
}
|
||||||
assert!(!bitset_docset.advance());
|
assert!(!remaining);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -1,14 +1,17 @@
|
|||||||
use crate::common::BinarySerializable;
|
use crate::common::BinarySerializable;
|
||||||
use crate::common::CountingWriter;
|
use crate::common::CountingWriter;
|
||||||
use crate::common::VInt;
|
use crate::common::VInt;
|
||||||
use crate::directory::ReadOnlySource;
|
use crate::directory::FileSlice;
|
||||||
use crate::directory::{TerminatingWrite, WritePtr};
|
use crate::directory::{TerminatingWrite, WritePtr};
|
||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
use crate::space_usage::FieldUsage;
|
use crate::space_usage::FieldUsage;
|
||||||
use crate::space_usage::PerFieldSpaceUsage;
|
use crate::space_usage::PerFieldSpaceUsage;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::io::Write;
|
use std::io::{self, Read, Write};
|
||||||
use std::io::{self, Read};
|
use std::iter::ExactSizeIterator;
|
||||||
|
use std::ops::Range;
|
||||||
|
|
||||||
|
use super::HasLen;
|
||||||
|
|
||||||
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
||||||
pub struct FileAddr {
|
pub struct FileAddr {
|
||||||
@@ -103,25 +106,26 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
|||||||
/// for each field.
|
/// for each field.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct CompositeFile {
|
pub struct CompositeFile {
|
||||||
data: ReadOnlySource,
|
data: FileSlice,
|
||||||
offsets_index: HashMap<FileAddr, (usize, usize)>,
|
offsets_index: HashMap<FileAddr, Range<usize>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CompositeFile {
|
impl CompositeFile {
|
||||||
/// Opens a composite file stored in a given
|
/// Opens a composite file stored in a given
|
||||||
/// `ReadOnlySource`.
|
/// `FileSlice`.
|
||||||
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
|
pub fn open(data: &FileSlice) -> io::Result<CompositeFile> {
|
||||||
let end = data.len();
|
let end = data.len();
|
||||||
let footer_len_data = data.slice_from(end - 4);
|
let footer_len_data = data.slice_from(end - 4).read_bytes()?;
|
||||||
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
||||||
let footer_start = end - 4 - footer_len;
|
let footer_start = end - 4 - footer_len;
|
||||||
let footer_data = data.slice(footer_start, footer_start + footer_len);
|
let footer_data = data
|
||||||
|
.slice(footer_start..footer_start + footer_len)
|
||||||
|
.read_bytes()?;
|
||||||
let mut footer_buffer = footer_data.as_slice();
|
let mut footer_buffer = footer_data.as_slice();
|
||||||
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
||||||
|
|
||||||
let mut file_addrs = vec![];
|
let mut file_addrs = vec![];
|
||||||
let mut offsets = vec![];
|
let mut offsets = vec![];
|

         let mut field_index = HashMap::new();

         let mut offset = 0;

@@ -136,7 +140,7 @@ impl CompositeFile {
             let file_addr = file_addrs[i];
             let start_offset = offsets[i];
             let end_offset = offsets[i + 1];
-            field_index.insert(file_addr, (start_offset, end_offset));
+            field_index.insert(file_addr, start_offset..end_offset);
         }

         Ok(CompositeFile {
@@ -150,31 +154,31 @@ impl CompositeFile {
     pub fn empty() -> CompositeFile {
         CompositeFile {
             offsets_index: HashMap::new(),
-            data: ReadOnlySource::empty(),
+            data: FileSlice::empty(),
         }
     }

-    /// Returns the `ReadOnlySource` associated
+    /// Returns the `FileSlice` associated
     /// to a given `Field` and stored in a `CompositeFile`.
-    pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
+    pub fn open_read(&self, field: Field) -> Option<FileSlice> {
         self.open_read_with_idx(field, 0)
     }

-    /// Returns the `ReadOnlySource` associated
+    /// Returns the `FileSlice` associated
     /// to a given `Field` and stored in a `CompositeFile`.
-    pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
+    pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
         self.offsets_index
             .get(&FileAddr { field, idx })
-            .map(|&(from, to)| self.data.slice(from, to))
+            .map(|byte_range| self.data.slice(byte_range.clone()))
     }

     pub fn space_usage(&self) -> PerFieldSpaceUsage {
         let mut fields = HashMap::new();
-        for (&field_addr, &(start, end)) in self.offsets_index.iter() {
+        for (&field_addr, byte_range) in &self.offsets_index {
             fields
                 .entry(field_addr.field)
                 .or_insert_with(|| FieldUsage::empty(field_addr.field))
-                .add_field_idx(field_addr.idx, end - start);
+                .add_field_idx(field_addr.idx, byte_range.len());
         }
         PerFieldSpaceUsage::new(fields)
     }
@@ -192,46 +196,44 @@ mod test {
     use std::path::Path;

     #[test]
-    fn test_composite_file() {
+    fn test_composite_file() -> crate::Result<()> {
         let path = Path::new("test_path");
-        let mut directory = RAMDirectory::create();
+        let directory = RAMDirectory::create();
         {
             let w = directory.open_write(path).unwrap();
             let mut composite_write = CompositeWrite::wrap(w);
-            {
-                let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
-                VInt(32431123u64).serialize(&mut write_0).unwrap();
-                write_0.flush().unwrap();
-            }
-
-            {
-                let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
-                VInt(2).serialize(&mut write_4).unwrap();
-                write_4.flush().unwrap();
-            }
-            composite_write.close().unwrap();
+            let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
+            VInt(32431123u64).serialize(&mut write_0)?;
+            write_0.flush()?;
+            let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
+            VInt(2).serialize(&mut write_4)?;
+            write_4.flush()?;
+            composite_write.close()?;
         }
         {
-            let r = directory.open_read(path).unwrap();
-            let composite_file = CompositeFile::open(&r).unwrap();
+            let r = directory.open_read(path)?;
+            let composite_file = CompositeFile::open(&r)?;
             {
                 let file0 = composite_file
                     .open_read(Field::from_field_id(0u32))
-                    .unwrap();
+                    .unwrap()
+                    .read_bytes()?;
                 let mut file0_buf = file0.as_slice();
-                let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
+                let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
                 assert_eq!(file0_buf.len(), 0);
                 assert_eq!(payload_0, 32431123u64);
             }
             {
                 let file4 = composite_file
                     .open_read(Field::from_field_id(4u32))
-                    .unwrap();
+                    .unwrap()
+                    .read_bytes()?;
                 let mut file4_buf = file4.as_slice();
-                let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
+                let payload_4 = VInt::deserialize(&mut file4_buf)?.0;
                 assert_eq!(file4_buf.len(), 0);
                 assert_eq!(payload_4, 2u64);
             }
         }
+        Ok(())
     }
 }
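The hunk above stores byte ranges instead of (start, end) tuples and serves fields as `FileSlice` rather than `ReadOnlySource`. A minimal sketch of the resulting read path, not taken verbatim from the patch (variable names are illustrative and the surrounding function is assumed to return `crate::Result`):

    // Sketch only: open_read now yields Option<FileSlice>; bytes are materialized explicitly.
    let field_bytes = composite_file
        .open_read(Field::from_field_id(0u32))
        .unwrap()
        .read_bytes()?;
    let mut cursor = field_bytes.as_slice();
    let first_value = VInt::deserialize(&mut cursor)?.0;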
@@ -20,9 +20,10 @@ impl<W: Write> CountingWriter<W> {
         self.written_bytes
     }

-    pub fn finish(mut self) -> io::Result<(W, u64)> {
-        self.flush()?;
-        Ok((self.underlying, self.written_bytes))
+    /// Returns the underlying write object.
+    /// Note that this method does not trigger any flushing.
+    pub fn finish(self) -> W {
+        self.underlying
     }
 }

@@ -46,7 +47,6 @@ impl<W: Write> Write for CountingWriter<W> {

 impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
     fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
-        self.flush()?;
         self.underlying.terminate_ref(token)
     }
 }
@@ -63,8 +63,9 @@ mod test {
         let mut counting_writer = CountingWriter::wrap(buffer);
         let bytes = (0u8..10u8).collect::<Vec<u8>>();
         counting_writer.write_all(&bytes).unwrap();
-        let (w, len): (Vec<u8>, u64) = counting_writer.finish().unwrap();
+        let len = counting_writer.written_bytes();
+        let buffer_restituted: Vec<u8> = counting_writer.finish();
         assert_eq!(len, 10u64);
-        assert_eq!(w.len(), 10);
+        assert_eq!(buffer_restituted.len(), 10);
     }
 }
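With this change, `finish()` consumes the `CountingWriter` and simply hands back the wrapped writer without flushing, so the byte count has to be read before the call. A small sketch under that assumption (the `Vec<u8>` target mirrors the test above):

    use std::io::Write;

    let mut counting_writer = CountingWriter::wrap(Vec::new());
    counting_writer.write_all(b"0123456789").unwrap();
    let written = counting_writer.written_bytes(); // read the counter before consuming the writer
    let inner: Vec<u8> = counting_writer.finish(); // no flush happens here
    assert_eq!(written, 10);
    assert_eq!(inner.len(), 10);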
@@ -10,7 +10,9 @@ pub(crate) use self::bitset::TinySet;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::counting_writer::CountingWriter;
 pub use self::serialize::{BinarySerializable, FixedSize};
-pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
+pub use self::vint::{
+    read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
+};
 pub use byteorder::LittleEndian as Endianness;

 /// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -18,6 +20,19 @@ pub use byteorder::LittleEndian as Endianness;
 /// We do not allow segments with more than
 pub const MAX_DOC_LIMIT: u32 = 1 << 31;

+pub fn minmax<I, T>(mut vals: I) -> Option<(T, T)>
+where
+    I: Iterator<Item = T>,
+    T: Copy + Ord,
+{
+    if let Some(first_el) = vals.next() {
+        return Some(vals.fold((first_el, first_el), |(min_val, max_val), el| {
+            (min_val.min(el), max_val.max(el))
+        }));
+    }
+    None
+}
+
 /// Computes the number of bits that will be used for bitpacking.
 ///
 /// In general the target is the minimum number of bits
@@ -51,10 +66,6 @@ pub(crate) fn compute_num_bits(n: u64) -> u8 {
     }
 }

-pub(crate) fn is_power_of_2(n: usize) -> bool {
-    (n > 0) && (n & (n - 1) == 0)
-}
-
 /// Has length trait
 pub trait HasLen {
     /// Return length
@@ -104,11 +115,16 @@ pub fn u64_to_i64(val: u64) -> i64 {
 /// For simplicity, tantivy internally handles `f64` as `u64`.
 /// The mapping is defined by this function.
 ///
-/// Maps `f64` to `u64` so that lexical order is preserved.
+/// Maps `f64` to `u64` in a monotonic manner, so that bytes lexical order is preserved.
 ///
 /// This is more suited than simply casting (`val as u64`)
 /// which would truncate the result
 ///
+/// # Reference
+///
+/// Daniel Lemire's [blog post](https://lemire.me/blog/2020/12/14/converting-floating-point-numbers-to-integers-while-preserving-order/)
+/// explains the mapping in a clear manner.
+///
 /// # See also
 /// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
 #[inline(always)]
@@ -134,8 +150,10 @@ pub fn u64_to_f64(val: u64) -> f64 {
 #[cfg(test)]
 pub(crate) mod test {

+    pub use super::minmax;
     pub use super::serialize::test::fixed_size_test;
     use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
+    use proptest::prelude::*;
     use std::f64;

     fn test_i64_converter_helper(val: i64) {
@@ -146,6 +164,15 @@ pub(crate) mod test {
         assert_eq!(u64_to_f64(f64_to_u64(val)), val);
     }

+    proptest! {
+        #[test]
+        fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
+            let left_u64 = f64_to_u64(left);
+            let right_u64 = f64_to_u64(right);
+            assert_eq!(left_u64 < right_u64, left < right);
+        }
+    }
+
     #[test]
     fn test_i64_converter() {
         assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
@@ -199,4 +226,21 @@ pub(crate) mod test {
         assert!(((super::MAX_DOC_LIMIT - 1) as i32) >= 0);
         assert!((super::MAX_DOC_LIMIT as i32) < 0);
     }
+
+    #[test]
+    fn test_minmax_empty() {
+        let vals: Vec<u32> = vec![];
+        assert_eq!(minmax(vals.into_iter()), None);
+    }
+
+    #[test]
+    fn test_minmax_one() {
+        assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
+    }
+
+    #[test]
+    fn test_minmax_two() {
+        assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
+        assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
+    }
 }
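The new `minmax` helper folds the iterator once and returns both extrema, or `None` for an empty iterator. A usage sketch consistent with the tests added above (the concrete `u32` element type is just an example):

    assert_eq!(minmax(Vec::<u32>::new().into_iter()), None);
    assert_eq!(minmax(vec![3u32, 1, 2].into_iter()), Some((1, 3)));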
@@ -89,6 +89,19 @@ impl FixedSize for u64 {
     const SIZE_IN_BYTES: usize = 8;
 }

+impl BinarySerializable for f32 {
+    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+        writer.write_f32::<Endianness>(*self)
+    }
+    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
+        reader.read_f32::<Endianness>()
+    }
+}
+
+impl FixedSize for f32 {
+    const SIZE_IN_BYTES: usize = 4;
+}
+
 impl BinarySerializable for i64 {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
         writer.write_i64::<Endianness>(*self)
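The added `f32` implementation mirrors the integer ones: a fixed four bytes written through the crate's `Endianness` alias. A round-trip sketch, assuming `BinarySerializable` and `FixedSize` are in scope:

    let mut buf: Vec<u8> = Vec::new();
    1.5f32.serialize(&mut buf).unwrap();
    assert_eq!(buf.len(), <f32 as FixedSize>::SIZE_IN_BYTES); // 4 bytes
    let restored = f32::deserialize(&mut &buf[..]).unwrap();
    assert_eq!(restored, 1.5f32);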
@@ -5,12 +5,12 @@ use std::io::Read;
 use std::io::Write;

 /// Wrapper over a `u64` that serializes as a variable int.
-#[derive(Debug, Eq, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
 pub struct VInt(pub u64);

 const STOP_BIT: u8 = 128;

-pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
+pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
     const START_2: u64 = 1 << 7;
     const START_3: u64 = 1 << 14;
     const START_4: u64 = 1 << 21;
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {

     let val = u64::from(val);
     const STOP_BIT: u64 = 128u64;
-    match val {
+    let (res, num_bytes) = match val {
         0..=STOP_1 => (val | STOP_BIT, 1),
         START_2..=STOP_2 => (
             (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,7 +56,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
                 | (STOP_BIT << (8 * 4)),
             5,
         ),
-    }
+    };
+    LittleEndian::write_u64(&mut buf[..], res);
+    &buf[0..num_bytes]
 }

 /// Returns the number of bytes covered by a
@@ -85,23 +87,26 @@ fn vint_len(data: &[u8]) -> usize {
 /// If the buffer does not start by a valid
 /// vint payload
 pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
-    let vlen = vint_len(*data);
+    let (result, vlen) = read_u32_vint_no_advance(*data);
+    *data = &data[vlen..];
+    result
+}
+
+pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
+    let vlen = vint_len(data);
     let mut result = 0u32;
     let mut shift = 0u64;
     for &b in &data[..vlen] {
         result |= u32::from(b & 127u8) << shift;
         shift += 7;
     }
-    *data = &data[vlen..];
-    result
+    (result, vlen)
 }

 /// Write a `u32` as a vint payload.
 pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
-    let (val, num_bytes) = serialize_vint_u32(val);
-    let mut buffer = [0u8; 8];
-    LittleEndian::write_u64(&mut buffer, val);
-    writer.write_all(&buffer[..num_bytes])
+    let mut buf = [0u8; 8];
+    let data = serialize_vint_u32(val, &mut buf);
+    writer.write_all(&data)
 }

 impl VInt {
@@ -172,7 +177,6 @@ mod tests {
     use super::serialize_vint_u32;
     use super::VInt;
     use crate::common::BinarySerializable;
-    use byteorder::{ByteOrder, LittleEndian};

     fn aux_test_vint(val: u64) {
         let mut v = [14u8; 10];
@@ -208,12 +212,10 @@ mod tests {

     fn aux_test_serialize_vint_u32(val: u32) {
         let mut buffer = [0u8; 10];
-        let mut buffer2 = [0u8; 10];
+        let mut buffer2 = [0u8; 8];
         let len_vint = VInt(val as u64).serialize_into(&mut buffer);
-        let (vint, len) = serialize_vint_u32(val);
-        assert_eq!(len, len_vint, "len wrong for val {}", val);
-        LittleEndian::write_u64(&mut buffer2, vint);
-        assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
+        let res2 = serialize_vint_u32(val, &mut buffer2);
+        assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
     }

     #[test]
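With this patch, `serialize_vint_u32` writes into a caller-provided 8-byte buffer and returns the encoded prefix as a slice, instead of returning a packed `(u64, usize)` pair. A round-trip sketch using the companion `read_u32_vint_no_advance` added in the same change (the value 300 is arbitrary):

    let mut buf = [0u8; 8];
    let encoded: &[u8] = serialize_vint_u32(300u32, &mut buf);
    let (decoded, num_bytes) = read_u32_vint_no_advance(encoded);
    assert_eq!(decoded, 300u32);
    assert_eq!(num_bytes, encoded.len());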
@@ -1,6 +1,5 @@
-use crate::Result;
 use crossbeam::channel;
-use scoped_pool::{Pool, ThreadConfig};
+use rayon::{ThreadPool, ThreadPoolBuilder};

 /// Search executor whether search request are single thread or multithread.
 ///
@@ -10,8 +9,10 @@ use scoped_pool::{Pool, ThreadConfig};
 /// API of a dependency, knowing it might conflict with a different version
 /// used by the client. Second, we may stop using rayon in the future.
 pub enum Executor {
+    /// Single thread variant of an Executor
     SingleThread,
-    ThreadPool(Pool),
+    /// Thread pool variant of an Executor
+    ThreadPool(ThreadPool),
 }

 impl Executor {
@@ -20,37 +21,39 @@ impl Executor {
         Executor::SingleThread
     }

-    // Creates an Executor that dispatches the tasks in a thread pool.
-    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
-        let thread_config = ThreadConfig::new().prefix(prefix);
-        let pool = Pool::with_thread_config(num_threads, thread_config);
-        Executor::ThreadPool(pool)
+    /// Creates an Executor that dispatches the tasks in a thread pool.
+    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> crate::Result<Executor> {
+        let pool = ThreadPoolBuilder::new()
+            .num_threads(num_threads)
+            .thread_name(move |num| format!("{}{}", prefix, num))
+            .build()?;
+        Ok(Executor::ThreadPool(pool))
     }

-    // Perform a map in the thread pool.
-    //
-    // Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
-    // will propagate to the caller.
+    /// Perform a map in the thread pool.
+    ///
+    /// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
+    /// will propagate to the caller.
     pub fn map<
         A: Send,
         R: Send,
         AIterator: Iterator<Item = A>,
-        F: Sized + Sync + Fn(A) -> Result<R>,
+        F: Sized + Sync + Fn(A) -> crate::Result<R>,
     >(
         &self,
         f: F,
         args: AIterator,
-    ) -> Result<Vec<R>> {
+    ) -> crate::Result<Vec<R>> {
         match self {
-            Executor::SingleThread => args.map(f).collect::<Result<_>>(),
+            Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
             Executor::ThreadPool(pool) => {
                 let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
                 let num_fruits = args_with_indices.len();
                 let fruit_receiver = {
                     let (fruit_sender, fruit_receiver) = channel::unbounded();
-                    pool.scoped(|scope| {
+                    pool.scope(|scope| {
                         for arg_with_idx in args_with_indices {
-                            scope.execute(|| {
+                            scope.spawn(|_| {
                                 let (idx, arg) = arg_with_idx;
                                 let fruit = f(arg);
                                 if let Err(err) = fruit_sender.send((idx, fruit)) {
@@ -103,6 +106,7 @@ mod tests {
     #[should_panic] //< unfortunately the panic message is not propagated
     fn test_panic_propagates_multi_thread() {
         let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
+            .unwrap()
             .map(
                 |_| {
                     panic!("panic should propagate");
@@ -126,6 +130,7 @@ mod tests {
     #[test]
     fn test_map_multithread() {
         let result: Vec<usize> = Executor::multi_thread(3, "search-test")
+            .unwrap()
             .map(|i| Ok(i * 2), 0..10)
             .unwrap();
         assert_eq!(result.len(), 10);
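Because the rayon pool is now built explicitly, `Executor::multi_thread` is fallible. A sketch of the resulting call-site shape, assumed to run inside a function returning `crate::Result<()>` (the thread-name prefix is arbitrary):

    let executor = Executor::multi_thread(3, "search-")?;
    let doubled: Vec<usize> = executor.map(|i| Ok(i * 2), 0..10)?;
    assert_eq!(doubled, vec![0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);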
@@ -1,4 +1,3 @@
-use super::segment::create_segment;
 use super::segment::Segment;
 use crate::core::Executor;
 use crate::core::IndexMeta;
@@ -6,6 +5,7 @@ use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::core::SegmentMetaInventory;
 use crate::core::META_FILEPATH;
+use crate::directory::error::OpenReadError;
 use crate::directory::ManagedDirectory;
 #[cfg(feature = "mmap")]
 use crate::directory::MmapDirectory;
@@ -20,26 +20,36 @@ use crate::reader::IndexReaderBuilder;
 use crate::schema::Field;
 use crate::schema::FieldType;
 use crate::schema::Schema;
-use crate::tokenizer::BoxedTokenizer;
-use crate::tokenizer::TokenizerManager;
+use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::IndexWriter;
-use crate::Result;
-use num_cpus;
-use std::borrow::BorrowMut;
 use std::collections::HashSet;
 use std::fmt;

 #[cfg(feature = "mmap")]
-use std::path::{Path, PathBuf};
+use std::path::Path;
+use std::path::PathBuf;
 use std::sync::Arc;

-fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
+fn load_metas(
+    directory: &dyn Directory,
+    inventory: &SegmentMetaInventory,
+) -> crate::Result<IndexMeta> {
     let meta_data = directory.atomic_read(&META_FILEPATH)?;
-    let meta_string = String::from_utf8_lossy(&meta_data);
+    let meta_string = String::from_utf8(meta_data).map_err(|_utf8_err| {
+        error!("Meta data is not valid utf8.");
+        DataCorruption::new(
+            META_FILEPATH.to_path_buf(),
+            "Meta file does not contain valid utf8 file.".to_string(),
+        )
+    })?;
     IndexMeta::deserialize(&meta_string, &inventory)
         .map_err(|e| {
             DataCorruption::new(
                 META_FILEPATH.to_path_buf(),
-                format!("Meta file cannot be deserialized. {:?}.", e),
+                format!(
+                    "Meta file cannot be deserialized. {:?}. Content: {:?}",
+                    e, meta_string
+                ),
             )
         })
         .map_err(From::from)
@@ -56,8 +66,10 @@ pub struct Index {
 }

 impl Index {
-    /// Examines the director to see if it contains an index
-    pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
+    /// Examines the directory to see if it contains an index.
+    ///
+    /// Effectively, it only checks for the presence of the `meta.json` file.
+    pub fn exists<Dir: Directory>(dir: &Dir) -> Result<bool, OpenReadError> {
         dir.exists(&META_FILEPATH)
     }

@@ -73,15 +85,16 @@ impl Index {

     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
-    pub fn set_multithread_executor(&mut self, num_threads: usize) {
-        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
+    pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
+        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
+        Ok(())
     }

     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
-    pub fn set_default_multithread_executor(&mut self) {
+    pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
         let default_num_threads = num_cpus::get();
-        self.set_multithread_executor(default_num_threads);
+        self.set_multithread_executor(default_num_threads)
     }

     /// Creates a new index using the `RAMDirectory`.
@@ -98,28 +111,29 @@ impl Index {
     ///
     /// If a previous index was in this directory, then its meta file will be destroyed.
     #[cfg(feature = "mmap")]
-    pub fn create_in_dir<P: AsRef<Path>>(directory_path: P, schema: Schema) -> Result<Index> {
+    pub fn create_in_dir<P: AsRef<Path>>(
+        directory_path: P,
+        schema: Schema,
+    ) -> crate::Result<Index> {
         let mmap_directory = MmapDirectory::open(directory_path)?;
-        if Index::exists(&mmap_directory) {
+        if Index::exists(&mmap_directory)? {
             return Err(TantivyError::IndexAlreadyExists);
         }

         Index::create(mmap_directory, schema)
     }

     /// Opens or creates a new index in the provided directory
-    pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
-        if Index::exists(&dir) {
-            let index = Index::open(dir)?;
-            if index.schema() == schema {
-                Ok(index)
-            } else {
-                Err(TantivyError::SchemaError(
-                    "An index exists but the schema does not match.".to_string(),
-                ))
-            }
+    pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
+        if !Index::exists(&dir)? {
+            return Index::create(dir, schema);
+        }
+        let index = Index::open(dir)?;
+        if index.schema() == schema {
+            Ok(index)
         } else {
-            Index::create(dir, schema)
+            Err(TantivyError::SchemaError(
+                "An index exists but the schema does not match.".to_string(),
+            ))
         }
     }

@@ -132,13 +146,15 @@ impl Index {
     /// The temp directory is only used for testing the `MmapDirectory`.
     /// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
     #[cfg(feature = "mmap")]
-    pub fn create_from_tempdir(schema: Schema) -> Result<Index> {
+    pub fn create_from_tempdir(schema: Schema) -> crate::Result<Index> {
         let mmap_directory = MmapDirectory::create_from_tempdir()?;
         Index::create(mmap_directory, schema)
     }

-    /// Creates a new index given an implementation of the trait `Directory`
-    pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
+    /// Creates a new index given an implementation of the trait `Directory`.
+    ///
+    /// If a directory previously existed, it will be erased.
+    pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
         let directory = ManagedDirectory::wrap(dir)?;
         Index::from_directory(directory, schema)
     }
@@ -146,10 +162,11 @@ impl Index {
     /// Create a new index from a directory.
     ///
     /// This will overwrite existing meta.json
-    fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
-        save_new_metas(schema.clone(), directory.borrow_mut())?;
+    fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
+        save_new_metas(schema.clone(), &directory)?;
         let metas = IndexMeta::with_schema(schema);
-        Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
+        let index = Index::create_from_metas(directory, &metas, SegmentMetaInventory::default());
+        Ok(index)
     }

     /// Creates a new index given a directory and an `IndexMeta`.
@@ -157,15 +174,15 @@ impl Index {
         directory: ManagedDirectory,
         metas: &IndexMeta,
         inventory: SegmentMetaInventory,
-    ) -> Result<Index> {
+    ) -> Index {
         let schema = metas.schema.clone();
-        Ok(Index {
+        Index {
             directory,
             schema,
             tokenizers: TokenizerManager::default(),
             executor: Arc::new(Executor::single_thread()),
             inventory,
-        })
+        }
     }

     /// Accessor for the tokenizer manager.
@@ -174,11 +191,11 @@ impl Index {
     }

     /// Helper to access the tokenizer associated to a specific field.
-    pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> {
+    pub fn tokenizer_for_field(&self, field: Field) -> crate::Result<TextAnalyzer> {
         let field_entry = self.schema.get_field_entry(field);
         let field_type = field_entry.field_type();
         let tokenizer_manager: &TokenizerManager = self.tokenizers();
-        let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type {
+        let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
             FieldType::Str(text_options) => text_options
                 .get_indexing_options()
                 .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -197,7 +214,7 @@ impl Index {
     /// Create a default `IndexReader` for the given index.
     ///
     /// See [`Index.reader_builder()`](#method.reader_builder).
-    pub fn reader(&self) -> Result<IndexReader> {
+    pub fn reader(&self) -> crate::Result<IndexReader> {
         self.reader_builder().try_into()
     }

@@ -212,7 +229,7 @@ impl Index {

     /// Opens a new directory from an index path.
     #[cfg(feature = "mmap")]
-    pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
+    pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> {
         let mmap_directory = MmapDirectory::open(directory_path)?;
         Index::open(mmap_directory)
     }
@@ -236,15 +253,16 @@ impl Index {
     }

     /// Open the index using the provided directory
-    pub fn open<D: Directory>(directory: D) -> Result<Index> {
+    pub fn open<D: Directory>(directory: D) -> crate::Result<Index> {
         let directory = ManagedDirectory::wrap(directory)?;
         let inventory = SegmentMetaInventory::default();
         let metas = load_metas(&directory, &inventory)?;
-        Index::create_from_metas(directory, &metas, inventory)
+        let index = Index::create_from_metas(directory, &metas, inventory);
+        Ok(index)
     }

     /// Reads the index meta file from the directory.
-    pub fn load_metas(&self) -> Result<IndexMeta> {
+    pub fn load_metas(&self) -> crate::Result<IndexMeta> {
         load_metas(self.directory(), &self.inventory)
     }

@@ -272,7 +290,7 @@ impl Index {
         &self,
         num_threads: usize,
         overall_heap_size_in_bytes: usize,
-    ) -> Result<IndexWriter> {
+    ) -> crate::Result<IndexWriter> {
         let directory_lock = self
             .directory
             .acquire_lock(&INDEX_WRITER_LOCK)
@@ -280,7 +298,7 @@ impl Index {
                 TantivyError::LockFailure(
                     err,
                     Some(
-                        "Failed to acquire index lock. If you are using\
+                        "Failed to acquire index lock. If you are using \
                         a regular directory, this means there is already an \
                         `IndexWriter` working on this `Directory`, in this process \
                         or in a different process."
@@ -297,6 +315,15 @@ impl Index {
         )
     }

+    /// Helper to create an index writer for tests.
+    ///
+    /// That index writer only simply has a single thread and a heap of 5 MB.
+    /// Using a single thread gives us a deterministic allocation of DocId.
+    #[cfg(test)]
+    pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
+        self.writer_with_num_threads(1, 10_000_000)
+    }
+
     /// Creates a multithreaded writer
     ///
     /// Tantivy will automatically define the number of threads to use.
@@ -307,7 +334,7 @@ impl Index {
     /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
     /// # Panics
     /// If the heap size per thread is too small, panics.
-    pub fn writer(&self, overall_heap_size_in_bytes: usize) -> Result<IndexWriter> {
+    pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> {
         let mut num_threads = num_cpus::get();
         let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
         if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
@@ -324,7 +351,7 @@ impl Index {
     }

     /// Returns the list of segments that are searchable
-    pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
+    pub fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
         Ok(self
             .searchable_segment_metas()?
             .into_iter()
@@ -334,7 +361,7 @@ impl Index {

     #[doc(hidden)]
     pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
-        create_segment(self.clone(), segment_meta)
+        Segment::for_index(self.clone(), segment_meta)
     }

     /// Creates a new segment.
@@ -357,12 +384,12 @@ impl Index {

     /// Reads the meta.json and returns the list of
     /// `SegmentMeta` from the last commit.
-    pub fn searchable_segment_metas(&self) -> Result<Vec<SegmentMeta>> {
+    pub fn searchable_segment_metas(&self) -> crate::Result<Vec<SegmentMeta>> {
         Ok(self.load_metas()?.segments)
     }

     /// Returns the list of segment ids that are searchable.
-    pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
+    pub fn searchable_segment_ids(&self) -> crate::Result<Vec<SegmentId>> {
         Ok(self
             .searchable_segment_metas()?
             .iter()
@@ -371,7 +398,7 @@ impl Index {
     }

     /// Returns the set of corrupted files
-    pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
+    pub fn validate_checksum(&self) -> crate::Result<HashSet<PathBuf>> {
         self.directory.list_damaged().map_err(Into::into)
     }
 }
@@ -384,15 +411,12 @@ impl fmt::Debug for Index {

 #[cfg(test)]
 mod tests {
-    use crate::directory::RAMDirectory;
+    use crate::directory::{RAMDirectory, WatchCallback};
     use crate::schema::Field;
     use crate::schema::{Schema, INDEXED, TEXT};
-    use crate::Index;
     use crate::IndexReader;
-    use crate::IndexWriter;
     use crate::ReloadPolicy;
-    use std::thread;
-    use std::time::Duration;
+    use crate::{Directory, Index};

     #[test]
     fn test_indexer_for_field() {
@@ -411,24 +435,24 @@ mod tests {
     #[test]
     fn test_index_exists() {
         let directory = RAMDirectory::create();
-        assert!(!Index::exists(&directory));
+        assert!(!Index::exists(&directory).unwrap());
         assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-        assert!(Index::exists(&directory));
+        assert!(Index::exists(&directory).unwrap());
     }

     #[test]
     fn open_or_create_should_create() {
         let directory = RAMDirectory::create();
-        assert!(!Index::exists(&directory));
+        assert!(!Index::exists(&directory).unwrap());
         assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
-        assert!(Index::exists(&directory));
+        assert!(Index::exists(&directory).unwrap());
     }

     #[test]
     fn open_or_create_should_open() {
         let directory = RAMDirectory::create();
         assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-        assert!(Index::exists(&directory));
+        assert!(Index::exists(&directory).unwrap());
         assert!(Index::open_or_create(directory, throw_away_schema()).is_ok());
     }

@@ -436,7 +460,7 @@ mod tests {
     fn create_should_wipeoff_existing() {
         let directory = RAMDirectory::create();
         assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-        assert!(Index::exists(&directory));
+        assert!(Index::exists(&directory).unwrap());
         assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
     }

@@ -444,7 +468,7 @@ mod tests {
     fn open_or_create_exists_but_schema_does_not_match() {
         let directory = RAMDirectory::create();
         assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
-        assert!(Index::exists(&directory));
+        assert!(Index::exists(&directory).unwrap());
         assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
         let err = Index::open_or_create(directory, Schema::builder().build());
         assert_eq!(
@@ -470,14 +494,14 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
+        test_index_on_commit_reload_policy_aux(field, &index, &reader);
     }

     #[cfg(feature = "mmap")]
     mod mmap_specific {

         use super::*;
+        use crate::Directory;
         use std::path::PathBuf;
         use tempfile::TempDir;

@@ -488,36 +512,38 @@ mod tests {
             let tempdir = TempDir::new().unwrap();
             let tempdir_path = PathBuf::from(tempdir.path());
             let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
-            let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            writer.commit().unwrap();
             let reader = index
                 .reader_builder()
                 .reload_policy(ReloadPolicy::OnCommit)
                 .try_into()
                 .unwrap();
             assert_eq!(reader.searcher().num_docs(), 0);
-            test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
+            test_index_on_commit_reload_policy_aux(field, &index, &reader);
         }

         #[test]
-        fn test_index_manual_policy_mmap() {
+        fn test_index_manual_policy_mmap() -> crate::Result<()> {
             let schema = throw_away_schema();
             let field = schema.get_field("num_likes").unwrap();
-            let index = Index::create_from_tempdir(schema).unwrap();
-            let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            writer.commit().unwrap();
+            let mut index = Index::create_from_tempdir(schema)?;
+            let mut writer = index.writer_for_tests()?;
+            writer.commit()?;
             let reader = index
                 .reader_builder()
                 .reload_policy(ReloadPolicy::Manual)
-                .try_into()
-                .unwrap();
+                .try_into()?;
             assert_eq!(reader.searcher().num_docs(), 0);
             writer.add_document(doc!(field=>1u64));
-            writer.commit().unwrap();
-            thread::sleep(Duration::from_millis(500));
+            let (sender, receiver) = crossbeam::channel::unbounded();
+            let _handle = index.directory_mut().watch(WatchCallback::new(move || {
+                let _ = sender.send(());
+            }));
+            writer.commit()?;
+            assert!(receiver.recv().is_ok());
             assert_eq!(reader.searcher().num_docs(), 0);
-            reader.reload().unwrap();
+            reader.reload()?;
             assert_eq!(reader.searcher().num_docs(), 1);
+            Ok(())
         }

         #[test]
@@ -534,39 +560,38 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
-        test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
+        test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
     }

-    fn test_index_on_commit_reload_policy_aux(
-        field: Field,
-        writer: &mut IndexWriter,
-        reader: &IndexReader,
-    ) {
+    fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
+        let mut reader_index = reader.index();
+        let (sender, receiver) = crossbeam::channel::unbounded();
+        let _watch_handle = reader_index
+            .directory_mut()
+            .watch(WatchCallback::new(move || {
+                let _ = sender.send(());
+            }));
+        let mut writer = index.writer_for_tests().unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
         writer.add_document(doc!(field=>1u64));
         writer.commit().unwrap();
-        let mut count = 0;
-        for _ in 0..100 {
-            count = reader.searcher().num_docs();
-            if count > 0 {
+        // We need a loop here because it is possible for notify to send more than
+        // one modify event. It was observed on CI on MacOS.
+        loop {
+            assert!(receiver.recv().is_ok());
+            if reader.searcher().num_docs() == 1 {
                 break;
             }
-            thread::sleep(Duration::from_millis(100));
         }
-        assert_eq!(count, 1);
         writer.add_document(doc!(field=>2u64));
         writer.commit().unwrap();
-        let mut count = 0;
-        for _ in 0..10 {
-            count = reader.searcher().num_docs();
-            if count > 1 {
+        // ... Same as above
+        loop {
+            assert!(receiver.recv().is_ok());
+            if reader.searcher().num_docs() == 2 {
                 break;
             }
-            thread::sleep(Duration::from_millis(100));
         }
-        assert_eq!(count, 2);
     }

     // This test will not pass on windows, because windows
@@ -583,9 +608,13 @@ mod tests {
         for i in 0u64..8_000u64 {
             writer.add_document(doc!(field => i));
         }
+        let (sender, receiver) = crossbeam::channel::unbounded();
+        let _handle = directory.watch(WatchCallback::new(move || {
+            let _ = sender.send(());
+        }));
         writer.commit().unwrap();
         let mem_right_after_commit = directory.total_mem_usage();
-        thread::sleep(Duration::from_millis(1_000));
+        assert!(receiver.recv().is_ok());
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::Manual)
@@ -599,6 +628,11 @@ mod tests {
         reader.reload().unwrap();
         let searcher = reader.searcher();
         assert_eq!(searcher.num_docs(), 8_000);
-        assert!(mem_right_after_merge_finished < mem_right_after_commit);
+        assert!(
+            mem_right_after_merge_finished < mem_right_after_commit,
+            "(mem after merge){} is expected < (mem before merge){}",
+            mem_right_after_merge_finished,
+            mem_right_after_commit
+        );
     }
 }
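After this change, `Index::exists` returns `Result<bool, OpenReadError>` and the executor setters return `crate::Result<()>`, so call sites thread errors with `?`. A sketch against an in-memory directory, assumed to run inside a function returning `crate::Result<()>` (the empty schema is only a placeholder):

    let schema = Schema::builder().build();
    let directory = RAMDirectory::create();
    assert!(!Index::exists(&directory)?);
    let mut index = Index::open_or_create(directory, schema)?;
    index.set_multithread_executor(4)?;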
@@ -3,8 +3,7 @@ use crate::core::SegmentId;
 use crate::schema::Schema;
 use crate::Opstamp;
 use census::{Inventory, TrackedObject};
-use serde;
-use serde_json;
+use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
 use std::fmt;
 use std::path::PathBuf;
@@ -150,6 +149,21 @@ impl SegmentMeta {
         self.num_deleted_docs() > 0
     }

+    /// Updates the max_doc value from the `SegmentMeta`.
+    ///
+    /// This method is only used when updating `max_doc` from 0
+    /// as we finalize a fresh new segment.
+    pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
+        assert_eq!(self.tracked.max_doc, 0);
+        assert!(self.tracked.deletes.is_none());
+        let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
+            segment_id: inner_meta.segment_id,
+            max_doc,
+            deletes: None,
+        });
+        SegmentMeta { tracked }
+    }
+
     #[doc(hidden)]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
         let delete_meta = DeleteMeta {
@@ -199,7 +213,7 @@ pub struct IndexMeta {
     #[serde(skip_serializing_if = "Option::is_none")]
     /// Payload associated to the last commit.
     ///
-    /// Upon commit, clients can optionally add a small `Striing` payload to their commit
+    /// Upon commit, clients can optionally add a small `String` payload to their commit
     /// to help identify this commit.
     /// This payload is entirely unused by tantivy.
     pub payload: Option<String>,
@@ -285,6 +299,9 @@ mod tests {
             payload: None,
         };
         let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
-        assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
+        assert_eq!(
+            json,
+            r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
+        );
     }
 }
@@ -1,13 +1,13 @@
+use std::io;
+
 use crate::common::BinarySerializable;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
 use crate::positions::PositionReader;
 use crate::postings::TermInfo;
 use crate::postings::{BlockSegmentPostings, SegmentPostings};
-use crate::schema::FieldType;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::termdict::TermDictionary;
-use owned_read::OwnedRead;

 /// The inverted index reader is in charge of accessing
 /// the inverted index associated to a specific field.
@@ -16,7 +16,7 @@ use owned_read::OwnedRead;
 ///
 /// It is safe to delete the segment associated to
 /// an `InvertedIndexReader`. As long as it is open,
-/// the `ReadOnlySource` it is relying on should
+/// the `FileSlice` it is relying on should
 /// stay available.
 ///
 ///
@@ -24,9 +24,9 @@ use owned_read::OwnedRead;
 /// the `SegmentReader`'s [`.inverted_index(...)`] method
 pub struct InvertedIndexReader {
     termdict: TermDictionary,
-    postings_source: ReadOnlySource,
+    postings_file_slice: FileSlice,
-    positions_source: ReadOnlySource,
+    positions_file_slice: FileSlice,
-    positions_idx_source: ReadOnlySource,
+    positions_idx_file_slice: FileSlice,
     record_option: IndexRecordOption,
     total_num_tokens: u64,
 }
@@ -35,42 +35,38 @@ impl InvertedIndexReader {
     #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
-        postings_source: ReadOnlySource,
+        postings_file_slice: FileSlice,
-        positions_source: ReadOnlySource,
+        positions_file_slice: FileSlice,
-        positions_idx_source: ReadOnlySource,
+        positions_idx_file_slice: FileSlice,
         record_option: IndexRecordOption,
-    ) -> InvertedIndexReader {
+    ) -> io::Result<InvertedIndexReader> {
-        let total_num_tokens_data = postings_source.slice(0, 8);
+        let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
-        let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
+        let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
-        let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
+        Ok(InvertedIndexReader {
-        InvertedIndexReader {
             termdict,
-            postings_source: postings_source.slice_from(8),
+            postings_file_slice: postings_body,
-            positions_source,
+            positions_file_slice,
-            positions_idx_source,
+            positions_idx_file_slice,
             record_option,
             total_num_tokens,
-        }
+        })
     }

     /// Creates an empty `InvertedIndexReader` object, which
     /// contains no terms at all.
-    pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
+    pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
-        let record_option = field_type
-            .get_index_record_option()
-            .unwrap_or(IndexRecordOption::Basic);
         InvertedIndexReader {
-            termdict: TermDictionary::empty(&field_type),
+            termdict: TermDictionary::empty(),
-            postings_source: ReadOnlySource::empty(),
+            postings_file_slice: FileSlice::empty(),
-            positions_source: ReadOnlySource::empty(),
+            positions_file_slice: FileSlice::empty(),
-            positions_idx_source: ReadOnlySource::empty(),
+            positions_idx_file_slice: FileSlice::empty(),
             record_option,
             total_num_tokens: 0u64,
         }
     }

     /// Returns the term info associated with the term.
-    pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
+    pub fn get_term_info(&self, term: &Term) -> io::Result<Option<TermInfo>> {
         self.termdict.get(term.value_bytes())
     }

@@ -93,12 +89,12 @@ impl InvertedIndexReader {
         &self,
         term_info: &TermInfo,
         block_postings: &mut BlockSegmentPostings,
-    ) {
+    ) -> io::Result<()> {
-        let offset = term_info.postings_offset as usize;
+        let postings_slice = self
-        let end_source = self.postings_source.len();
+            .postings_file_slice
-        let postings_slice = self.postings_source.slice(offset, end_source);
+            .slice(term_info.postings_range.clone());
-        let postings_reader = OwnedRead::new(postings_slice);
+        block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
-        block_postings.reset(term_info.doc_freq, postings_reader);
+        Ok(())
     }

     /// Returns a block postings given a `Term`.
@@ -109,9 +105,10 @@ impl InvertedIndexReader {
         &self,
         term: &Term,
         option: IndexRecordOption,
-    ) -> Option<BlockSegmentPostings> {
+    ) -> io::Result<Option<BlockSegmentPostings>> {
-        self.get_term_info(term)
+        self.get_term_info(term)?
             .map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
+            .transpose()
     }

     /// Returns a block postings given a `term_info`.
@@ -122,12 +119,13 @@ impl InvertedIndexReader {
         &self,
         term_info: &TermInfo,
         requested_option: IndexRecordOption,
-    ) -> BlockSegmentPostings {
+    ) -> io::Result<BlockSegmentPostings> {
-        let offset = term_info.postings_offset as usize;
+        let postings_data = self
-        let postings_data = self.postings_source.slice_from(offset);
+            .postings_file_slice
-        BlockSegmentPostings::from_data(
+            .slice(term_info.postings_range.clone());
+        BlockSegmentPostings::open(
             term_info.doc_freq,
-            OwnedRead::new(postings_data),
+            postings_data,
             self.record_option,
             requested_option,
         )
@@ -141,20 +139,23 @@ impl InvertedIndexReader {
         &self,
         term_info: &TermInfo,
         option: IndexRecordOption,
-    ) -> SegmentPostings {
+    ) -> io::Result<SegmentPostings> {
-        let block_postings = self.read_block_postings_from_terminfo(term_info, option);
+        let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
         let position_stream = {
             if option.has_positions() {
-                let position_reader = self.positions_source.clone();
+                let position_reader = self.positions_file_slice.clone();
-                let skip_reader = self.positions_idx_source.clone();
+                let skip_reader = self.positions_idx_file_slice.clone();
                 let position_reader =
-                    PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
+                    PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?;
                 Some(position_reader)
             } else {
                 None
             }
         };
-        SegmentPostings::from_block_postings(block_postings, position_stream)
+        Ok(SegmentPostings::from_block_postings(
+            block_postings,
+            position_stream,
+        ))
     }

     /// Returns the total number of tokens recorded for all documents
@@ -173,24 +174,31 @@ impl InvertedIndexReader {
     /// For instance, requesting `IndexRecordOption::Freq` for a
     /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
     /// with `DocId`s and frequencies.
-    pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
+    pub fn read_postings(
-        self.get_term_info(term)
+        &self,
+        term: &Term,
+        option: IndexRecordOption,
+    ) -> io::Result<Option<SegmentPostings>> {
+        self.get_term_info(term)?
             .map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
+            .transpose()
     }

     pub(crate) fn read_postings_no_deletes(
         &self,
         term: &Term,
         option: IndexRecordOption,
-    ) -> Option<SegmentPostings> {
+    ) -> io::Result<Option<SegmentPostings>> {
-        self.get_term_info(term)
+        self.get_term_info(term)?
             .map(|term_info| self.read_postings_from_terminfo(&term_info, option))
+            .transpose()
     }

     /// Returns the number of documents containing the term.
-    pub fn doc_freq(&self, term: &Term) -> u32 {
+    pub fn doc_freq(&self, term: &Term) -> io::Result<u32> {
-        self.get_term_info(term)
+        Ok(self
+            .get_term_info(term)?
             .map(|term_info| term_info.doc_freq)
-            .unwrap_or(0u32)
+            .unwrap_or(0u32))
     }
 }
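The hunks above make nearly every `InvertedIndexReader` accessor fallible: `get_term_info`, `read_postings`, `doc_freq` and friends now return `io::Result<...>` instead of panicking or defaulting when a read fails. A minimal sketch of a call site under that API; the type paths and the helper name are assumptions for illustration, not part of the diff:

use std::io;
use tantivy::schema::{IndexRecordOption, Term};
use tantivy::InvertedIndexReader; // re-export path assumed

// Hypothetical helper: the io::Result layer carries storage errors,
// while the inner Option still means "term absent from this segment".
fn term_has_postings(inv_index: &InvertedIndexReader, term: &Term) -> io::Result<bool> {
    let postings_opt = inv_index.read_postings(term, IndexRecordOption::Basic)?;
    Ok(postings_opt.is_some())
}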
@@ -1,42 +1,17 @@
 use crate::collector::Collector;
-use crate::collector::SegmentCollector;
 use crate::core::Executor;
-use crate::core::InvertedIndexReader;
 use crate::core::SegmentReader;
 use crate::query::Query;
-use crate::query::Scorer;
-use crate::query::Weight;
 use crate::schema::Document;
 use crate::schema::Schema;
-use crate::schema::{Field, Term};
+use crate::schema::Term;
 use crate::space_usage::SearcherSpaceUsage;
 use crate::store::StoreReader;
-use crate::termdict::TermMerger;
 use crate::DocAddress;
 use crate::Index;
-use crate::Result;
-use std::fmt;
-use std::sync::Arc;

-fn collect_segment<C: Collector>(
+use std::{fmt, io};
-    collector: &C,
-    weight: &dyn Weight,
-    segment_ord: u32,
-    segment_reader: &SegmentReader,
-) -> Result<C::Fruit> {
-    let mut scorer = weight.scorer(segment_reader)?;
-    let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
-    if let Some(delete_bitset) = segment_reader.delete_bitset() {
-        scorer.for_each(&mut |doc, score| {
-            if delete_bitset.is_alive(doc) {
-                segment_collector.collect(doc, score);
-            }
-        });
-    } else {
-        scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
-    }
-    Ok(segment_collector.harvest())
-}

 /// Holds a list of `SegmentReader`s ready for search.
 ///
@@ -56,17 +31,17 @@ impl Searcher {
         schema: Schema,
         index: Index,
         segment_readers: Vec<SegmentReader>,
-    ) -> Searcher {
+    ) -> io::Result<Searcher> {
-        let store_readers = segment_readers
+        let store_readers: Vec<StoreReader> = segment_readers
             .iter()
             .map(SegmentReader::get_store_reader)
-            .collect();
+            .collect::<io::Result<Vec<_>>>()?;
-        Searcher {
+        Ok(Searcher {
             schema,
             index,
             segment_readers,
             store_readers,
-        }
+        })
     }

     /// Returns the `Index` associated to the `Searcher`
@@ -78,7 +53,7 @@ impl Searcher {
     ///
     /// The searcher uses the segment ordinal to route the
     /// the request to the right `Segment`.
-    pub fn doc(&self, doc_address: DocAddress) -> Result<Document> {
+    pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
         let DocAddress(segment_local_id, doc_id) = doc_address;
         let store_reader = &self.store_readers[segment_local_id as usize];
         store_reader.get(doc_id)
@@ -99,13 +74,14 @@ impl Searcher {

     /// Return the overall number of documents containing
     /// the given term.
-    pub fn doc_freq(&self, term: &Term) -> u64 {
+    pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
-        self.segment_readers
+        let mut total_doc_freq = 0;
-            .iter()
+        for segment_reader in &self.segment_readers {
-            .map(|segment_reader| {
+            let inverted_index = segment_reader.inverted_index(term.field())?;
-                u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
+            let doc_freq = inverted_index.doc_freq(term)?;
-            })
+            total_doc_freq += u64::from(doc_freq);
-            .sum::<u64>()
+        }
+        Ok(total_doc_freq)
     }

     /// Return the list of segment readers
@@ -132,7 +108,11 @@ impl Searcher {
     ///
     /// Finally, the Collector merges each of the child collectors into itself for result usability
     /// by the caller.
-    pub fn search<C: Collector>(&self, query: &dyn Query, collector: &C) -> Result<C::Fruit> {
+    pub fn search<C: Collector>(
+        &self,
+        query: &dyn Query,
+        collector: &C,
+    ) -> crate::Result<C::Fruit> {
         let executor = self.index.search_executor();
         self.search_with_executor(query, collector, executor)
     }
@@ -154,62 +134,26 @@ impl Searcher {
         query: &dyn Query,
         collector: &C,
         executor: &Executor,
-    ) -> Result<C::Fruit> {
+    ) -> crate::Result<C::Fruit> {
         let scoring_enabled = collector.requires_scoring();
         let weight = query.weight(self, scoring_enabled)?;
         let segment_readers = self.segment_readers();
         let fruits = executor.map(
             |(segment_ord, segment_reader)| {
-                collect_segment(
+                collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
-                    collector,
-                    weight.as_ref(),
-                    segment_ord as u32,
-                    segment_reader,
-                )
             },
             segment_readers.iter().enumerate(),
         )?;
         collector.merge_fruits(fruits)
     }

-    /// Return the field searcher associated to a `Field`.
-    pub fn field(&self, field: Field) -> FieldSearcher {
-        let inv_index_readers = self
-            .segment_readers
-            .iter()
-            .map(|segment_reader| segment_reader.inverted_index(field))
-            .collect::<Vec<_>>();
-        FieldSearcher::new(inv_index_readers)
-    }

     /// Summarize total space usage of this searcher.
-    pub fn space_usage(&self) -> SearcherSpaceUsage {
+    pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
         let mut space_usage = SearcherSpaceUsage::new();
-        for segment_reader in self.segment_readers.iter() {
+        for segment_reader in &self.segment_readers {
-            space_usage.add_segment(segment_reader.space_usage());
+            space_usage.add_segment(segment_reader.space_usage()?);
         }
-        space_usage
+        Ok(space_usage)
-    }
-}

-pub struct FieldSearcher {
-    inv_index_readers: Vec<Arc<InvertedIndexReader>>,
-}

-impl FieldSearcher {
-    fn new(inv_index_readers: Vec<Arc<InvertedIndexReader>>) -> FieldSearcher {
-        FieldSearcher { inv_index_readers }
-    }

-    /// Returns a Stream over all of the sorted unique terms of
-    /// for the given field.
-    pub fn terms(&self) -> TermMerger<'_> {
-        let term_streamers: Vec<_> = self
-            .inv_index_readers
-            .iter()
-            .map(|inverted_index| inverted_index.terms().stream())
-            .collect();
-        TermMerger::new(term_streamers)
     }
 }

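With per-segment collection moved onto the `Collector` trait and `Searcher::new`, `doc_freq` and `space_usage` becoming fallible, a typical search call changes very little; only the extra `?` layers show up. A small usage sketch, assuming (purely for illustration) that a text field named "body" exists in the schema:

use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::Index;

// Sketch only: counts documents matching a user query; every fallible step
// (reader creation, query parsing, the search itself) is propagated with `?`.
fn count_matches(index: &Index, query_str: &str) -> tantivy::Result<usize> {
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let body = index
        .schema()
        .get_field("body")
        .expect("the example assumes a `body` field");
    let query = QueryParser::for_index(index, vec![body]).parse_query(query_str)?;
    // Per-segment collection is now delegated to Collector::collect_segment.
    searcher.search(&query, &Count)
}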
@@ -4,14 +4,12 @@ use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::directory::error::{OpenReadError, OpenWriteError};
 use crate::directory::Directory;
-use crate::directory::{ReadOnlySource, WritePtr};
+use crate::directory::{FileSlice, WritePtr};
 use crate::indexer::segment_serializer::SegmentSerializer;
 use crate::schema::Schema;
 use crate::Opstamp;
-use crate::Result;
 use std::fmt;
 use std::path::PathBuf;
-use std::result;

 /// A segment is a piece of the index.
 #[derive(Clone)]
@@ -26,15 +24,12 @@ impl fmt::Debug for Segment {
     }
 }

-/// Creates a new segment given an `Index` and a `SegmentId`
-///
-/// The function is here to make it private outside `tantivy`.
-/// #[doc(hidden)]
-pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
-    Segment { index, meta }
-}
-
 impl Segment {
+    /// Creates a new segment given an `Index` and a `SegmentId`
+    pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
+        Segment { index, meta }
+    }
+
     /// Returns the index the segment belongs to.
     pub fn index(&self) -> &Index {
         &self.index
@@ -50,6 +45,17 @@ impl Segment {
         &self.meta
     }

+    /// Updates the max_doc value from the `SegmentMeta`.
+    ///
+    /// This method is only used when updating `max_doc` from 0
+    /// as we finalize a fresh new segment.
+    pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
+        Segment {
+            index: self.index,
+            meta: self.meta.with_max_doc(max_doc),
+        }
+    }
+
     #[doc(hidden)]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
         Segment {
@@ -72,20 +78,13 @@ impl Segment {
     }

     /// Open one of the component file for a *regular* read.
-    pub fn open_read(
+    pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> {
-        &self,
-        component: SegmentComponent,
-    ) -> result::Result<ReadOnlySource, OpenReadError> {
         let path = self.relative_path(component);
-        let source = self.index.directory().open_read(&path)?;
+        self.index.directory().open_read(&path)
-        Ok(source)
     }

     /// Open one of the component file for *regular* write.
-    pub fn open_write(
+    pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
-        &mut self,
-        component: SegmentComponent,
-    ) -> result::Result<WritePtr, OpenWriteError> {
         let path = self.relative_path(component);
         let write = self.index.directory_mut().open_write(&path)?;
         Ok(write)
@@ -98,5 +97,5 @@ pub trait SerializableSegment {
     ///
     /// # Returns
     /// The number of documents in the segment.
-    fn write(&self, serializer: SegmentSerializer) -> Result<u32>;
+    fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32>;
 }
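`Segment::open_read` now hands back a lazy `FileSlice` rather than a `ReadOnlySource`, so bytes are only pulled from the `Directory` when the caller asks for them. A hedged sketch of what that looks like from the outside; the helper is hypothetical and the re-export paths are assumptions, while `read_bytes` is the accessor used elsewhere in this branch:

use tantivy::{Segment, SegmentComponent}; // re-export paths assumed

// Hypothetical helper: loads the store component of a segment into memory.
// open_read() can fail with OpenReadError, read_bytes() with io::Error;
// both are assumed to convert into tantivy's crate-level error here.
fn load_store_bytes(segment: &Segment) -> tantivy::Result<Vec<u8>> {
    let store_slice = segment.open_read(SegmentComponent::STORE)?;
    let bytes = store_slice.read_bytes()?;
    Ok(bytes.to_vec())
}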
@@ -20,7 +20,7 @@ pub enum SegmentComponent {
     /// Dictionary associating `Term`s to `TermInfo`s which is
     /// simply an address into the `postings` file and the `positions` file.
     TERMS,
-    /// Row-oriented, LZ4-compressed storage of the documents.
+    /// Row-oriented, compressed storage of the documents.
     /// Accessing a document from the store is relatively slow, as it
     /// requires to decompress the entire block it belongs to.
     STORE,
@@ -4,6 +4,7 @@ use uuid::Uuid;

 #[cfg(test)]
 use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
 use std::error::Error;
 use std::str::FromStr;
 #[cfg(test)]
@@ -76,7 +77,7 @@ impl SegmentId {
 }

 /// Error type used when parsing a `SegmentId` from a string fails.
-pub struct SegmentIdParseError(uuid::parser::ParseError);
+pub struct SegmentIdParseError(uuid::Error);

 impl Error for SegmentIdParseError {}

@@ -1,27 +1,26 @@
-use crate::common::CompositeFile;
 use crate::common::HasLen;
 use crate::core::InvertedIndexReader;
 use crate::core::Segment;
 use crate::core::SegmentComponent;
 use crate::core::SegmentId;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
 use crate::fastfield::DeleteBitSet;
 use crate::fastfield::FacetReader;
 use crate::fastfield::FastFieldReaders;
-use crate::fieldnorm::FieldNormReader;
+use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
-use crate::schema::Field;
 use crate::schema::FieldType;
 use crate::schema::Schema;
+use crate::schema::{Field, IndexRecordOption};
 use crate::space_usage::SegmentSpaceUsage;
 use crate::store::StoreReader;
 use crate::termdict::TermDictionary;
 use crate::DocId;
-use crate::Result;
+use crate::{common::CompositeFile, error::DataCorruption};
 use fail::fail_point;
-use std::collections::HashMap;
 use std::fmt;
 use std::sync::Arc;
 use std::sync::RwLock;
+use std::{collections::HashMap, io};

 /// Entry point to access all of the datastructures of the `Segment`
 ///
@@ -49,9 +48,9 @@ pub struct SegmentReader {
     positions_composite: CompositeFile,
     positions_idx_composite: CompositeFile,
     fast_fields_readers: Arc<FastFieldReaders>,
-    fieldnorms_composite: CompositeFile,
+    fieldnorm_readers: FieldNormReaders,

-    store_source: ReadOnlySource,
+    store_file: FileSlice,
     delete_bitset_opt: Option<DeleteBitSet>,
     schema: Schema,
 }
@@ -107,16 +106,24 @@ impl SegmentReader {
     }

     /// Accessor to the `FacetReader` associated to a given `Field`.
-    pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
+    pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
         let field_entry = self.schema.get_field_entry(field);
-        if field_entry.field_type() != &FieldType::HierarchicalFacet {
-            return None;
+        match field_entry.field_type() {
+            FieldType::HierarchicalFacet(_) => {
+                let term_ords_reader = self.fast_fields().u64s(field)?;
+                let termdict = self
+                    .termdict_composite
+                    .open_read(field)
+                    .map(TermDictionary::open)
+                    .unwrap_or_else(|| Ok(TermDictionary::empty()))?;
+                Ok(FacetReader::new(term_ords_reader, termdict))
+            }
+            _ => Err(crate::TantivyError::InvalidArgument(format!(
+                "Field {:?} is not a facet field.",
+                field_entry.name()
+            ))),
         }
-        let term_ords_reader = self.fast_fields().u64s(field)?;
-        let termdict_source = self.termdict_composite.open_read(field)?;
-        let termdict = TermDictionary::from_source(&termdict_source);
-        let facet_reader = FacetReader::new(term_ords_reader, termdict);
-        Some(facet_reader)
     }

     /// Accessor to the segment's `Field norms`'s reader.
@@ -126,47 +133,45 @@ impl SegmentReader {
     ///
     /// They are simply stored as a fast field, serialized in
     /// the `.fieldnorm` file of the segment.
-    pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
+    pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
-        if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
+        self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
-            FieldNormReader::open(fieldnorm_source)
-        } else {
             let field_name = self.schema.get_field_name(field);
             let err_msg = format!(
-                "Field norm not found for field {:?}. Was it market as indexed during indexing.",
+                "Field norm not found for field {:?}. Was it marked as indexed during indexing?",
                 field_name
             );
-            panic!(err_msg);
+            crate::TantivyError::SchemaError(err_msg)
-        }
+        })
     }

     /// Accessor to the segment's `StoreReader`.
-    pub fn get_store_reader(&self) -> StoreReader {
+    pub fn get_store_reader(&self) -> io::Result<StoreReader> {
-        StoreReader::from_source(self.store_source.clone())
+        StoreReader::open(self.store_file.clone())
     }

     /// Open a new segment for reading.
-    pub fn open(segment: &Segment) -> Result<SegmentReader> {
+    pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
-        let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
+        let termdict_file = segment.open_read(SegmentComponent::TERMS)?;
-        let termdict_composite = CompositeFile::open(&termdict_source)?;
+        let termdict_composite = CompositeFile::open(&termdict_file)?;

-        let store_source = segment.open_read(SegmentComponent::STORE)?;
+        let store_file = segment.open_read(SegmentComponent::STORE)?;

         fail_point!("SegmentReader::open#middle");

-        let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
+        let postings_file = segment.open_read(SegmentComponent::POSTINGS)?;
-        let postings_composite = CompositeFile::open(&postings_source)?;
+        let postings_composite = CompositeFile::open(&postings_file)?;

         let positions_composite = {
-            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
+            if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) {
-                CompositeFile::open(&source)?
+                CompositeFile::open(&positions_file)?
             } else {
                 CompositeFile::empty()
             }
         };

         let positions_idx_composite = {
-            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
+            if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
-                CompositeFile::open(&source)?
+                CompositeFile::open(&positions_skip_file)?
             } else {
                 CompositeFile::empty()
             }
@@ -177,28 +182,29 @@ impl SegmentReader {
         let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
         let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
         let fast_field_readers =
-            Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
+            Arc::new(FastFieldReaders::new(schema.clone(), fast_fields_composite));

-        let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
+        let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
-        let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
+        let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;

         let delete_bitset_opt = if segment.meta().has_deletes() {
             let delete_data = segment.open_read(SegmentComponent::DELETE)?;
-            Some(DeleteBitSet::open(delete_data))
+            let delete_bitset = DeleteBitSet::open(delete_data)?;
+            Some(delete_bitset)
         } else {
             None
         };

         Ok(SegmentReader {
-            inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
+            inv_idx_reader_cache: Default::default(),
             max_doc: segment.meta().max_doc(),
             num_docs: segment.meta().num_docs(),
             termdict_composite,
             postings_composite,
             fast_fields_readers: fast_field_readers,
-            fieldnorms_composite,
+            fieldnorm_readers,
             segment_id: segment.id(),
-            store_source,
+            store_file,
             delete_bitset_opt,
             positions_composite,
             positions_idx_composite,
@@ -213,58 +219,64 @@ impl SegmentReader {
     /// The field reader is in charge of iterating through the
     /// term dictionary associated to a specific field,
     /// and opening the posting list associated to any term.
-    pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
+    ///
+    /// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
+    /// is returned.
+    /// Similarly if the field is marked as indexed but no term has been indexed for the given
+    /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
+    pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
         if let Some(inv_idx_reader) = self
             .inv_idx_reader_cache
             .read()
             .expect("Lock poisoned. This should never happen")
             .get(&field)
         {
-            return Arc::clone(inv_idx_reader);
+            return Ok(Arc::clone(inv_idx_reader));
         }
         let field_entry = self.schema.get_field_entry(field);
         let field_type = field_entry.field_type();
         let record_option_opt = field_type.get_index_record_option();

         if record_option_opt.is_none() {
-            panic!("Field {:?} does not seem indexed.", field_entry.name());
+            warn!("Field {:?} does not seem indexed.", field_entry.name());
         }

-        let record_option = record_option_opt.unwrap();
+        let postings_file_opt = self.postings_composite.open_read(field);

-        let postings_source_opt = self.postings_composite.open_read(field);
+        if postings_file_opt.is_none() || record_option_opt.is_none() {
-
-        if postings_source_opt.is_none() {
             // no documents in the segment contained this field.
             // As a result, no data is associated to the inverted index.
             //
             // Returns an empty inverted index.
-            return Arc::new(InvertedIndexReader::empty(field_type));
+            let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
+            return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
         }

-        let postings_source = postings_source_opt.unwrap();
+        let record_option = record_option_opt.unwrap();
+        let postings_file = postings_file_opt.unwrap();

-        let termdict_source = self.termdict_composite.open_read(field).expect(
+        let termdict_file: FileSlice = self.termdict_composite.open_read(field)
-            "Failed to open field term dictionary in composite file. Is the field indexed?",
+            .ok_or_else(||
-        );
+                DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name()))
+            )?;

-        let positions_source = self
+        let positions_file = self
             .positions_composite
             .open_read(field)
             .expect("Index corrupted. Failed to open field positions in composite file.");

-        let positions_idx_source = self
+        let positions_idx_file = self
             .positions_idx_composite
             .open_read(field)
             .expect("Index corrupted. Failed to open field positions in composite file.");

         let inv_idx_reader = Arc::new(InvertedIndexReader::new(
-            TermDictionary::from_source(&termdict_source),
+            TermDictionary::open(termdict_file)?,
-            postings_source,
+            postings_file,
-            positions_source,
+            positions_file,
-            positions_idx_source,
+            positions_idx_file,
             record_option,
-        ));
+        )?);

         // by releasing the lock in between, we may end up opening the inverting index
         // twice, but this is fine.
@@ -273,7 +285,7 @@ impl SegmentReader {
             .expect("Field reader cache lock poisoned. This should never happen.")
             .insert(field, Arc::clone(&inv_idx_reader));

-        inv_idx_reader
+        Ok(inv_idx_reader)
     }

     /// Returns the segment id
@@ -296,26 +308,26 @@ impl SegmentReader {
     }

     /// Returns an iterator that will iterate over the alive document ids
-    pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
+    pub fn doc_ids_alive(&self) -> impl Iterator<Item = DocId> + '_ {
-        SegmentReaderAliveDocsIterator::new(&self)
+        (0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
     }

     /// Summarize total space usage of this segment.
-    pub fn space_usage(&self) -> SegmentSpaceUsage {
+    pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
-        SegmentSpaceUsage::new(
+        Ok(SegmentSpaceUsage::new(
             self.num_docs(),
             self.termdict_composite.space_usage(),
             self.postings_composite.space_usage(),
             self.positions_composite.space_usage(),
             self.positions_idx_composite.space_usage(),
             self.fast_fields_readers.space_usage(),
-            self.fieldnorms_composite.space_usage(),
+            self.fieldnorm_readers.space_usage(),
-            self.get_store_reader().space_usage(),
+            self.get_store_reader()?.space_usage(),
             self.delete_bitset_opt
                 .as_ref()
                 .map(DeleteBitSet::space_usage)
                 .unwrap_or(0),
-        )
+        ))
     }
 }

@@ -325,52 +337,6 @@ impl fmt::Debug for SegmentReader {
     }
 }

-/// Implements the iterator trait to allow easy iteration
-/// over non-deleted ("alive") DocIds in a SegmentReader
-pub struct SegmentReaderAliveDocsIterator<'a> {
-    reader: &'a SegmentReader,
-    max_doc: DocId,
-    current: DocId,
-}
-
-impl<'a> SegmentReaderAliveDocsIterator<'a> {
-    pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
-        SegmentReaderAliveDocsIterator {
-            reader,
-            max_doc: reader.max_doc(),
-            current: 0,
-        }
-    }
-}
-
-impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
-    type Item = DocId;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // TODO: Use TinySet (like in BitSetDocSet) to speed this process up
-        if self.current >= self.max_doc {
-            return None;
-        }
-
-        // find the next alive doc id
-        while self.reader.is_deleted(self.current) {
-            self.current += 1;
-
-            if self.current >= self.max_doc {
-                return None;
-            }
-        }
-
-        // capture the current alive DocId
-        let result = Some(self.current);
-
-        // move down the chain
-        self.current += 1;
-
-        result
-    }
-}
-
 #[cfg(test)]
 mod test {
     use crate::core::Index;
@@ -378,7 +344,7 @@ mod test {
     use crate::DocId;

     #[test]
-    fn test_alive_docs_iterator() {
+    fn test_alive_docs_iterator() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         schema_builder.add_text_field("name", TEXT | STORED);
         let schema = schema_builder.build();
@@ -386,26 +352,26 @@ mod test {
         let name = schema.get_field("name").unwrap();

         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             index_writer.add_document(doc!(name => "tantivy"));
             index_writer.add_document(doc!(name => "horse"));
             index_writer.add_document(doc!(name => "jockey"));
             index_writer.add_document(doc!(name => "cap"));

             // we should now have one segment with two docs
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }

         {
-            let mut index_writer2 = index.writer(50_000_000).unwrap();
+            let mut index_writer2 = index.writer(50_000_000)?;
             index_writer2.delete_term(Term::from_field_text(name, "horse"));
             index_writer2.delete_term(Term::from_field_text(name, "cap"));

             // ok, now we should have a deleted doc
-            index_writer2.commit().unwrap();
+            index_writer2.commit()?;
         }
-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();
         let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
         assert_eq!(vec![0u32, 2u32], docs);
+        Ok(())
     }
 }
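The hand-rolled `SegmentReaderAliveDocsIterator` is gone; `doc_ids_alive` now returns `impl Iterator<Item = DocId> + '_` built from a plain range-plus-filter, so callers keep collecting alive doc ids the same way. A tiny sketch (the helper name is ours, not part of the diff):

use tantivy::{DocId, SegmentReader};

// Sketch only: gathers the non-deleted DocIds of one segment.
fn alive_doc_ids(segment_reader: &SegmentReader) -> Vec<DocId> {
    segment_reader.doc_ids_alive().collect()
}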
@@ -1,9 +1,9 @@
 use crate::directory::directory_lock::Lock;
 use crate::directory::error::LockError;
 use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
-use crate::directory::WatchCallback;
 use crate::directory::WatchHandle;
-use crate::directory::{ReadOnlySource, WritePtr};
+use crate::directory::{FileHandle, WatchCallback};
+use crate::directory::{FileSlice, WritePtr};
 use std::fmt;
 use std::io;
 use std::io::Write;
@@ -11,7 +11,6 @@ use std::marker::Send;
 use std::marker::Sync;
 use std::path::Path;
 use std::path::PathBuf;
-use std::result;
 use std::thread;
 use std::time::Duration;

@@ -80,7 +79,7 @@ fn try_acquire_lock(
 ) -> Result<DirectoryLock, TryAcquireLockError> {
     let mut write = directory.open_write(filepath).map_err(|e| match e {
         OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
-        OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
+        OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error),
     })?;
     write.flush().map_err(TryAcquireLockError::IOError)?;
     Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
@@ -109,37 +108,43 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
 /// should be your default choice.
 /// - The [`RAMDirectory`](struct.RAMDirectory.html), which
 /// should be used mostly for tests.
-///
 pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
-    /// Opens a virtual file for read.
+    /// Opens a file and returns a boxed `FileHandle`.
     ///
+    /// Users of `Directory` should typically call `Directory::open_read(...)`,
+    /// while `Directory` implementor should implement `get_file_handle()`.
+    fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
+
     /// Once a virtual file is open, its data may not
     /// change.
     ///
     /// Specifically, subsequent writes or flushes should
-    /// have no effect on the returned `ReadOnlySource` object.
+    /// have no effect on the returned `FileSlice` object.
     ///
-    /// You should only use this to read files create with [`open_write`]
+    /// You should only use this to read files create with [Directory::open_write]
-    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
+    fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
+        let file_handle = self.get_file_handle(path)?;
+        Ok(FileSlice::new(file_handle))
+    }

     /// Removes a file
     ///
     /// Removing a file will not affect an eventual
-    /// existing ReadOnlySource pointing to it.
+    /// existing FileSlice pointing to it.
     ///
     /// Removing a nonexistent file, yields a
     /// `DeleteError::DoesNotExist`.
-    fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
+    fn delete(&self, path: &Path) -> Result<(), DeleteError>;

     /// Returns true iff the file exists
-    fn exists(&self, path: &Path) -> bool;
+    fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;

     /// Opens a writer for the *virtual file* associated with
     /// a Path.
     ///
     /// Right after this call, the file should be created
     /// and any subsequent call to `open_read` for the
-    /// same path should return a `ReadOnlySource`.
+    /// same path should return a `FileSlice`.
     ///
     /// Write operations may be aggressively buffered.
     /// The client of this trait is responsible for calling flush
@@ -153,14 +158,14 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// was not called.
     ///
     /// The file may not previously exist.
-    fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
+    fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;

     /// Reads the full content file that has been written using
     /// atomic_write.
     ///
     /// This should only be used for small files.
     ///
-    /// You should only use this to read files create with [`atomic_write`]
+    /// You should only use this to read files create with [Directory::atomic_write].
     fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;

     /// Atomically replace the content of a file with data.
@@ -169,7 +174,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// a partially written file.
     ///
     /// The file may or may not previously exist.
-    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
+    fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>;

     /// Acquire a lock in the given directory.
     ///
@@ -197,7 +202,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Registers a callback that will be called whenever a change on the `meta.json`
     /// using the `atomic_write` API is detected.
     ///
-    /// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other
+    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
     /// hand, undefined.
     ///
     /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
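`Directory` implementors now provide `get_file_handle`, while `open_read` becomes a provided method that wraps the handle into a `FileSlice`; the write-side methods also take `&self` instead of `&mut self`. From the caller's side only the error plumbing changes, as in this sketch (the file name is chosen purely for illustration):

use std::path::Path;
use tantivy::directory::error::OpenReadError;
use tantivy::directory::{Directory, FileSlice};

// Sketch only: callers keep calling open_read(); the default implementation
// forwards to the implementor's get_file_handle().
fn open_meta(dir: &dyn Directory) -> Result<FileSlice, OpenReadError> {
    dir.open_read(Path::new("meta.json"))
}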
|||||||
@@ -1,243 +1,173 @@
|
|||||||
use std::error::Error as StdError;
|
use crate::Version;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
/// Error while trying to acquire a directory lock.
|
/// Error while trying to acquire a directory lock.
|
||||||
#[derive(Debug, Fail)]
|
#[derive(Debug, Error)]
|
||||||
pub enum LockError {
|
pub enum LockError {
|
||||||
/// Failed to acquired a lock as it is already held by another
|
/// Failed to acquired a lock as it is already held by another
|
||||||
/// client.
|
/// client.
|
||||||
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
|
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
|
||||||
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
|
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
|
||||||
#[fail(
|
#[error("Could not acquire lock as it is already held, possibly by a different process.")]
|
||||||
display = "Could not acquire lock as it is already held, possibly by a different process."
|
|
||||||
)]
|
|
||||||
LockBusy,
|
LockBusy,
|
||||||
/// Trying to acquire a lock failed with an `IOError`
|
/// Trying to acquire a lock failed with an `IOError`
|
||||||
#[fail(display = "Failed to acquire the lock due to an io:Error.")]
|
#[error("Failed to acquire the lock due to an io:Error.")]
|
||||||
IOError(io::Error),
|
IOError(io::Error),
|
||||||
}
|
}
|
||||||
|
|
||||||
/// General IO error with an optional path to the offending file.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct IOError {
|
|
||||||
path: Option<PathBuf>,
|
|
||||||
err: io::Error,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Into<io::Error> for IOError {
|
|
||||||
fn into(self) -> io::Error {
|
|
||||||
self.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for IOError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self.path {
|
|
||||||
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
|
|
||||||
None => write!(f, "io error occurred: '{}'", self.err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StdError for IOError {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
"io error occurred"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cause(&self) -> Option<&dyn StdError> {
|
|
||||||
Some(&self.err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IOError {
|
|
||||||
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
|
|
||||||
IOError {
|
|
||||||
path: Some(path),
|
|
||||||
err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<io::Error> for IOError {
|
|
||||||
fn from(err: io::Error) -> IOError {
|
|
||||||
IOError { path: None, err }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Error that may occur when opening a directory
|
/// Error that may occur when opening a directory
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Error)]
|
||||||
pub enum OpenDirectoryError {
|
pub enum OpenDirectoryError {
|
||||||
/// The underlying directory does not exists.
|
/// The underlying directory does not exists.
|
||||||
|
#[error("Directory does not exist: '{0}'.")]
|
||||||
DoesNotExist(PathBuf),
|
DoesNotExist(PathBuf),
|
||||||
/// The path exists but is not a directory.
|
/// The path exists but is not a directory.
|
||||||
|
#[error("Path exists but is not a directory: '{0}'.")]
|
||||||
NotADirectory(PathBuf),
|
NotADirectory(PathBuf),
|
||||||
|
/// Failed to create a temp directory.
|
||||||
|
#[error("Failed to create a temporary directory: '{0}'.")]
|
||||||
|
FailedToCreateTempDir(io::Error),
|
||||||
/// IoError
|
/// IoError
|
||||||
IoError(io::Error),
|
#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
|
||||||
}
|
IoError {
|
||||||
|
/// underlying io Error.
|
||||||
impl From<io::Error> for OpenDirectoryError {
|
io_error: io::Error,
|
||||||
fn from(io_err: io::Error) -> Self {
|
/// directory we tried to open.
|
||||||
OpenDirectoryError::IoError(io_err)
|
directory_path: PathBuf,
|
||||||
}
|
},
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for OpenDirectoryError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match *self {
|
|
||||||
OpenDirectoryError::DoesNotExist(ref path) => {
|
|
||||||
write!(f, "the underlying directory '{:?}' does not exist", path)
|
|
||||||
}
|
|
||||||
OpenDirectoryError::NotADirectory(ref path) => {
|
|
||||||
write!(f, "the path '{:?}' exists but is not a directory", path)
|
|
||||||
}
|
|
||||||
OpenDirectoryError::IoError(ref err) => write!(
|
|
||||||
f,
|
|
||||||
"IOError while trying to open/create the directory. {:?}",
|
|
||||||
err
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StdError for OpenDirectoryError {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
"error occurred while opening a directory"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cause(&self) -> Option<&dyn StdError> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Error that may occur when starting to write in a file
|
/// Error that may occur when starting to write in a file
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Error)]
|
||||||
pub enum OpenWriteError {
|
pub enum OpenWriteError {
|
||||||
/// Our directory is WORM, writing an existing file is forbidden.
|
/// Our directory is WORM, writing an existing file is forbidden.
|
||||||
/// Check out the `Directory` documentation.
|
/// Check out the `Directory` documentation.
|
||||||
|
#[error("File already exists: '{0}'")]
|
||||||
FileAlreadyExists(PathBuf),
|
FileAlreadyExists(PathBuf),
|
||||||
/// Any kind of IO error that happens when
|
/// Any kind of IO error that happens when
|
||||||
/// writing in the underlying IO device.
|
/// writing in the underlying IO device.
|
||||||
IOError(IOError),
|
#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")]
|
||||||
|
IOError {
|
||||||
|
/// The underlying `io::Error`.
|
||||||
|
io_error: io::Error,
|
||||||
|
/// File path of the file that tantivy failed to open for write.
|
||||||
|
filepath: PathBuf,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<IOError> for OpenWriteError {
|
impl OpenWriteError {
|
||||||
fn from(err: IOError) -> OpenWriteError {
|
/// Wraps an io error.
|
||||||
OpenWriteError::IOError(err)
|
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||||
|
Self::IOError { io_error, filepath }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
/// Type of index incompatibility between the library and the index found on disk
|
||||||
|
/// Used to catch and provide a hint to solve this incompatibility issue
|
||||||
|
pub enum Incompatibility {
|
||||||
|
/// This library cannot decompress the index found on disk
|
||||||
|
CompressionMismatch {
|
||||||
|
/// Compression algorithm used by the current version of tantivy
|
||||||
|
library_compression_format: String,
|
||||||
|
/// Compression algorithm that was used to serialise the index
|
||||||
|
index_compression_format: String,
|
||||||
|
},
|
||||||
|
/// The index format found on disk isn't supported by this version of the library
|
||||||
|
IndexMismatch {
|
||||||
|
/// Version used by the library
|
||||||
|
library_version: Version,
|
||||||
|
/// Version the index was built with
|
||||||
|
index_version: Version,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
impl fmt::Display for OpenWriteError {
|
impl fmt::Debug for Incompatibility {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
match *self {
|
match self {
|
||||||
OpenWriteError::FileAlreadyExists(ref path) => {
|
Incompatibility::CompressionMismatch {
|
||||||
write!(f, "the file '{:?}' already exists", path)
|
library_compression_format,
|
||||||
|
index_compression_format,
|
||||||
|
} => {
|
||||||
|
let err = format!(
|
||||||
|
"Library was compiled with {:?} compression, index was compressed with {:?}",
|
||||||
|
library_compression_format, index_compression_format
|
||||||
|
);
|
||||||
|
let advice = format!(
|
||||||
|
"Change the feature flag to {:?} and rebuild the library",
|
||||||
|
index_compression_format
|
||||||
|
);
|
||||||
|
write!(f, "{}. {}", err, advice)?;
|
||||||
|
}
|
||||||
|
Incompatibility::IndexMismatch {
|
||||||
|
library_version,
|
||||||
|
index_version,
|
||||||
|
} => {
|
||||||
|
let err = format!(
|
||||||
|
"Library version: {}, index version: {}",
|
||||||
|
library_version.index_format_version, index_version.index_format_version
|
||||||
|
);
|
||||||
|
// TODO make a more useful error message
|
||||||
|
// include the version range that supports this index_format_version
|
||||||
|
let advice = format!(
|
||||||
|
"Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
|
||||||
|
and rebuild your project.",
|
||||||
|
index_version.index_format_version, index_version.major, index_version.minor
|
||||||
|
);
|
||||||
|
write!(f, "{}. {}", err, advice)?;
|
||||||
}
|
}
|
||||||
OpenWriteError::IOError(ref err) => write!(
|
|
||||||
f,
|
|
||||||
"an io error occurred while opening a file for writing: '{}'",
|
|
||||||
err
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StdError for OpenWriteError {
|
Ok(())
|
||||||
fn description(&self) -> &str {
|
|
||||||
"error occurred while opening a file for writing"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cause(&self) -> Option<&dyn StdError> {
|
|
||||||
match *self {
|
|
||||||
OpenWriteError::FileAlreadyExists(_) => None,
|
|
||||||
OpenWriteError::IOError(ref err) => Some(err),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Error that may occur when accessing a file read
|
/// Error that may occur when accessing a file read
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Error)]
|
||||||
pub enum OpenReadError {
|
pub enum OpenReadError {
|
||||||
/// The file does not exist.
|
/// The file does not exist.
|
||||||
|
#[error("Files does not exists: {0:?}")]
|
||||||
FileDoesNotExist(PathBuf),
|
FileDoesNotExist(PathBuf),
|
||||||
/// Any kind of IO error that happens when
|
/// Any kind of io::Error.
|
||||||
/// interacting with the underlying IO device.
|
#[error(
|
||||||
IOError(IOError),
|
"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
|
||||||
|
)]
|
||||||
|
IOError {
|
||||||
|
/// The underlying `io::Error`.
|
||||||
|
io_error: io::Error,
|
||||||
|
/// File path of the file that tantivy failed to open for read.
|
||||||
|
filepath: PathBuf,
|
||||||
|
},
|
||||||
|
/// This library does not support the index version found in the file footer.
|
||||||
|
#[error("Index version unsupported: {0:?}")]
|
||||||
|
IncompatibleIndex(Incompatibility),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<IOError> for OpenReadError {
|
impl OpenReadError {
|
||||||
fn from(err: IOError) -> OpenReadError {
|
/// Wraps an io error.
|
||||||
OpenReadError::IOError(err)
|
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||||
|
Self::IOError { io_error, filepath }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl fmt::Display for OpenReadError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match *self {
|
|
||||||
OpenReadError::FileDoesNotExist(ref path) => {
|
|
||||||
write!(f, "the file '{:?}' does not exist", path)
|
|
||||||
}
|
|
||||||
OpenReadError::IOError(ref err) => write!(
|
|
||||||
f,
|
|
||||||
"an io error occurred while opening a file for reading: '{}'",
|
|
||||||
err
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StdError for OpenReadError {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
"error occurred while opening a file for reading"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cause(&self) -> Option<&dyn StdError> {
|
|
||||||
match *self {
|
|
||||||
OpenReadError::FileDoesNotExist(_) => None,
|
|
||||||
OpenReadError::IOError(ref err) => Some(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Error that may occur when trying to delete a file
|
/// Error that may occur when trying to delete a file
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Error)]
|
||||||
pub enum DeleteError {
|
pub enum DeleteError {
|
||||||
/// The file does not exist.
|
/// The file does not exist.
|
||||||
|
#[error("File does not exists: '{0}'.")]
|
||||||
FileDoesNotExist(PathBuf),
|
FileDoesNotExist(PathBuf),
|
||||||
/// Any kind of IO error that happens when
|
/// Any kind of IO error that happens when
|
||||||
/// interacting with the underlying IO device.
|
/// interacting with the underlying IO device.
|
||||||
IOError(IOError),
|
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
|
||||||
|
IOError {
|
||||||
|
/// The underlying `io::Error`.
|
||||||
|
io_error: io::Error,
|
||||||
|
/// File path of the file that tantivy failed to delete.
|
||||||
|
filepath: PathBuf,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<IOError> for DeleteError {
|
impl From<Incompatibility> for OpenReadError {
|
||||||
fn from(err: IOError) -> DeleteError {
|
fn from(incompatibility: Incompatibility) -> Self {
|
||||||
DeleteError::IOError(err)
|
OpenReadError::IncompatibleIndex(incompatibility)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for DeleteError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match *self {
|
|
||||||
DeleteError::FileDoesNotExist(ref path) => {
|
|
||||||
write!(f, "the file '{:?}' does not exist", path)
|
|
||||||
}
|
|
||||||
DeleteError::IOError(ref err) => {
|
|
||||||
write!(f, "an io error occurred while deleting a file: '{}'", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StdError for DeleteError {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
"error occurred while deleting a file"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cause(&self) -> Option<&dyn StdError> {
|
|
||||||
match *self {
|
|
||||||
DeleteError::FileDoesNotExist(_) => None,
|
|
||||||
DeleteError::IOError(ref err) => Some(err),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
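The refactoring above replaces the old `IOError` newtype wrappers with struct-style variants that carry both the `io::Error` and the offending path. A minimal hedged sketch of how calling code can destructure the new `OpenReadError` variants, assuming it sits in the same scope as the error definitions; the `describe_open_error` helper is purely illustrative and not part of this change:

// Illustrative only: shows destructuring the struct-style variants
// introduced above instead of unwrapping the removed IOError newtype.
fn describe_open_error(err: &OpenReadError) -> String {
    match err {
        OpenReadError::FileDoesNotExist(path) => {
            // The missing file's path is carried directly by the variant.
            format!("missing file: {:?}", path)
        }
        OpenReadError::IOError { io_error, filepath } => {
            // Both the io::Error and the path are now available together.
            format!("io error on {:?}: {}", filepath, io_error)
        }
        OpenReadError::IncompatibleIndex(incompatibility) => {
            format!("incompatible index: {:?}", incompatibility)
        }
    }
}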
|||||||
src/directory/file_slice.rs (new file, 246 lines)
@@ -0,0 +1,246 @@
|
|||||||
|
use stable_deref_trait::StableDeref;
|
||||||
|
|
||||||
|
use crate::common::HasLen;
|
||||||
|
use crate::directory::OwnedBytes;
|
||||||
|
use std::fmt;
|
||||||
|
use std::ops::Range;
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
use std::{io, ops::Deref};
|
||||||
|
|
||||||
|
pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||||
|
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||||
|
|
||||||
|
/// Objects that represent file sections in tantivy.
|
||||||
|
///
|
||||||
|
/// By contract, whatever happens to the directory file, as long as a FileHandle
|
||||||
|
/// is alive, the data associated with it cannot be altered or destroyed.
|
||||||
|
///
|
||||||
|
/// The underlying behavior is therefore specific to the `Directory` that created it.
|
||||||
|
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
|
||||||
|
/// on the filesystem.
|
||||||
|
pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
|
||||||
|
/// Reads a slice of bytes.
|
||||||
|
///
|
||||||
|
/// This method may panic if the range requested is invalid.
|
||||||
|
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FileHandle for &'static [u8] {
|
||||||
|
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||||
|
let bytes = &self[range];
|
||||||
|
Ok(OwnedBytes::new(bytes))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Deref<Target = [u8]>> HasLen for T {
|
||||||
|
fn len(&self) -> usize {
|
||||||
|
self.deref().len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<B> From<B> for FileSlice
|
||||||
|
where
|
||||||
|
B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
|
||||||
|
{
|
||||||
|
fn from(bytes: B) -> FileSlice {
|
||||||
|
FileSlice::new(Box::new(OwnedBytes::new(bytes)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Logical slice of read only file in tantivy.
|
||||||
|
///
|
||||||
|
/// It can be cloned and sliced cheaply.
|
||||||
|
///
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct FileSlice {
|
||||||
|
data: Arc<dyn FileHandle>,
|
||||||
|
range: Range<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for FileSlice {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(f, "FileSlice({:?}, {:?})", &self.data, self.range)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FileSlice {
|
||||||
|
/// Wraps a FileHandle.
|
||||||
|
pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
|
||||||
|
let num_bytes = file_handle.len();
|
||||||
|
FileSlice::new_with_num_bytes(file_handle, num_bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wraps a FileHandle.
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
|
||||||
|
FileSlice {
|
||||||
|
data: Arc::from(file_handle),
|
||||||
|
range: 0..num_bytes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a fileslice that is just a view over a slice of the data.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// Panics if `byte_range.end` exceeds the filesize.
|
||||||
|
pub fn slice(&self, byte_range: Range<usize>) -> FileSlice {
|
||||||
|
assert!(byte_range.end <= self.len());
|
||||||
|
FileSlice {
|
||||||
|
data: self.data.clone(),
|
||||||
|
range: self.range.start + byte_range.start..self.range.start + byte_range.end,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates an empty FileSlice
|
||||||
|
pub fn empty() -> FileSlice {
|
||||||
|
const EMPTY_SLICE: &[u8] = &[];
|
||||||
|
FileSlice::from(EMPTY_SLICE)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
|
||||||
|
///
|
||||||
|
/// The behavior is strongly dependent on the implementation of the underlying
|
||||||
|
/// `Directory` and the `FileSliceTrait` it creates.
|
||||||
|
/// In particular, it is up to the `Directory` implementation
|
||||||
|
/// to handle caching if needed.
|
||||||
|
pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
|
||||||
|
self.data.read_bytes(self.range.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reads a specific slice of data.
|
||||||
|
///
|
||||||
|
/// This is equivalent to running `file_slice.slice(range).read_bytes()`.
|
||||||
|
pub fn read_bytes_slice(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||||
|
assert!(
|
||||||
|
range.end <= self.len(),
|
||||||
|
"end of requested range exceeds the fileslice length ({} > {})",
|
||||||
|
range.end,
|
||||||
|
self.len()
|
||||||
|
);
|
||||||
|
self.data
|
||||||
|
.read_bytes(self.range.start + range.start..self.range.start + range.end)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Splits the FileSlice at the given offset and returns two file slices.
|
||||||
|
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||||
|
///
|
||||||
|
/// This operation is cheap and must not copy any underlying data.
|
||||||
|
pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
|
||||||
|
let left = self.slice_to(left_len);
|
||||||
|
let right = self.slice_from(left_len);
|
||||||
|
(left, right)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Splits the file slice at the given offset and returns two file slices.
|
||||||
|
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||||
|
pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
|
||||||
|
let left_len = self.len() - right_len;
|
||||||
|
self.split(left_len)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Like `.slice(...)` but enforcing only the `from`
|
||||||
|
/// boundary.
|
||||||
|
///
|
||||||
|
/// Equivalent to `.slice(from_offset..self.len())`
|
||||||
|
pub fn slice_from(&self, from_offset: usize) -> FileSlice {
|
||||||
|
self.slice(from_offset..self.len())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Like `.slice(...)` but enforcing only the `to`
|
||||||
|
/// boundary.
|
||||||
|
///
|
||||||
|
/// Equivalent to `.slice(0..to_offset)`
|
||||||
|
pub fn slice_to(&self, to_offset: usize) -> FileSlice {
|
||||||
|
self.slice(0..to_offset)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FileHandle for FileSlice {
|
||||||
|
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||||
|
self.read_bytes_slice(range)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HasLen for FileSlice {
|
||||||
|
fn len(&self) -> usize {
|
||||||
|
self.range.len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::{FileHandle, FileSlice};
|
||||||
|
use crate::common::HasLen;
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_file_slice() -> io::Result<()> {
|
||||||
|
let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
|
||||||
|
assert_eq!(file_slice.len(), 6);
|
||||||
|
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
|
||||||
|
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
|
||||||
|
assert_eq!(
|
||||||
|
file_slice
|
||||||
|
.slice_from(1)
|
||||||
|
.slice_to(2)
|
||||||
|
.read_bytes()?
|
||||||
|
.as_slice(),
|
||||||
|
b"bc"
|
||||||
|
);
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split(0);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split(2);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split_from_end(0);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = file_slice.clone().split_from_end(2);
|
||||||
|
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
|
||||||
|
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_file_slice_trait_slice_len() {
|
||||||
|
let blop: &'static [u8] = b"abc";
|
||||||
|
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
|
||||||
|
assert_eq!(owned_bytes.len(), 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_slice_simple_read() -> io::Result<()> {
|
||||||
|
let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(slice.len(), 6);
|
||||||
|
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
|
||||||
|
assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_slice_read_slice() -> io::Result<()> {
|
||||||
|
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")]
|
||||||
|
fn test_slice_read_slice_invalid_range_exceeds() {
|
||||||
|
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||||
|
assert_eq!(
|
||||||
|
slice_deref.read_bytes_slice(0..10).unwrap().as_ref(),
|
||||||
|
b"bcd"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
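The tests above already exercise slicing and splitting; as an additional hedged sketch (assuming `FileSlice` and the `HasLen` trait are in scope, e.g. inside this module), the `From<B>` impl lets any stable-deref byte container back a slice, so owned data works the same way as the static byte slices used in the tests:

// Illustrative only: Vec<u8> is StableDeref + Deref<Target = [u8]>,
// so the blanket From<B> impl shown above applies to it.
fn file_slice_demo() -> std::io::Result<()> {
    let slice = FileSlice::from(b"hello tantivy".to_vec());
    assert_eq!(slice.len(), 13);
    // split() consumes the slice, so clone the cheap handle first.
    let (left, right) = slice.clone().split(5);
    assert_eq!(left.read_bytes()?.as_slice(), b"hello");
    assert_eq!(right.slice_from(1).read_bytes()?.as_slice(), b"tantivy");
    Ok(())
}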
src/directory/file_watcher.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
|
|||||||
|
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
|
||||||
|
use crc32fast::Hasher;
|
||||||
|
use std::fs;
|
||||||
|
use std::io;
|
||||||
|
use std::io::BufRead;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::thread;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
|
||||||
|
|
||||||
|
// Watches a file and executes registered callbacks when the file is modified.
|
||||||
|
pub struct FileWatcher {
|
||||||
|
path: Arc<Path>,
|
||||||
|
callbacks: Arc<WatchCallbackList>,
|
||||||
|
state: Arc<AtomicUsize>, // 0: new, 1: runnable, 2: terminated
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FileWatcher {
|
||||||
|
pub fn new(path: &Path) -> FileWatcher {
|
||||||
|
FileWatcher {
|
||||||
|
path: Arc::from(path),
|
||||||
|
callbacks: Default::default(),
|
||||||
|
state: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn spawn(&self) {
|
||||||
|
if self
|
||||||
|
.state
|
||||||
|
.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let path = self.path.clone();
|
||||||
|
let callbacks = self.callbacks.clone();
|
||||||
|
let state = self.state.clone();
|
||||||
|
|
||||||
|
thread::Builder::new()
|
||||||
|
.name("thread-tantivy-meta-file-watcher".to_string())
|
||||||
|
.spawn(move || {
|
||||||
|
let mut current_checksum = None;
|
||||||
|
|
||||||
|
while state.load(Ordering::SeqCst) == 1 {
|
||||||
|
if let Ok(checksum) = FileWatcher::compute_checksum(&path) {
|
||||||
|
// `None.unwrap_or_else(|| !checksum) != checksum` evaluates to `true`
|
||||||
|
if current_checksum.unwrap_or_else(|| !checksum) != checksum {
|
||||||
|
info!("Meta file {:?} was modified", path);
|
||||||
|
current_checksum = Some(checksum);
|
||||||
|
futures::executor::block_on(callbacks.broadcast());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
thread::sleep(POLLING_INTERVAL);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.expect("Failed to spawn meta file watcher thread");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn watch(&self, callback: WatchCallback) -> WatchHandle {
|
||||||
|
let handle = self.callbacks.subscribe(callback);
|
||||||
|
self.spawn();
|
||||||
|
handle
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compute_checksum(path: &Path) -> Result<u32, io::Error> {
|
||||||
|
let reader = match fs::File::open(path) {
|
||||||
|
Ok(f) => io::BufReader::new(f),
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Failed to open meta file {:?}: {:?}", path, e);
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut hasher = Hasher::new();
|
||||||
|
|
||||||
|
for line in reader.lines() {
|
||||||
|
hasher.update(line?.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(hasher.finalize())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for FileWatcher {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.state.store(2, Ordering::SeqCst);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
|
||||||
|
use std::mem;
|
||||||
|
|
||||||
|
use crate::directory::mmap_directory::atomic_write;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_file_watcher_drop_watcher() -> crate::Result<()> {
|
||||||
|
let tmp_dir = tempfile::TempDir::new()?;
|
||||||
|
let tmp_file = tmp_dir.path().join("watched.txt");
|
||||||
|
|
||||||
|
let counter: Arc<AtomicUsize> = Default::default();
|
||||||
|
let (tx, rx) = crossbeam::channel::unbounded();
|
||||||
|
let timeout = Duration::from_millis(100);
|
||||||
|
|
||||||
|
let watcher = FileWatcher::new(&tmp_file);
|
||||||
|
|
||||||
|
let state = watcher.state.clone();
|
||||||
|
assert_eq!(state.load(Ordering::SeqCst), 0);
|
||||||
|
|
||||||
|
let counter_clone = counter.clone();
|
||||||
|
|
||||||
|
let _handle = watcher.watch(WatchCallback::new(move || {
|
||||||
|
let val = counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||||
|
tx.send(val + 1).unwrap();
|
||||||
|
}));
|
||||||
|
|
||||||
|
assert_eq!(counter.load(Ordering::SeqCst), 0);
|
||||||
|
assert_eq!(state.load(Ordering::SeqCst), 1);
|
||||||
|
|
||||||
|
atomic_write(&tmp_file, b"foo")?;
|
||||||
|
assert_eq!(rx.recv_timeout(timeout), Ok(1));
|
||||||
|
|
||||||
|
atomic_write(&tmp_file, b"foo")?;
|
||||||
|
assert!(rx.recv_timeout(timeout).is_err());
|
||||||
|
|
||||||
|
atomic_write(&tmp_file, b"bar")?;
|
||||||
|
assert_eq!(rx.recv_timeout(timeout), Ok(2));
|
||||||
|
|
||||||
|
mem::drop(watcher);
|
||||||
|
|
||||||
|
atomic_write(&tmp_file, b"qux")?;
|
||||||
|
thread::sleep(Duration::from_millis(10));
|
||||||
|
assert_eq!(counter.load(Ordering::SeqCst), 2);
|
||||||
|
assert_eq!(state.load(Ordering::SeqCst), 2);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_file_watcher_drop_handle() -> crate::Result<()> {
|
||||||
|
let tmp_dir = tempfile::TempDir::new()?;
|
||||||
|
let tmp_file = tmp_dir.path().join("watched.txt");
|
||||||
|
|
||||||
|
let counter: Arc<AtomicUsize> = Default::default();
|
||||||
|
let (tx, rx) = crossbeam::channel::unbounded();
|
||||||
|
let timeout = Duration::from_millis(100);
|
||||||
|
|
||||||
|
let watcher = FileWatcher::new(&tmp_file);
|
||||||
|
|
||||||
|
let state = watcher.state.clone();
|
||||||
|
assert_eq!(state.load(Ordering::SeqCst), 0);
|
||||||
|
|
||||||
|
let counter_clone = counter.clone();
|
||||||
|
|
||||||
|
let handle = watcher.watch(WatchCallback::new(move || {
|
||||||
|
let val = counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||||
|
tx.send(val + 1).unwrap();
|
||||||
|
}));
|
||||||
|
|
||||||
|
assert_eq!(counter.load(Ordering::SeqCst), 0);
|
||||||
|
assert_eq!(state.load(Ordering::SeqCst), 1);
|
||||||
|
|
||||||
|
atomic_write(&tmp_file, b"foo")?;
|
||||||
|
assert_eq!(rx.recv_timeout(timeout), Ok(1));
|
||||||
|
|
||||||
|
mem::drop(handle);
|
||||||
|
|
||||||
|
atomic_write(&tmp_file, b"qux")?;
|
||||||
|
assert_eq!(counter.load(Ordering::SeqCst), 1);
|
||||||
|
assert_eq!(state.load(Ordering::SeqCst), 1);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
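A hedged usage sketch of the polling watcher above, assuming `FileWatcher`, `WatchCallback`, and `WatchHandle` are in scope; the function name and the printed message are illustrative only. Both returned values must be kept alive: dropping the handle unsubscribes the callback, and dropping the watcher flips its state to terminated so the polling thread exits.

// Illustrative only: registers a callback on the checksum-polling watcher.
fn watch_meta_file(path: &std::path::Path) -> (FileWatcher, WatchHandle) {
    let watcher = FileWatcher::new(path);
    let handle = watcher.watch(WatchCallback::new(|| {
        // Runs each time the watched file's CRC32 checksum changes.
        println!("meta file changed");
    }));
    (watcher, handle)
}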
@@ -1,159 +1,237 @@
|
|||||||
use crate::directory::read_only_source::ReadOnlySource;
|
use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt};
|
||||||
|
use crate::directory::error::Incompatibility;
|
||||||
|
use crate::directory::FileSlice;
|
||||||
use crate::directory::{AntiCallToken, TerminatingWrite};
|
use crate::directory::{AntiCallToken, TerminatingWrite};
|
||||||
use byteorder::{ByteOrder, LittleEndian};
|
use crate::Version;
|
||||||
use crc32fast::Hasher;
|
use crc32fast::Hasher;
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
const COMMON_FOOTER_SIZE: usize = 4 * 5;
|
const FOOTER_MAX_LEN: usize = 10_000;
|
||||||
|
|
||||||
|
type CrcHashU32 = u32;
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
pub struct Footer {
|
pub struct Footer {
|
||||||
pub tantivy_version: (u32, u32, u32),
|
pub version: Version,
|
||||||
pub meta: String,
|
pub meta: String,
|
||||||
pub versioned_footer: VersionedFooter,
|
pub versioned_footer: VersionedFooter,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Serialises the footer to a byte-array
|
||||||
|
/// - versioned_footer_len: 4 bytes
|
||||||
|
/// - versioned_footer: variable bytes
|
||||||
|
/// - meta_len: 4 bytes
|
||||||
|
/// - meta: variable bytes
|
||||||
|
/// - version_len: 4 bytes
|
||||||
|
/// - version json: variable bytes
|
||||||
|
impl BinarySerializable for Footer {
|
||||||
|
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
BinarySerializable::serialize(&self.versioned_footer, writer)?;
|
||||||
|
BinarySerializable::serialize(&self.meta, writer)?;
|
||||||
|
let version_string =
|
||||||
|
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
|
||||||
|
BinarySerializable::serialize(&version_string, writer)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
let versioned_footer = VersionedFooter::deserialize(reader)?;
|
||||||
|
let meta = String::deserialize(reader)?;
|
||||||
|
let version_json = String::deserialize(reader)?;
|
||||||
|
let version = serde_json::from_str(&version_json)?;
|
||||||
|
Ok(Footer {
|
||||||
|
version,
|
||||||
|
meta,
|
||||||
|
versioned_footer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Footer {
|
impl Footer {
|
||||||
pub fn new(versioned_footer: VersionedFooter) -> Self {
|
pub fn new(versioned_footer: VersionedFooter) -> Self {
|
||||||
let tantivy_version = (
|
let version = crate::VERSION.clone();
|
||||||
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
|
let meta = version.to_string();
|
||||||
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
|
|
||||||
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
|
|
||||||
);
|
|
||||||
Footer {
|
Footer {
|
||||||
tantivy_version,
|
version,
|
||||||
meta: format!(
|
meta,
|
||||||
"tantivy {}.{}.{}, index v{}",
|
|
||||||
tantivy_version.0,
|
|
||||||
tantivy_version.1,
|
|
||||||
tantivy_version.2,
|
|
||||||
versioned_footer.version()
|
|
||||||
),
|
|
||||||
versioned_footer,
|
versioned_footer,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn to_bytes(&self) -> Vec<u8> {
|
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
|
||||||
let mut res = self.versioned_footer.to_bytes();
|
let mut counting_write = CountingWriter::wrap(&mut write);
|
||||||
res.extend_from_slice(self.meta.as_bytes());
|
self.serialize(&mut counting_write)?;
|
||||||
let len = res.len();
|
let written_len = counting_write.written_bytes();
|
||||||
res.resize(len + COMMON_FOOTER_SIZE, 0);
|
(written_len as u32).serialize(write)?;
|
||||||
let mut common_footer = &mut res[len..];
|
Ok(())
|
||||||
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
|
|
||||||
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
|
|
||||||
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
|
|
||||||
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
|
|
||||||
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
|
|
||||||
res
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
|
pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> {
|
||||||
let len = data.len();
|
if file.len() < 4 {
|
||||||
if len < COMMON_FOOTER_SIZE + 4 {
|
|
||||||
// 4 bytes for index version, stored in versioned footer
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
|
|
||||||
)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
|
|
||||||
if len < size as usize {
|
|
||||||
return Err(io::Error::new(
|
return Err(io::Error::new(
|
||||||
io::ErrorKind::UnexpectedEof,
|
io::ErrorKind::UnexpectedEof,
|
||||||
format!(
|
format!(
|
||||||
"File corrupted. The footer len is {}, while the entire file len is {}",
|
"File corrupted. The file is smaller than 4 bytes (len={}).",
|
||||||
size, len
|
file.len()
|
||||||
),
|
),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
let footer = &data[len - size as usize..];
|
let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES);
|
||||||
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
|
let mut footer_len_bytes = footer_len_file.read_bytes()?;
|
||||||
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
|
let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize;
|
||||||
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
|
let (body, footer) = body_footer.split_from_end(footer_len);
|
||||||
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
|
let mut footer_bytes = footer.read_bytes()?;
|
||||||
Ok(Footer {
|
let footer = Footer::deserialize(&mut footer_bytes)?;
|
||||||
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
|
Ok((footer, body))
|
||||||
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
|
|
||||||
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
/// Confirms that the index will be read correctly by this version of tantivy
|
||||||
let footer = Footer::from_bytes(source.as_slice())?;
|
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
|
||||||
let reader = source.slice_to(source.as_slice().len() - footer.size());
|
pub fn is_compatible(&self) -> Result<(), Incompatibility> {
|
||||||
Ok((footer, reader))
|
let library_version = crate::version();
|
||||||
}
|
match &self.versioned_footer {
|
||||||
|
VersionedFooter::V1 {
|
||||||
pub fn size(&self) -> usize {
|
crc32: _crc,
|
||||||
self.versioned_footer.size() as usize + self.meta.len() + 20
|
store_compression,
|
||||||
}
|
} => {
|
||||||
}
|
if &library_version.store_compression != store_compression {
|
||||||
|
return Err(Incompatibility::CompressionMismatch {
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
library_compression_format: library_version.store_compression.to_string(),
|
||||||
pub enum VersionedFooter {
|
index_compression_format: store_compression.to_string(),
|
||||||
UnknownVersion { version: u32, size: u32 },
|
});
|
||||||
V0(u32), // crc
|
|
||||||
}
|
|
||||||
|
|
||||||
impl VersionedFooter {
|
|
||||||
pub fn to_bytes(&self) -> Vec<u8> {
|
|
||||||
match self {
|
|
||||||
VersionedFooter::V0(crc) => {
|
|
||||||
let mut res = vec![0; 8];
|
|
||||||
LittleEndian::write_u32(&mut res, 0);
|
|
||||||
LittleEndian::write_u32(&mut res[4..], *crc);
|
|
||||||
res
|
|
||||||
}
|
|
||||||
VersionedFooter::UnknownVersion { .. } => {
|
|
||||||
panic!("Unsupported index should never get serialized");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
|
|
||||||
assert!(footer.len() >= 4);
|
|
||||||
let version = LittleEndian::read_u32(footer);
|
|
||||||
match version {
|
|
||||||
0 => {
|
|
||||||
if footer.len() == 8 {
|
|
||||||
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
|
|
||||||
} else {
|
|
||||||
Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
format!(
|
|
||||||
"File corrupted. The versioned footer len is {}, while it should be 8",
|
|
||||||
footer.len()
|
|
||||||
),
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
version => Ok(VersionedFooter::UnknownVersion {
|
VersionedFooter::V2 {
|
||||||
version,
|
crc32: _crc,
|
||||||
size: footer.len() as u32,
|
store_compression,
|
||||||
|
} => {
|
||||||
|
if &library_version.store_compression != store_compression {
|
||||||
|
return Err(Incompatibility::CompressionMismatch {
|
||||||
|
library_compression_format: library_version.store_compression.to_string(),
|
||||||
|
index_compression_format: store_compression.to_string(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
VersionedFooter::V3 {
|
||||||
|
crc32: _crc,
|
||||||
|
store_compression,
|
||||||
|
} => {
|
||||||
|
if &library_version.store_compression != store_compression {
|
||||||
|
return Err(Incompatibility::CompressionMismatch {
|
||||||
|
library_compression_format: library_version.store_compression.to_string(),
|
||||||
|
index_compression_format: store_compression.to_string(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
|
||||||
|
library_version: library_version.clone(),
|
||||||
|
index_version: self.version.clone(),
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn size(&self) -> u32 {
|
/// Footer that includes a crc32 hash that enables us to checksum files in the index
|
||||||
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
|
pub enum VersionedFooter {
|
||||||
|
UnknownVersion,
|
||||||
|
V1 {
|
||||||
|
crc32: CrcHashU32,
|
||||||
|
store_compression: String,
|
||||||
|
},
|
||||||
|
// Introduction of the Block WAND information.
|
||||||
|
V2 {
|
||||||
|
crc32: CrcHashU32,
|
||||||
|
store_compression: String,
|
||||||
|
},
|
||||||
|
// Block WAND max term frequency stored on 1 byte
|
||||||
|
V3 {
|
||||||
|
crc32: CrcHashU32,
|
||||||
|
store_compression: String,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BinarySerializable for VersionedFooter {
|
||||||
|
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
let mut buf = Vec::new();
|
||||||
match self {
|
match self {
|
||||||
VersionedFooter::V0(_) => 8,
|
VersionedFooter::V3 {
|
||||||
VersionedFooter::UnknownVersion { size, .. } => *size,
|
crc32,
|
||||||
|
store_compression: compression,
|
||||||
|
} => {
|
||||||
|
// Serializes a valid `VersionedFooter`; unknown versions are rejected below with an error
|
||||||
|
// [ version | crc_hash | compression_mode ]
|
||||||
|
// [ 0..4 | 4..8 | variable ]
|
||||||
|
BinarySerializable::serialize(&3u32, &mut buf)?;
|
||||||
|
BinarySerializable::serialize(crc32, &mut buf)?;
|
||||||
|
BinarySerializable::serialize(compression, &mut buf)?;
|
||||||
|
}
|
||||||
|
VersionedFooter::V2 { .. }
|
||||||
|
| VersionedFooter::V1 { .. }
|
||||||
|
| VersionedFooter::UnknownVersion => {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidInput,
|
||||||
|
"Cannot serialize an unknown versioned footer ",
|
||||||
|
));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
|
||||||
|
assert!(buf.len() <= FOOTER_MAX_LEN);
|
||||||
|
writer.write_all(&buf[..])?;
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn version(&self) -> u32 {
|
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
match self {
|
let len = VInt::deserialize(reader)?.0 as usize;
|
||||||
VersionedFooter::V0(_) => 0,
|
if len > FOOTER_MAX_LEN {
|
||||||
VersionedFooter::UnknownVersion { version, .. } => *version,
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidData,
|
||||||
|
format!(
|
||||||
|
"Footer seems invalid as it suggests a footer len of {}. File is corrupted, \
|
||||||
|
or the index was created with a different & old version of tantivy.",
|
||||||
|
len
|
||||||
|
),
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
let mut buf = vec![0u8; len];
|
||||||
|
reader.read_exact(&mut buf[..])?;
|
||||||
|
let mut cursor = &buf[..];
|
||||||
|
let version = u32::deserialize(&mut cursor)?;
|
||||||
|
if version > 3 {
|
||||||
|
return Ok(VersionedFooter::UnknownVersion);
|
||||||
|
}
|
||||||
|
let crc32 = u32::deserialize(&mut cursor)?;
|
||||||
|
let store_compression = String::deserialize(&mut cursor)?;
|
||||||
|
Ok(if version == 1 {
|
||||||
|
VersionedFooter::V1 {
|
||||||
|
crc32,
|
||||||
|
store_compression,
|
||||||
|
}
|
||||||
|
} else if version == 2 {
|
||||||
|
VersionedFooter::V2 {
|
||||||
|
crc32,
|
||||||
|
store_compression,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
assert_eq!(version, 3);
|
||||||
|
VersionedFooter::V3 {
|
||||||
|
crc32,
|
||||||
|
store_compression,
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn crc(&self) -> Option<u32> {
|
impl VersionedFooter {
|
||||||
|
pub fn crc(&self) -> Option<CrcHashU32> {
|
||||||
match self {
|
match self {
|
||||||
VersionedFooter::V0(crc) => Some(*crc),
|
VersionedFooter::V3 { crc32, .. } => Some(*crc32),
|
||||||
|
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
|
||||||
|
VersionedFooter::V1 { crc32, .. } => Some(*crc32),
|
||||||
VersionedFooter::UnknownVersion { .. } => None,
|
VersionedFooter::UnknownVersion { .. } => None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -189,25 +267,152 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
|
|||||||
|
|
||||||
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
|
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
|
||||||
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
|
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
|
||||||
let crc = self.hasher.take().unwrap().finalize();
|
let crc32 = self.hasher.take().unwrap().finalize();
|
||||||
|
let footer = Footer::new(VersionedFooter::V3 {
|
||||||
let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
|
crc32,
|
||||||
|
store_compression: crate::store::COMPRESSION.to_string(),
|
||||||
|
});
|
||||||
let mut writer = self.writer.take().unwrap();
|
let mut writer = self.writer.take().unwrap();
|
||||||
writer.write_all(&footer)?;
|
footer.append_footer(&mut writer)?;
|
||||||
writer.terminate()
|
writer.terminate()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
|
use super::CrcHashU32;
|
||||||
|
use super::FooterProxy;
|
||||||
|
use crate::common::{BinarySerializable, VInt};
|
||||||
use crate::directory::footer::{Footer, VersionedFooter};
|
use crate::directory::footer::{Footer, VersionedFooter};
|
||||||
|
use crate::directory::TerminatingWrite;
|
||||||
|
use byteorder::{ByteOrder, LittleEndian};
|
||||||
|
use regex::Regex;
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_versioned_footer() {
|
||||||
|
let mut vec = Vec::new();
|
||||||
|
let footer_proxy = FooterProxy::new(&mut vec);
|
||||||
|
assert!(footer_proxy.terminate().is_ok());
|
||||||
|
if crate::store::COMPRESSION == "lz4" {
|
||||||
|
assert_eq!(vec.len(), 158);
|
||||||
|
} else {
|
||||||
|
assert_eq!(vec.len(), 167);
|
||||||
|
}
|
||||||
|
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
|
||||||
|
assert!(matches!(
|
||||||
|
footer.versioned_footer,
|
||||||
|
VersionedFooter::V3 { store_compression, .. }
|
||||||
|
if store_compression == crate::store::COMPRESSION
|
||||||
|
));
|
||||||
|
assert_eq!(&footer.version, crate::version());
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_serialize_deserialize_footer() {
|
fn test_serialize_deserialize_footer() {
|
||||||
let crc = 123456;
|
let mut buffer = Vec::new();
|
||||||
let footer = Footer::new(VersionedFooter::V0(crc));
|
let crc32 = 123456u32;
|
||||||
let footer_bytes = footer.to_bytes();
|
let footer: Footer = Footer::new(VersionedFooter::V3 {
|
||||||
|
crc32,
|
||||||
|
store_compression: "lz4".to_string(),
|
||||||
|
});
|
||||||
|
footer.serialize(&mut buffer).unwrap();
|
||||||
|
let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
|
||||||
|
assert_eq!(footer_deser, footer);
|
||||||
|
}
|
||||||
|
|
||||||
assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
|
#[test]
|
||||||
|
fn footer_length() {
|
||||||
|
let crc32 = 1111111u32;
|
||||||
|
let versioned_footer = VersionedFooter::V3 {
|
||||||
|
crc32,
|
||||||
|
store_compression: "lz4".to_string(),
|
||||||
|
};
|
||||||
|
let mut buf = Vec::new();
|
||||||
|
versioned_footer.serialize(&mut buf).unwrap();
|
||||||
|
assert_eq!(buf.len(), 13);
|
||||||
|
let footer = Footer::new(versioned_footer);
|
||||||
|
let regex_ptn = Regex::new(
|
||||||
|
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert!(regex_ptn.is_match(&footer.meta));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn versioned_footer_from_bytes() {
|
||||||
|
let v_footer_bytes = vec![
|
||||||
|
// versioned footer length
|
||||||
|
12 | 128,
|
||||||
|
// index format version
|
||||||
|
3,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
// crc 32
|
||||||
|
12,
|
||||||
|
35,
|
||||||
|
89,
|
||||||
|
18,
|
||||||
|
// compression format
|
||||||
|
3 | 128,
|
||||||
|
b'l',
|
||||||
|
b'z',
|
||||||
|
b'4',
|
||||||
|
];
|
||||||
|
let mut cursor = &v_footer_bytes[..];
|
||||||
|
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
|
||||||
|
assert!(cursor.is_empty());
|
||||||
|
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
|
||||||
|
let expected_versioned_footer: VersionedFooter = VersionedFooter::V3 {
|
||||||
|
crc32: expected_crc,
|
||||||
|
store_compression: "lz4".to_string(),
|
||||||
|
};
|
||||||
|
assert_eq!(versioned_footer, expected_versioned_footer);
|
||||||
|
let mut buffer = Vec::new();
|
||||||
|
assert!(versioned_footer.serialize(&mut buffer).is_ok());
|
||||||
|
assert_eq!(&v_footer_bytes[..], &buffer[..]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn versioned_footer_panic() {
|
||||||
|
let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
|
||||||
|
let mut b = &v_footer_bytes[..];
|
||||||
|
let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
|
||||||
|
assert!(b.is_empty());
|
||||||
|
let expected_versioned_footer = VersionedFooter::UnknownVersion;
|
||||||
|
assert_eq!(versioned_footer, expected_versioned_footer);
|
||||||
|
let mut buf = Vec::new();
|
||||||
|
assert!(versioned_footer.serialize(&mut buf).is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[cfg(not(feature = "lz4"))]
|
||||||
|
fn compression_mismatch() {
|
||||||
|
let crc32 = 1111111u32;
|
||||||
|
let versioned_footer = VersionedFooter::V1 {
|
||||||
|
crc32,
|
||||||
|
store_compression: "lz4".to_string(),
|
||||||
|
};
|
||||||
|
let footer = Footer::new(versioned_footer);
|
||||||
|
let res = footer.is_compatible();
|
||||||
|
assert!(res.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_deserialize_too_large_footer() {
|
||||||
|
let mut buf = vec![];
|
||||||
|
assert!(FooterProxy::new(&mut buf).terminate().is_ok());
|
||||||
|
let mut long_len_buf = [0u8; 10];
|
||||||
|
let num_bytes = VInt(super::FOOTER_MAX_LEN as u64 + 1u64).serialize_into(&mut long_len_buf);
|
||||||
|
buf[0..num_bytes].copy_from_slice(&long_len_buf[..num_bytes]);
|
||||||
|
let err = Footer::deserialize(&mut &buf[..]).unwrap_err();
|
||||||
|
assert_eq!(err.kind(), io::ErrorKind::InvalidData);
|
||||||
|
assert_eq!(
|
||||||
|
err.to_string(),
|
||||||
|
"Footer seems invalid as it suggests a footer len of 10001. File is corrupted, \
|
||||||
|
or the index was created with a different & old version of tantivy."
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
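A hedged round-trip sketch of the footer scheme above (append a footer when writing, split it back off when reading), assuming `Footer`, `VersionedFooter`, and `FileSlice` are in scope; the crc32 value, payload bytes, and "lz4" string are placeholders, not values taken from this change:

// Illustrative only: write a footer after some payload, then recover both.
fn footer_round_trip() -> std::io::Result<()> {
    let mut buf: Vec<u8> = b"some serialized segment data".to_vec();
    let footer = Footer::new(VersionedFooter::V3 {
        crc32: 0xDEAD_BEEF,
        store_compression: "lz4".to_string(),
    });
    // append_footer writes the serialized footer followed by its length (u32).
    footer.append_footer(&mut buf)?;

    // extract_footer splits the file back into (footer, body).
    let (read_back, body) = Footer::extract_footer(FileSlice::from(buf))?;
    assert_eq!(read_back, footer);
    assert_eq!(body.read_bytes()?.as_slice(), b"some serialized segment data");
    Ok(())
}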
|||||||
@@ -1,16 +1,16 @@
|
|||||||
use crate::core::MANAGED_FILEPATH;
|
use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
|
||||||
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
|
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
|
||||||
use crate::directory::footer::{Footer, FooterProxy};
|
use crate::directory::footer::{Footer, FooterProxy};
|
||||||
use crate::directory::DirectoryLock;
|
use crate::directory::GarbageCollectionResult;
|
||||||
use crate::directory::Lock;
|
use crate::directory::Lock;
|
||||||
use crate::directory::META_LOCK;
|
use crate::directory::META_LOCK;
|
||||||
use crate::directory::{ReadOnlySource, WritePtr};
|
use crate::directory::{DirectoryLock, FileHandle};
|
||||||
|
use crate::directory::{FileSlice, WritePtr};
|
||||||
use crate::directory::{WatchCallback, WatchHandle};
|
use crate::directory::{WatchCallback, WatchHandle};
|
||||||
use crate::error::DataCorruption;
|
use crate::error::DataCorruption;
|
||||||
use crate::Directory;
|
use crate::Directory;
|
||||||
use crate::Result;
|
|
||||||
use crc32fast::Hasher;
|
use crc32fast::Hasher;
|
||||||
use serde_json;
|
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
@@ -53,7 +53,7 @@ struct MetaInformation {
|
|||||||
/// Saves the file containing the list of existing files
|
/// Saves the file containing the list of existing files
|
||||||
/// that were created by tantivy.
|
/// that were created by tantivy.
|
||||||
fn save_managed_paths(
|
fn save_managed_paths(
|
||||||
directory: &mut dyn Directory,
|
directory: &dyn Directory,
|
||||||
wlock: &RwLockWriteGuard<'_, MetaInformation>,
|
wlock: &RwLockWriteGuard<'_, MetaInformation>,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
let mut w = serde_json::to_vec(&wlock.managed_paths)?;
|
let mut w = serde_json::to_vec(&wlock.managed_paths)?;
|
||||||
@@ -64,7 +64,7 @@ fn save_managed_paths(
|
|||||||
|
|
||||||
impl ManagedDirectory {
|
impl ManagedDirectory {
|
||||||
/// Wraps a directory as managed directory.
|
/// Wraps a directory as managed directory.
|
||||||
pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
|
pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> {
|
||||||
match directory.atomic_read(&MANAGED_FILEPATH) {
|
match directory.atomic_read(&MANAGED_FILEPATH) {
|
||||||
Ok(data) => {
|
Ok(data) => {
|
||||||
let managed_files_json = String::from_utf8_lossy(&data);
|
let managed_files_json = String::from_utf8_lossy(&data);
|
||||||
@@ -86,7 +86,12 @@ impl ManagedDirectory {
|
|||||||
directory: Box::new(directory),
|
directory: Box::new(directory),
|
||||||
meta_informations: Arc::default(),
|
meta_informations: Arc::default(),
|
||||||
}),
|
}),
|
||||||
Err(OpenReadError::IOError(e)) => Err(From::from(e)),
|
io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()),
|
||||||
|
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
|
||||||
|
// For the moment, this should never happen: `meta.json`
|
||||||
|
// does not have any footer, so incompatibility cannot be detected.
|
||||||
|
Err(crate::TantivyError::IncompatibleIndex(incompatibility))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -104,7 +109,10 @@ impl ManagedDirectory {
|
|||||||
/// If a file cannot be deleted (for permission reasons for instance)
|
/// If a file cannot be deleted (for permission reasons for instance)
|
||||||
/// an error is simply logged, and the file remains in the list of managed
|
/// an error is simply logged, and the file remains in the list of managed
|
||||||
/// files.
|
/// files.
|
||||||
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
|
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(
|
||||||
|
&mut self,
|
||||||
|
get_living_files: L,
|
||||||
|
) -> crate::Result<GarbageCollectionResult> {
|
||||||
info!("Garbage collect");
|
info!("Garbage collect");
|
||||||
let mut files_to_delete = vec![];
|
let mut files_to_delete = vec![];
|
||||||
|
|
||||||
@@ -130,19 +138,25 @@ impl ManagedDirectory {
|
|||||||
// 2) writer change meta.json (for instance after a merge or a commit)
|
// 2) writer change meta.json (for instance after a merge or a commit)
|
||||||
// 3) gc kicks in.
|
// 3) gc kicks in.
|
||||||
// 4) gc removes a file that was useful for process B, before process B opened it.
|
// 4) gc removes a file that was useful for process B, before process B opened it.
|
||||||
if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
|
match self.acquire_lock(&META_LOCK) {
|
||||||
let living_files = get_living_files();
|
Ok(_meta_lock) => {
|
||||||
for managed_path in &meta_informations_rlock.managed_paths {
|
let living_files = get_living_files();
|
||||||
if !living_files.contains(managed_path) {
|
for managed_path in &meta_informations_rlock.managed_paths {
|
||||||
files_to_delete.push(managed_path.clone());
|
if !living_files.contains(managed_path) {
|
||||||
|
files_to_delete.push(managed_path.clone());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
Err(err) => {
|
||||||
error!("Failed to acquire lock for GC");
|
error!("Failed to acquire lock for GC");
|
||||||
|
return Err(crate::TantivyError::from(err));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let mut failed_to_delete_files = vec![];
|
||||||
let mut deleted_files = vec![];
|
let mut deleted_files = vec![];
|
||||||
|
|
||||||
for file_to_delete in files_to_delete {
|
for file_to_delete in files_to_delete {
|
||||||
match self.delete(&file_to_delete) {
|
match self.delete(&file_to_delete) {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
@@ -152,9 +166,10 @@ impl ManagedDirectory {
|
|||||||
Err(file_error) => {
|
Err(file_error) => {
|
||||||
match file_error {
|
match file_error {
|
||||||
DeleteError::FileDoesNotExist(_) => {
|
DeleteError::FileDoesNotExist(_) => {
|
||||||
deleted_files.push(file_to_delete);
|
deleted_files.push(file_to_delete.clone());
|
||||||
}
|
}
|
||||||
DeleteError::IOError(_) => {
|
DeleteError::IOError { .. } => {
|
||||||
|
failed_to_delete_files.push(file_to_delete.clone());
|
||||||
if !cfg!(target_os = "windows") {
|
if !cfg!(target_os = "windows") {
|
||||||
// On windows, delete is expected to fail if the file
|
// On windows, delete is expected to fail if the file
|
||||||
// is mmapped.
|
// is mmapped.
|
||||||
@@ -177,10 +192,13 @@ impl ManagedDirectory {
|
|||||||
for delete_file in &deleted_files {
|
for delete_file in &deleted_files {
|
||||||
managed_paths_write.remove(delete_file);
|
managed_paths_write.remove(delete_file);
|
||||||
}
|
}
|
||||||
if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
|
save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?;
|
||||||
error!("Failed to save the list of managed files.");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(GarbageCollectionResult {
|
||||||
|
deleted_files,
|
||||||
|
failed_to_delete_files,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Registers a file as managed
|
/// Registers a file as managed
|
||||||
@@ -194,7 +212,7 @@ impl ManagedDirectory {
|
|||||||
/// File starting by "." are reserved to locks.
|
/// File starting by "." are reserved to locks.
|
||||||
/// They are not managed and cannot be subjected
|
/// They are not managed and cannot be subjected
|
||||||
/// to garbage collection.
|
/// to garbage collection.
|
||||||
fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
|
fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> {
|
||||||
// Files starting by "." (e.g. lock files) are not managed.
|
// Files starting by "." (e.g. lock files) are not managed.
|
||||||
if !is_managed(filepath) {
|
if !is_managed(filepath) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@@ -205,7 +223,7 @@ impl ManagedDirectory {
|
|||||||
.expect("Managed file lock poisoned");
|
.expect("Managed file lock poisoned");
|
||||||
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
|
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
|
||||||
if has_changed {
|
if has_changed {
|
||||||
save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
|
save_managed_paths(self.directory.as_ref(), &meta_wlock)?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -213,10 +231,19 @@ impl ManagedDirectory {
|
|||||||
/// Verify checksum of a managed file
|
/// Verify checksum of a managed file
|
||||||
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
|
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
|
||||||
let reader = self.directory.open_read(path)?;
|
let reader = self.directory.open_read(path)?;
|
||||||
let (footer, data) = Footer::extract_footer(reader)
|
let (footer, data) =
|
||||||
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
|
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError {
|
||||||
|
io_error,
|
||||||
|
filepath: path.to_path_buf(),
|
||||||
|
})?;
|
||||||
|
let bytes = data
|
||||||
|
.read_bytes()
|
||||||
|
.map_err(|io_error| OpenReadError::IOError {
|
||||||
|
filepath: path.to_path_buf(),
|
||||||
|
io_error,
|
||||||
|
})?;
|
||||||
let mut hasher = Hasher::new();
|
let mut hasher = Hasher::new();
|
||||||
hasher.update(data.as_slice());
|
hasher.update(bytes.as_slice());
|
||||||
let crc = hasher.finalize();
|
let crc = hasher.finalize();
|
||||||
Ok(footer
|
Ok(footer
|
||||||
.versioned_footer
|
.versioned_footer
|
||||||
@@ -227,34 +254,42 @@ impl ManagedDirectory {

     /// List files for which checksum does not match content
     pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
-        let mut hashset = HashSet::new();
-        let managed_paths = self
+        let mut managed_paths = self
             .meta_informations
             .read()
             .expect("Managed directory rlock poisoned in list damaged.")
             .managed_paths
             .clone();
+        managed_paths.remove(*META_FILEPATH);

-        for path in managed_paths.into_iter() {
+        let mut damaged_files = HashSet::new();
+        for path in managed_paths {
             if !self.validate_checksum(&path)? {
-                hashset.insert(path);
+                damaged_files.insert(path);
             }
         }
-        Ok(hashset)
+        Ok(damaged_files)
     }
 }

 impl Directory for ManagedDirectory {
-    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
-        let read_only_source = self.directory.open_read(path)?;
-        let (_footer, reader) = Footer::extract_footer(read_only_source)
-            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
+    fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
+        let file_slice = self.open_read(path)?;
+        Ok(Box::new(file_slice))
+    }
+
+    fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
+        let file_slice = self.directory.open_read(path)?;
+        let (footer, reader) = Footer::extract_footer(file_slice)
+            .map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
+        footer.is_compatible()?;
         Ok(reader)
     }

-    fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
+    fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
         self.register_file_as_managed(path)
-            .map_err(|e| IOError::with_path(path.to_owned(), e))?;
+            .map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
         Ok(io::BufWriter::new(Box::new(FooterProxy::new(
             self.directory
                 .open_write(path)?
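A hedged usage sketch of the list_damaged API above, wrapping an on-disk directory the same way the tests further down do; error handling is elided with expect for brevity:

use tantivy::directory::{ManagedDirectory, MmapDirectory};
use std::path::Path;

fn report_damaged_files(index_dir: &Path) {
    let mmap_directory = MmapDirectory::open(index_dir).expect("open directory");
    let managed_directory = ManagedDirectory::wrap(mmap_directory).expect("wrap directory");
    // Every managed file whose CRC no longer matches its footer is reported.
    for damaged in managed_directory.list_damaged().expect("scan checksums") {
        eprintln!("checksum mismatch: {:?}", damaged);
    }
}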
@@ -264,7 +299,7 @@ impl Directory for ManagedDirectory {
         ))))
     }

-    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
+    fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
         self.register_file_as_managed(path)?;
         self.directory.atomic_write(path, data)
     }
@@ -277,7 +312,7 @@ impl Directory for ManagedDirectory {
         self.directory.delete(path)
     }

-    fn exists(&self, path: &Path) -> bool {
+    fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
         self.directory.exists(path)
     }

@@ -325,23 +360,22 @@ mod tests_mmap_specific {
|
|||||||
managed_directory
|
managed_directory
|
||||||
.atomic_write(test_path2, &[0u8, 1u8])
|
.atomic_write(test_path2, &[0u8, 1u8])
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(managed_directory.exists(test_path1));
|
assert!(managed_directory.exists(test_path1).unwrap());
|
||||||
assert!(managed_directory.exists(test_path2));
|
assert!(managed_directory.exists(test_path2).unwrap());
|
||||||
let living_files: HashSet<PathBuf> =
|
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
|
||||||
[test_path1.to_owned()].into_iter().cloned().collect();
|
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
||||||
managed_directory.garbage_collect(|| living_files);
|
assert!(managed_directory.exists(test_path1).unwrap());
|
||||||
assert!(managed_directory.exists(test_path1));
|
assert!(!managed_directory.exists(test_path2).unwrap());
|
||||||
assert!(!managed_directory.exists(test_path2));
|
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
||||||
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
||||||
assert!(managed_directory.exists(test_path1));
|
assert!(managed_directory.exists(test_path1).unwrap());
|
||||||
assert!(!managed_directory.exists(test_path2));
|
assert!(!managed_directory.exists(test_path2).unwrap());
|
||||||
let living_files: HashSet<PathBuf> = HashSet::new();
|
let living_files: HashSet<PathBuf> = HashSet::new();
|
||||||
managed_directory.garbage_collect(|| living_files);
|
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
||||||
assert!(!managed_directory.exists(test_path1));
|
assert!(!managed_directory.exists(test_path1).unwrap());
|
||||||
assert!(!managed_directory.exists(test_path2));
|
assert!(!managed_directory.exists(test_path2).unwrap());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -358,56 +392,58 @@ mod tests_mmap_specific {
|
|||||||
let mut write = managed_directory.open_write(test_path1).unwrap();
|
let mut write = managed_directory.open_write(test_path1).unwrap();
|
||||||
write.write_all(&[0u8, 1u8]).unwrap();
|
write.write_all(&[0u8, 1u8]).unwrap();
|
||||||
write.terminate().unwrap();
|
write.terminate().unwrap();
|
||||||
assert!(managed_directory.exists(test_path1));
|
assert!(managed_directory.exists(test_path1).unwrap());
|
||||||
|
|
||||||
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
|
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
|
||||||
managed_directory.garbage_collect(|| living_files.clone());
|
assert!(managed_directory
|
||||||
|
.garbage_collect(|| living_files.clone())
|
||||||
|
.is_ok());
|
||||||
if cfg!(target_os = "windows") {
|
if cfg!(target_os = "windows") {
|
||||||
// On Windows, gc should try and fail the file as it is mmapped.
|
// On Windows, gc should try and fail the file as it is mmapped.
|
||||||
assert!(managed_directory.exists(test_path1));
|
assert!(managed_directory.exists(test_path1).unwrap());
|
||||||
// unmap should happen here.
|
// unmap should happen here.
|
||||||
drop(_mmap_read);
|
drop(_mmap_read);
|
||||||
// The file should still be in the list of managed file and
|
// The file should still be in the list of managed file and
|
||||||
// eventually be deleted once mmap is released.
|
// eventually be deleted once mmap is released.
|
||||||
managed_directory.garbage_collect(|| living_files);
|
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
||||||
assert!(!managed_directory.exists(test_path1));
|
assert!(!managed_directory.exists(test_path1).unwrap());
|
||||||
} else {
|
} else {
|
||||||
assert!(!managed_directory.exists(test_path1));
|
assert!(!managed_directory.exists(test_path1).unwrap());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_checksum() {
|
fn test_checksum() -> crate::Result<()> {
|
||||||
let test_path1: &'static Path = Path::new("some_path_for_test");
|
let test_path1: &'static Path = Path::new("some_path_for_test");
|
||||||
let test_path2: &'static Path = Path::new("other_test_path");
|
let test_path2: &'static Path = Path::new("other_test_path");
|
||||||
|
|
||||||
let tempdir = TempDir::new().unwrap();
|
let tempdir = TempDir::new().unwrap();
|
||||||
let tempdir_path = PathBuf::from(tempdir.path());
|
let tempdir_path = PathBuf::from(tempdir.path());
|
||||||
|
|
||||||
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
let mmap_directory = MmapDirectory::open(&tempdir_path)?;
|
||||||
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
let managed_directory = ManagedDirectory::wrap(mmap_directory)?;
|
||||||
let mut write = managed_directory.open_write(test_path1).unwrap();
|
let mut write = managed_directory.open_write(test_path1)?;
|
||||||
write.write_all(&[0u8, 1u8]).unwrap();
|
write.write_all(&[0u8, 1u8])?;
|
||||||
write.terminate().unwrap();
|
write.terminate()?;
|
||||||
|
|
||||||
let mut write = managed_directory.open_write(test_path2).unwrap();
|
let mut write = managed_directory.open_write(test_path2)?;
|
||||||
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
|
write.write_all(&[3u8, 4u8, 5u8])?;
|
||||||
write.terminate().unwrap();
|
write.terminate()?;
|
||||||
|
|
||||||
|
let read_file = managed_directory.open_read(test_path2)?.read_bytes()?;
|
||||||
|
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]);
|
||||||
assert!(managed_directory.list_damaged().unwrap().is_empty());
|
assert!(managed_directory.list_damaged().unwrap().is_empty());
|
||||||
|
|
||||||
let mut corrupted_path = tempdir_path.clone();
|
let mut corrupted_path = tempdir_path.clone();
|
||||||
corrupted_path.push(test_path2);
|
corrupted_path.push(test_path2);
|
||||||
let mut file = OpenOptions::new()
|
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?;
|
||||||
.write(true)
|
file.write_all(&[255u8])?;
|
||||||
.open(&corrupted_path)
|
file.flush()?;
|
||||||
.unwrap();
|
|
||||||
file.write_all(&[255u8]).unwrap();
|
|
||||||
file.flush().unwrap();
|
|
||||||
drop(file);
|
drop(file);
|
||||||
|
|
||||||
let damaged = managed_directory.list_damaged().unwrap();
|
let damaged = managed_directory.list_damaged()?;
|
||||||
assert_eq!(damaged.len(), 1);
|
assert_eq!(damaged.len(), 1);
|
||||||
assert!(damaged.contains(test_path2));
|
assert!(damaged.contains(test_path2));
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,28 +1,19 @@
|
|||||||
use fs2;
|
|
||||||
use notify;
|
|
||||||
|
|
||||||
use self::fs2::FileExt;
|
|
||||||
use self::notify::RawEvent;
|
|
||||||
use self::notify::RecursiveMode;
|
|
||||||
use self::notify::Watcher;
|
|
||||||
use crate::core::META_FILEPATH;
|
use crate::core::META_FILEPATH;
|
||||||
use crate::directory::error::LockError;
|
use crate::directory::error::LockError;
|
||||||
use crate::directory::error::{
|
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
||||||
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
|
use crate::directory::file_watcher::FileWatcher;
|
||||||
};
|
|
||||||
use crate::directory::read_only_source::BoxedData;
|
|
||||||
use crate::directory::AntiCallToken;
|
|
||||||
use crate::directory::Directory;
|
use crate::directory::Directory;
|
||||||
use crate::directory::DirectoryLock;
|
use crate::directory::DirectoryLock;
|
||||||
use crate::directory::Lock;
|
use crate::directory::Lock;
|
||||||
use crate::directory::ReadOnlySource;
|
|
||||||
use crate::directory::WatchCallback;
|
use crate::directory::WatchCallback;
|
||||||
use crate::directory::WatchCallbackList;
|
|
||||||
use crate::directory::WatchHandle;
|
use crate::directory::WatchHandle;
|
||||||
|
use crate::directory::{AntiCallToken, FileHandle, OwnedBytes};
|
||||||
|
use crate::directory::{ArcBytes, WeakArcBytes};
|
||||||
use crate::directory::{TerminatingWrite, WritePtr};
|
use crate::directory::{TerminatingWrite, WritePtr};
|
||||||
use atomicwrites;
|
use fs2::FileExt;
|
||||||
use memmap::Mmap;
|
use memmap::Mmap;
|
||||||
use std::collections::HashMap;
|
use serde::{Deserialize, Serialize};
|
||||||
|
use stable_deref_trait::StableDeref;
|
||||||
use std::convert::From;
|
use std::convert::From;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::fs::OpenOptions;
|
use std::fs::OpenOptions;
|
||||||
@@ -31,12 +22,9 @@ use std::io::{self, Seek, SeekFrom};
|
|||||||
use std::io::{BufWriter, Read, Write};
|
use std::io::{BufWriter, Read, Write};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::result;
|
use std::result;
|
||||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::Mutex;
|
|
||||||
use std::sync::RwLock;
|
use std::sync::RwLock;
|
||||||
use std::sync::Weak;
|
use std::{collections::HashMap, ops::Deref};
|
||||||
use std::thread;
|
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
|
|
||||||
/// Create a default io error given a string.
|
/// Create a default io error given a string.
|
||||||
@@ -47,17 +35,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
|
|||||||
/// Returns None iff the file exists, can be read, but is empty (and hence
|
/// Returns None iff the file exists, can be read, but is empty (and hence
|
||||||
/// cannot be mmapped)
|
/// cannot be mmapped)
|
||||||
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||||
let file = File::open(full_path).map_err(|e| {
|
let file = File::open(full_path).map_err(|io_err| {
|
||||||
if e.kind() == io::ErrorKind::NotFound {
|
if io_err.kind() == io::ErrorKind::NotFound {
|
||||||
OpenReadError::FileDoesNotExist(full_path.to_owned())
|
OpenReadError::FileDoesNotExist(full_path.to_path_buf())
|
||||||
} else {
|
} else {
|
||||||
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let meta_data = file
|
let meta_data = file
|
||||||
.metadata()
|
.metadata()
|
||||||
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
|
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?;
|
||||||
if meta_data.len() == 0 {
|
if meta_data.len() == 0 {
|
||||||
// if the file size is 0, it will not be possible
|
// if the file size is 0, it will not be possible
|
||||||
// to mmap the file, so we return None
|
// to mmap the file, so we return None
|
||||||
@@ -67,7 +55,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
|||||||
unsafe {
|
unsafe {
|
||||||
memmap::Mmap::map(&file)
|
memmap::Mmap::map(&file)
|
||||||
.map(Some)
|
.map(Some)
|
||||||
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
|
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
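The empty-file guard above exists because mapping a zero-length file fails on most platforms. A self-contained sketch of the same pattern with the memmap crate; this is illustrative, not the tantivy function itself:

use memmap::Mmap;
use std::fs::File;
use std::io;
use std::path::Path;

fn try_mmap(full_path: &Path) -> io::Result<Option<Mmap>> {
    let file = File::open(full_path)?;
    if file.metadata()?.len() == 0 {
        // A zero-length file cannot be mmapped; callers fall back to an empty buffer.
        return Ok(None);
    }
    let mmap = unsafe { Mmap::map(&file)? };
    Ok(Some(mmap))
}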
@@ -88,7 +76,7 @@ pub struct CacheInfo {
|
|||||||
|
|
||||||
struct MmapCache {
|
struct MmapCache {
|
||||||
counters: CacheCounters,
|
counters: CacheCounters,
|
||||||
cache: HashMap<PathBuf, Weak<BoxedData>>,
|
cache: HashMap<PathBuf, WeakArcBytes>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for MmapCache {
|
impl Default for MmapCache {
|
||||||
@@ -122,7 +110,7 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns None if the file exists but as a len of 0 (and hence is not mmappable).
|
// Returns None if the file exists but as a len of 0 (and hence is not mmappable).
|
||||||
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<Arc<BoxedData>>, OpenReadError> {
|
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<ArcBytes>, OpenReadError> {
|
||||||
if let Some(mmap_weak) = self.cache.get(full_path) {
|
if let Some(mmap_weak) = self.cache.get(full_path) {
|
||||||
if let Some(mmap_arc) = mmap_weak.upgrade() {
|
if let Some(mmap_arc) = mmap_weak.upgrade() {
|
||||||
self.counters.hit += 1;
|
self.counters.hit += 1;
|
||||||
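The cache logic around this hunk keeps only Weak references, so a mapping is reclaimed as soon as no reader holds a strong handle any more. An illustrative, simplified version of that pattern (not the tantivy type, which also tracks hit/miss counters and returns ArcBytes):

use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, Weak};

#[derive(Default)]
struct WeakCache {
    cache: HashMap<PathBuf, Weak<Vec<u8>>>,
}

impl WeakCache {
    fn get_or_insert_with(&mut self, key: PathBuf, load: impl FnOnce() -> Vec<u8>) -> Arc<Vec<u8>> {
        if let Some(weak) = self.cache.get(&key) {
            if let Some(strong) = weak.upgrade() {
                return strong; // cache hit: the data is still held by some reader
            }
        }
        let strong = Arc::new(load()); // cache miss: load and remember only a Weak handle
        self.cache.insert(key, Arc::downgrade(&strong));
        strong
    }
}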
@@ -131,71 +119,13 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
self.cache.remove(full_path);
|
self.cache.remove(full_path);
|
||||||
self.counters.miss += 1;
|
self.counters.miss += 1;
|
||||||
Ok(if let Some(mmap) = open_mmap(full_path)? {
|
let mmap_opt = open_mmap(full_path)?;
|
||||||
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
|
Ok(mmap_opt.map(|mmap| {
|
||||||
|
let mmap_arc: ArcBytes = Arc::new(mmap);
|
||||||
let mmap_weak = Arc::downgrade(&mmap_arc);
|
let mmap_weak = Arc::downgrade(&mmap_arc);
|
||||||
self.cache.insert(full_path.to_owned(), mmap_weak);
|
self.cache.insert(full_path.to_owned(), mmap_weak);
|
||||||
Some(mmap_arc)
|
mmap_arc
|
||||||
} else {
|
}))
|
||||||
None
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct WatcherWrapper {
|
|
||||||
_watcher: Mutex<notify::RecommendedWatcher>,
|
|
||||||
watcher_router: Arc<WatchCallbackList>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WatcherWrapper {
|
|
||||||
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
|
|
||||||
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
|
|
||||||
// We need to initialize the
|
|
||||||
let watcher = notify::raw_watcher(tx)
|
|
||||||
.and_then(|mut watcher| {
|
|
||||||
watcher.watch(path, RecursiveMode::Recursive)?;
|
|
||||||
Ok(watcher)
|
|
||||||
})
|
|
||||||
.map_err(|err| match err {
|
|
||||||
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
|
|
||||||
_ => {
|
|
||||||
panic!("Unknown error while starting watching directory {:?}", path);
|
|
||||||
}
|
|
||||||
})?;
|
|
||||||
let watcher_router: Arc<WatchCallbackList> = Default::default();
|
|
||||||
let watcher_router_clone = watcher_router.clone();
|
|
||||||
thread::Builder::new()
|
|
||||||
.name("meta-file-watch-thread".to_string())
|
|
||||||
.spawn(move || {
|
|
||||||
loop {
|
|
||||||
match watcher_recv.recv().map(|evt| evt.path) {
|
|
||||||
Ok(Some(changed_path)) => {
|
|
||||||
// ... Actually subject to false positive.
|
|
||||||
// We might want to be more accurate than this at one point.
|
|
||||||
if let Some(filename) = changed_path.file_name() {
|
|
||||||
if filename == *META_FILEPATH {
|
|
||||||
watcher_router_clone.broadcast();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(None) => {
|
|
||||||
// not an event we are interested in.
|
|
||||||
}
|
|
||||||
Err(_e) => {
|
|
||||||
// the watch send channel was dropped
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})?;
|
|
||||||
Ok(WatcherWrapper {
|
|
||||||
_watcher: Mutex::new(watcher),
|
|
||||||
watcher_router,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
|
|
||||||
self.watcher_router.subscribe(watch_callback)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -220,44 +150,21 @@ struct MmapDirectoryInner {
|
|||||||
root_path: PathBuf,
|
root_path: PathBuf,
|
||||||
mmap_cache: RwLock<MmapCache>,
|
mmap_cache: RwLock<MmapCache>,
|
||||||
_temp_directory: Option<TempDir>,
|
_temp_directory: Option<TempDir>,
|
||||||
watcher: RwLock<Option<WatcherWrapper>>,
|
watcher: FileWatcher,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MmapDirectoryInner {
|
impl MmapDirectoryInner {
|
||||||
fn new(
|
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
|
||||||
root_path: PathBuf,
|
MmapDirectoryInner {
|
||||||
temp_directory: Option<TempDir>,
|
|
||||||
) -> Result<MmapDirectoryInner, OpenDirectoryError> {
|
|
||||||
let mmap_directory_inner = MmapDirectoryInner {
|
|
||||||
root_path,
|
|
||||||
mmap_cache: Default::default(),
|
mmap_cache: Default::default(),
|
||||||
_temp_directory: temp_directory,
|
_temp_directory: temp_directory,
|
||||||
watcher: RwLock::new(None),
|
watcher: FileWatcher::new(&root_path.join(*META_FILEPATH)),
|
||||||
};
|
root_path,
|
||||||
Ok(mmap_directory_inner)
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
|
fn watch(&self, callback: WatchCallback) -> WatchHandle {
|
||||||
// a lot of juggling here, to ensure we don't do anything that panics
|
self.watcher.watch(callback)
|
||||||
// while the rwlock is held. That way we ensure that the rwlock cannot
|
|
||||||
// be poisoned.
|
|
||||||
//
|
|
||||||
// The downside is that we might create a watch wrapper that is not useful.
|
|
||||||
let need_initialization = self.watcher.read().unwrap().is_none();
|
|
||||||
if need_initialization {
|
|
||||||
let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
|
|
||||||
let mut watch_wlock = self.watcher.write().unwrap();
|
|
||||||
// the watcher could have been initialized when we released the lock, and
|
|
||||||
// we do not want to lose the watched files that were set.
|
|
||||||
if watch_wlock.is_none() {
|
|
||||||
*watch_wlock = Some(watch_wrapper);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
|
|
||||||
Ok(watch_wrapper.watch(watch_callback))
|
|
||||||
} else {
|
|
||||||
unreachable!("At this point, watch wrapper is supposed to be initialized");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
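The notify-based WatcherWrapper deleted above is replaced by a FileWatcher dedicated to meta.json. The real FileWatcher implementation is not part of this diff; the sketch below only illustrates the general "watch one file by polling its content" idea and is not the actual tantivy code:

use std::path::PathBuf;
use std::time::Duration;

fn spawn_poll_watcher(path: PathBuf, on_change: impl Fn() + Send + 'static) {
    std::thread::spawn(move || {
        let mut last = std::fs::read(&path).ok();
        loop {
            std::thread::sleep(Duration::from_millis(500));
            let current = std::fs::read(&path).ok();
            if current != last {
                last = current;
                on_change(); // fire the callback when the file content changes
            }
        }
    });
}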
@@ -268,14 +175,11 @@ impl fmt::Debug for MmapDirectory {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl MmapDirectory {
|
impl MmapDirectory {
|
||||||
fn new(
|
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectory {
|
||||||
root_path: PathBuf,
|
let inner = MmapDirectoryInner::new(root_path, temp_directory);
|
||||||
temp_directory: Option<TempDir>,
|
MmapDirectory {
|
||||||
) -> Result<MmapDirectory, OpenDirectoryError> {
|
|
||||||
let inner = MmapDirectoryInner::new(root_path, temp_directory)?;
|
|
||||||
Ok(MmapDirectory {
|
|
||||||
inner: Arc::new(inner),
|
inner: Arc::new(inner),
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new MmapDirectory in a temporary directory.
|
/// Creates a new MmapDirectory in a temporary directory.
|
||||||
@@ -283,9 +187,11 @@ impl MmapDirectory {
|
|||||||
/// This is mostly useful to test the MmapDirectory itself.
|
/// This is mostly useful to test the MmapDirectory itself.
|
||||||
/// For your unit tests, prefer the RAMDirectory.
|
/// For your unit tests, prefer the RAMDirectory.
|
||||||
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
||||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
|
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
|
||||||
let tempdir_path = PathBuf::from(tempdir.path());
|
Ok(MmapDirectory::new(
|
||||||
MmapDirectory::new(tempdir_path, Some(tempdir))
|
tempdir.path().to_path_buf(),
|
||||||
|
Some(tempdir),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Opens a MmapDirectory in a directory.
|
/// Opens a MmapDirectory in a directory.
|
||||||
@@ -303,7 +209,7 @@ impl MmapDirectory {
|
|||||||
directory_path,
|
directory_path,
|
||||||
)))
|
)))
|
||||||
} else {
|
} else {
|
||||||
Ok(MmapDirectory::new(PathBuf::from(directory_path), None)?)
|
Ok(MmapDirectory::new(PathBuf::from(directory_path), None))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -407,8 +313,38 @@ impl TerminatingWrite for SafeFileWriter {
     }
 }

+#[derive(Clone)]
+struct MmapArc(Arc<dyn Deref<Target = [u8]> + Send + Sync>);
+
+impl Deref for MmapArc {
+    type Target = [u8];
+
+    fn deref(&self) -> &[u8] {
+        self.0.deref()
+    }
+}
+unsafe impl StableDeref for MmapArc {}
+
+/// Writes a file in an atomic manner.
+pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
+    // We create the temporary file in the same directory as the target file.
+    // Indeed the canonical temp directory and the target file might sit in different
+    // filesystem, in which case the atomic write may actually not work.
+    let parent_path = path.parent().ok_or_else(|| {
+        io::Error::new(
+            io::ErrorKind::InvalidInput,
+            "Path {:?} does not have parent directory.",
+        )
+    })?;
+    let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
+    tempfile.write_all(content)?;
+    tempfile.flush()?;
+    tempfile.into_temp_path().persist(path)?;
+    Ok(())
+}
+
 impl Directory for MmapDirectory {
-    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
+    fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
         debug!("Open Read {:?}", path);
         let full_path = self.resolve_path(path);

@@ -418,12 +354,19 @@ impl Directory for MmapDirectory {
|
|||||||
on mmap cache while reading {:?}",
|
on mmap cache while reading {:?}",
|
||||||
path
|
path
|
||||||
);
|
);
|
||||||
IOError::with_path(path.to_owned(), make_io_err(msg))
|
let io_err = make_io_err(msg);
|
||||||
|
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
|
||||||
})?;
|
})?;
|
||||||
Ok(mmap_cache
|
|
||||||
|
let owned_bytes = mmap_cache
|
||||||
.get_mmap(&full_path)?
|
.get_mmap(&full_path)?
|
||||||
.map(ReadOnlySource::from)
|
.map(|mmap_arc| {
|
||||||
.unwrap_or_else(ReadOnlySource::empty))
|
let mmap_arc_obj = MmapArc(mmap_arc);
|
||||||
|
OwnedBytes::new(mmap_arc_obj)
|
||||||
|
})
|
||||||
|
.unwrap_or_else(OwnedBytes::empty);
|
||||||
|
|
||||||
|
Ok(Box::new(owned_bytes))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Any entry associated to the path in the mmap will be
|
/// Any entry associated to the path in the mmap will be
|
||||||
@@ -431,25 +374,29 @@ impl Directory for MmapDirectory {
|
|||||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
match fs::remove_file(&full_path) {
|
match fs::remove_file(&full_path) {
|
||||||
Ok(_) => self
|
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError {
|
||||||
.sync_directory()
|
io_error: e,
|
||||||
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
|
filepath: path.to_path_buf(),
|
||||||
|
}),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
if e.kind() == io::ErrorKind::NotFound {
|
if e.kind() == io::ErrorKind::NotFound {
|
||||||
Err(DeleteError::FileDoesNotExist(path.to_owned()))
|
Err(DeleteError::FileDoesNotExist(path.to_owned()))
|
||||||
} else {
|
} else {
|
||||||
Err(IOError::with_path(path.to_owned(), e).into())
|
Err(DeleteError::IOError {
|
||||||
|
io_error: e,
|
||||||
|
filepath: path.to_path_buf(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn exists(&self, path: &Path) -> bool {
|
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
full_path.exists()
|
Ok(full_path.exists())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||||
debug!("Open Write {:?}", path);
|
debug!("Open Write {:?}", path);
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
|
|
||||||
@@ -458,22 +405,22 @@ impl Directory for MmapDirectory {
|
|||||||
.create_new(true)
|
.create_new(true)
|
||||||
.open(full_path);
|
.open(full_path);
|
||||||
|
|
||||||
let mut file = open_res.map_err(|err| {
|
let mut file = open_res.map_err(|io_err| {
|
||||||
if err.kind() == io::ErrorKind::AlreadyExists {
|
if io_err.kind() == io::ErrorKind::AlreadyExists {
|
||||||
OpenWriteError::FileAlreadyExists(path.to_owned())
|
OpenWriteError::FileAlreadyExists(path.to_path_buf())
|
||||||
} else {
|
} else {
|
||||||
IOError::with_path(path.to_owned(), err).into()
|
OpenWriteError::wrap_io_error(io_err, path.to_path_buf())
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
// making sure the file is created.
|
// making sure the file is created.
|
||||||
file.flush()
|
file.flush()
|
||||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||||
|
|
||||||
// Apparetntly, on some filesystem syncing the parent
|
// Apparetntly, on some filesystem syncing the parent
|
||||||
// directory is required.
|
// directory is required.
|
||||||
self.sync_directory()
|
self.sync_directory()
|
||||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
.map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?;
|
||||||
|
|
||||||
let writer = SafeFileWriter::new(file);
|
let writer = SafeFileWriter::new(file);
|
||||||
Ok(BufWriter::new(Box::new(writer)))
|
Ok(BufWriter::new(Box::new(writer)))
|
||||||
@@ -484,26 +431,26 @@ impl Directory for MmapDirectory {
|
|||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
match File::open(&full_path) {
|
match File::open(&full_path) {
|
||||||
Ok(mut file) => {
|
Ok(mut file) => {
|
||||||
file.read_to_end(&mut buffer)
|
file.read_to_end(&mut buffer).map_err(|io_error| {
|
||||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
OpenReadError::wrap_io_error(io_error, path.to_path_buf())
|
||||||
|
})?;
|
||||||
Ok(buffer)
|
Ok(buffer)
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(io_error) => {
|
||||||
if e.kind() == io::ErrorKind::NotFound {
|
if io_error.kind() == io::ErrorKind::NotFound {
|
||||||
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
|
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
|
||||||
} else {
|
} else {
|
||||||
Err(IOError::with_path(path.to_owned(), e).into())
|
Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> {
|
||||||
debug!("Atomic Write {:?}", path);
|
debug!("Atomic Write {:?}", path);
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
|
atomic_write(&full_path, content)?;
|
||||||
meta_file.write(|f| f.write_all(data))?;
|
self.sync_directory()
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
|
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
|
||||||
@@ -527,7 +474,7 @@ impl Directory for MmapDirectory {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
|
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
|
||||||
self.inner.watch(watch_callback)
|
Ok(self.inner.watch(watch_callback))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -541,13 +488,10 @@ mod tests {
|
|||||||
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::ReloadPolicy;
|
use crate::ReloadPolicy;
|
||||||
use std::fs;
|
use crate::{common::HasLen, indexer::LogMergePolicy};
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
||||||
use std::thread;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_open_non_existant_path() {
|
fn test_open_non_existent_path() {
|
||||||
assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
|
assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -557,7 +501,7 @@ mod tests {
|
|||||||
// cannot be mmapped.
|
// cannot be mmapped.
|
||||||
//
|
//
|
||||||
// In that case the directory returns a SharedVecSlice.
|
// In that case the directory returns a SharedVecSlice.
|
||||||
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||||
let path = PathBuf::from("test");
|
let path = PathBuf::from("test");
|
||||||
{
|
{
|
||||||
let mut w = mmap_directory.open_write(&path).unwrap();
|
let mut w = mmap_directory.open_write(&path).unwrap();
|
||||||
@@ -573,7 +517,7 @@ mod tests {
|
|||||||
|
|
||||||
// here we test if the cache releases
|
// here we test if the cache releases
|
||||||
// mmaps correctly.
|
// mmaps correctly.
|
||||||
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||||
let num_paths = 10;
|
let num_paths = 10;
|
||||||
let paths: Vec<PathBuf> = (0..num_paths)
|
let paths: Vec<PathBuf> = (0..num_paths)
|
||||||
.map(|i| PathBuf::from(&*format!("file_{}", i)))
|
.map(|i| PathBuf::from(&*format!("file_{}", i)))
|
||||||
@@ -633,56 +577,48 @@ mod tests {
|
|||||||
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
|
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_watch_wrapper() {
|
|
||||||
let counter: Arc<AtomicUsize> = Default::default();
|
|
||||||
let counter_clone = counter.clone();
|
|
||||||
let tmp_dir = tempfile::TempDir::new().unwrap();
|
|
||||||
let tmp_dirpath = tmp_dir.path().to_owned();
|
|
||||||
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
|
|
||||||
let tmp_file = tmp_dirpath.join("coucou");
|
|
||||||
let _handle = watch_wrapper.watch(Box::new(move || {
|
|
||||||
counter_clone.fetch_add(1, Ordering::SeqCst);
|
|
||||||
}));
|
|
||||||
assert_eq!(counter.load(Ordering::SeqCst), 0);
|
|
||||||
fs::write(&tmp_file, b"whateverwilldo").unwrap();
|
|
||||||
thread::sleep(Duration::new(0, 1_000u32));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_mmap_released() {
|
fn test_mmap_released() {
|
||||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||||
let mut schema_builder: SchemaBuilder = Schema::builder();
|
let mut schema_builder: SchemaBuilder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
{
|
{
|
||||||
let index = Index::create(mmap_directory.clone(), schema).unwrap();
|
let index = Index::create(mmap_directory.clone(), schema).unwrap();
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
|
||||||
for _num_commits in 0..16 {
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
let mut log_merge_policy = LogMergePolicy::default();
|
||||||
|
log_merge_policy.set_min_merge_size(3);
|
||||||
|
index_writer.set_merge_policy(Box::new(log_merge_policy));
|
||||||
|
for _num_commits in 0..10 {
|
||||||
for _ in 0..10 {
|
for _ in 0..10 {
|
||||||
index_writer.add_document(doc!(text_field=>"abc"));
|
index_writer.add_document(doc!(text_field=>"abc"));
|
||||||
}
|
}
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit().unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
let reader = index
|
let reader = index
|
||||||
.reader_builder()
|
.reader_builder()
|
||||||
.reload_policy(ReloadPolicy::Manual)
|
.reload_policy(ReloadPolicy::Manual)
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap();
|
.unwrap();
|
||||||
for _ in 0..30 {
|
|
||||||
|
for _ in 0..4 {
|
||||||
index_writer.add_document(doc!(text_field=>"abc"));
|
index_writer.add_document(doc!(text_field=>"abc"));
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit().unwrap();
|
||||||
reader.reload().unwrap();
|
reader.reload().unwrap();
|
||||||
}
|
}
|
||||||
index_writer.wait_merging_threads().unwrap();
|
index_writer.wait_merging_threads().unwrap();
|
||||||
|
|
||||||
reader.reload().unwrap();
|
reader.reload().unwrap();
|
||||||
let num_segments = reader.searcher().segment_readers().len();
|
let num_segments = reader.searcher().segment_readers().len();
|
||||||
assert_eq!(num_segments, 4);
|
assert!(num_segments <= 4);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
num_segments * 7,
|
num_segments * 7,
|
||||||
mmap_directory.get_cache_info().mmapped.len()
|
mmap_directory.get_cache_info().mmapped.len()
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
|
assert!(mmap_directory.get_cache_info().mmapped.is_empty());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,10 +9,12 @@ mod mmap_directory;

 mod directory;
 mod directory_lock;
+mod file_slice;
+mod file_watcher;
 mod footer;
 mod managed_directory;
+mod owned_bytes;
 mod ram_directory;
-mod read_only_source;
 mod watch_event_router;

 /// Errors specific to the directory module.
@@ -21,11 +23,27 @@ pub mod error;
 pub use self::directory::DirectoryLock;
 pub use self::directory::{Directory, DirectoryClone};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
+pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
+pub use self::file_slice::{FileHandle, FileSlice};
+pub use self::owned_bytes::OwnedBytes;
 pub use self::ram_directory::RAMDirectory;
-pub use self::read_only_source::ReadOnlySource;
-pub(crate) use self::watch_event_router::WatchCallbackList;
-pub use self::watch_event_router::{WatchCallback, WatchHandle};
+pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
 use std::io::{self, BufWriter, Write};
+use std::path::PathBuf;

+/// Outcome of the Garbage collection
+pub struct GarbageCollectionResult {
+    /// List of files that were deleted in this cycle
+    pub deleted_files: Vec<PathBuf>,
+    /// List of files that were schedule to be deleted in this cycle,
+    /// but deletion did not work. This typically happens on windows,
+    /// as deleting a memory mapped file is forbidden.
+    ///
+    /// If a searcher is still held, a file cannot be deleted.
+    /// This is not considered a bug, the file will simply be deleted
+    /// in the next GC.
+    pub failed_to_delete_files: Vec<PathBuf>,
+}
+
 #[cfg(feature = "mmap")]
 pub use self::mmap_directory::MmapDirectory;
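A hypothetical consumer of the new GarbageCollectionResult. The field semantics follow the doc comments above; the function name is made up, and the re-export path tantivy::directory::GarbageCollectionResult is assumed from the hunk above:

use tantivy::directory::GarbageCollectionResult;

fn log_gc_outcome(result: &GarbageCollectionResult) {
    println!("garbage collection deleted {} files", result.deleted_files.len());
    if !result.failed_to_delete_files.is_empty() {
        // Typically mmapped files on Windows; they stay managed and are retried on the next GC.
        println!(
            "{} files could not be deleted yet",
            result.failed_to_delete_files.len()
        );
    }
}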
@@ -33,6 +51,9 @@ pub use self::mmap_directory::MmapDirectory;
 pub use self::managed_directory::ManagedDirectory;

 /// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
+///
+/// The point is that while the type is public, it cannot be built by anyone
+/// outside of this module.
 pub struct AntiCallToken(());

 /// Trait used to indicate when no more write need to be done on a writer
@@ -63,6 +84,13 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
     }
 }

+#[cfg(test)]
+impl<'a> TerminatingWrite for &'a mut Vec<u8> {
+    fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
+        self.flush()
+    }
+}
+
 /// Write object for Directory.
 ///
 /// `WritePtr` are required to implement both Write
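A hedged sketch of the TerminatingWrite contract defined in this module: a writer obtained from a Directory must be terminated explicitly so the flush/footer logic runs, rather than simply dropped. Paths and content below are illustrative:

use tantivy::directory::{Directory, RAMDirectory, TerminatingWrite};
use std::io::Write;
use std::path::Path;

fn write_and_terminate() -> tantivy::Result<()> {
    let directory = RAMDirectory::create();
    let mut wrt = directory.open_write(Path::new("segment.data"))?;
    wrt.write_all(b"payload")?;
    // `terminate` consumes the writer and finalizes the file; a plain drop would not.
    wrt.terminate()?;
    Ok(())
}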
src/directory/owned_bytes.rs (new file, 290 lines)
@@ -0,0 +1,290 @@
|
|||||||
|
use crate::directory::FileHandle;
|
||||||
|
use stable_deref_trait::StableDeref;
|
||||||
|
use std::convert::TryInto;
|
||||||
|
use std::mem;
|
||||||
|
use std::ops::{Deref, Range};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::{fmt, io};
|
||||||
|
|
||||||
|
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
|
||||||
|
/// this data as a static slice.
|
||||||
|
///
|
||||||
|
/// The backing object is required to be `StableDeref`.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct OwnedBytes {
|
||||||
|
data: &'static [u8],
|
||||||
|
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FileHandle for OwnedBytes {
|
||||||
|
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
|
||||||
|
Ok(self.slice(range))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OwnedBytes {
|
||||||
|
/// Creates an empty `OwnedBytes`.
|
||||||
|
pub fn empty() -> OwnedBytes {
|
||||||
|
OwnedBytes::new(&[][..])
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates an `OwnedBytes` intance given a `StableDeref` object.
|
||||||
|
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
|
||||||
|
data_holder: T,
|
||||||
|
) -> OwnedBytes {
|
||||||
|
let box_stable_deref = Arc::new(data_holder);
|
||||||
|
let bytes: &[u8] = box_stable_deref.as_ref();
|
||||||
|
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
|
||||||
|
OwnedBytes {
|
||||||
|
box_stable_deref,
|
||||||
|
data,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// creates a fileslice that is just a view over a slice of the data.
|
||||||
|
pub fn slice(&self, range: Range<usize>) -> Self {
|
||||||
|
OwnedBytes {
|
||||||
|
data: &self.data[range],
|
||||||
|
box_stable_deref: self.box_stable_deref.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the underlying slice of data.
|
||||||
|
/// `Deref` and `AsRef` are also available.
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn as_slice(&self) -> &[u8] {
|
||||||
|
self.data
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the len of the slice.
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.data.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
|
||||||
|
///
|
||||||
|
/// Left will hold `split_len` bytes.
|
||||||
|
///
|
||||||
|
/// This operation is cheap and does not require to copy any memory.
|
||||||
|
/// On the other hand, both `left` and `right` retain a handle over
|
||||||
|
/// the entire slice of memory. In other words, the memory will only
|
||||||
|
/// be released when both left and right are dropped.
|
||||||
|
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
|
||||||
|
let right_box_stable_deref = self.box_stable_deref.clone();
|
||||||
|
let left = OwnedBytes {
|
||||||
|
data: &self.data[..split_len],
|
||||||
|
box_stable_deref: self.box_stable_deref,
|
||||||
|
};
|
||||||
|
let right = OwnedBytes {
|
||||||
|
data: &self.data[split_len..],
|
||||||
|
box_stable_deref: right_box_stable_deref,
|
||||||
|
};
|
||||||
|
(left, right)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true iff this `OwnedBytes` is empty.
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.as_slice().is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Drops the left most `advance_len` bytes.
|
||||||
|
///
|
||||||
|
/// See also [.clip(clip_len: usize))](#method.clip).
|
||||||
|
#[inline(always)]
|
||||||
|
pub fn advance(&mut self, advance_len: usize) {
|
||||||
|
self.data = &self.data[advance_len..]
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reads an `u8` from the `OwnedBytes` and advance by one byte.
|
||||||
|
pub fn read_u8(&mut self) -> u8 {
|
||||||
|
assert!(!self.is_empty());
|
||||||
|
|
||||||
|
let byte = self.as_slice()[0];
|
||||||
|
self.advance(1);
|
||||||
|
byte
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reads an `u64` encoded as little-endian from the `OwnedBytes` and advance by 8 bytes.
|
||||||
|
pub fn read_u64(&mut self) -> u64 {
|
||||||
|
assert!(self.len() > 7);
|
||||||
|
|
||||||
|
let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
|
||||||
|
self.advance(8);
|
||||||
|
u64::from_le_bytes(octlet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for OwnedBytes {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
// We truncate the bytes in order to make sure the debug string
|
||||||
|
// is not too long.
|
||||||
|
let bytes_truncated: &[u8] = if self.len() > 8 {
|
||||||
|
&self.as_slice()[..10]
|
||||||
|
} else {
|
||||||
|
self.as_slice()
|
||||||
|
};
|
||||||
|
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deref for OwnedBytes {
|
||||||
|
type Target = [u8];
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
self.as_slice()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl io::Read for OwnedBytes {
|
||||||
|
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||||
|
let read_len = {
|
||||||
|
let data = self.as_slice();
|
||||||
|
if data.len() >= buf.len() {
|
||||||
|
let buf_len = buf.len();
|
||||||
|
buf.copy_from_slice(&data[..buf_len]);
|
||||||
|
buf.len()
|
||||||
|
} else {
|
||||||
|
let data_len = data.len();
|
||||||
|
buf[..data_len].copy_from_slice(data);
|
||||||
|
data_len
|
||||||
|
}
|
||||||
|
};
|
||||||
|
self.advance(read_len);
|
||||||
|
Ok(read_len)
|
||||||
|
}
|
||||||
|
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||||
|
let read_len = {
|
||||||
|
let data = self.as_slice();
|
||||||
|
buf.extend(data);
|
||||||
|
data.len()
|
||||||
|
};
|
||||||
|
self.advance(read_len);
|
||||||
|
Ok(read_len)
|
||||||
|
}
|
||||||
|
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||||
|
let read_len = self.read(buf)?;
|
||||||
|
if read_len != buf.len() {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::UnexpectedEof,
|
||||||
|
"failed to fill whole buffer",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsRef<[u8]> for OwnedBytes {
|
||||||
|
fn as_ref(&self) -> &[u8] {
|
||||||
|
self.as_slice()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::io::{self, Read};
|
||||||
|
|
||||||
|
use super::OwnedBytes;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_debug() {
|
||||||
|
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
|
||||||
|
assert_eq!(
|
||||||
|
format!("{:?}", short_bytes),
|
||||||
|
"OwnedBytes([97, 98, 99, 100], len=4)"
|
||||||
|
);
|
||||||
|
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
|
||||||
|
assert_eq!(
|
||||||
|
format!("{:?}", long_bytes),
|
||||||
|
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_read() -> io::Result<()> {
|
||||||
|
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
|
||||||
|
{
|
||||||
|
let mut buf = [0u8; 5];
|
||||||
|
bytes.read_exact(&mut buf[..]).unwrap();
|
||||||
|
assert_eq!(&buf, b"abcde");
|
||||||
|
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let mut buf = [0u8; 2];
|
||||||
|
bytes.read_exact(&mut buf[..]).unwrap();
|
||||||
|
assert_eq!(&buf, b"fg");
|
||||||
|
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
|
||||||
|
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||||
|
let mut buf = [0u8; 5];
|
||||||
|
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
||||||
|
assert_eq!(&buf, b"abcde");
|
||||||
|
assert_eq!(bytes.as_slice(), b"");
|
||||||
|
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
||||||
|
assert_eq!(&buf, b"abcde");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
|
||||||
|
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||||
|
let mut buf = [0u8; 7];
|
||||||
|
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
||||||
|
assert_eq!(&buf[..5], b"abcde");
|
||||||
|
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_read_to_end() -> io::Result<()> {
|
||||||
|
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||||
|
let mut buf = Vec::new();
|
||||||
|
bytes.read_to_end(&mut buf)?;
|
||||||
|
assert_eq!(buf.as_slice(), b"abcde".as_ref());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_read_u8() -> io::Result<()> {
|
||||||
|
let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
|
||||||
|
assert_eq!(bytes.read_u8(), 255);
|
||||||
|
assert_eq!(bytes.len(), 0);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_read_u64() -> io::Result<()> {
|
||||||
|
let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
|
||||||
|
assert_eq!(bytes.read_u64(), u64::MAX - 255);
|
||||||
|
assert_eq!(bytes.len(), 0);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_split() {
|
||||||
|
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
||||||
|
let (left, right) = bytes.split(3);
|
||||||
|
assert_eq!(left.as_slice(), b"abc");
|
||||||
|
assert_eq!(right.as_slice(), b"defghi");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_owned_bytes_split_boundary() {
|
||||||
|
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
||||||
|
{
|
||||||
|
let (left, right) = bytes.clone().split(0);
|
||||||
|
assert_eq!(left.as_slice(), b"");
|
||||||
|
assert_eq!(right.as_slice(), b"abcdefghi");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let (left, right) = bytes.split(9);
|
||||||
|
assert_eq!(left.as_slice(), b"abcdefghi");
|
||||||
|
assert_eq!(right.as_slice(), b"");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
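A short usage sketch of the OwnedBytes type added above, mirroring its own unit tests; it assumes the re-export tantivy::directory::OwnedBytes shown in the mod.rs hunk earlier:

use tantivy::directory::OwnedBytes;

fn owned_bytes_demo() {
    let bytes = OwnedBytes::new(b"hello world".as_ref());
    // `split` is cheap: both halves keep a handle on the same backing buffer.
    let (left, right) = bytes.split(5);
    assert_eq!(left.as_slice(), b"hello");
    assert_eq!(right.as_slice(), b" world");

    let mut cursor = OwnedBytes::new(vec![1u8, 2, 3]);
    assert_eq!(cursor.read_u8(), 1); // reading advances the view
    assert_eq!(cursor.len(), 2);
}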
@@ -1,9 +1,9 @@
|
|||||||
use crate::core::META_FILEPATH;
|
|
||||||
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
||||||
use crate::directory::AntiCallToken;
|
use crate::directory::AntiCallToken;
|
||||||
use crate::directory::WatchCallbackList;
|
use crate::directory::WatchCallbackList;
|
||||||
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
|
use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle};
|
||||||
use crate::directory::{TerminatingWrite, WritePtr};
|
use crate::directory::{TerminatingWrite, WritePtr};
|
||||||
|
use crate::{common::HasLen, core::META_FILEPATH};
|
||||||
use fail::fail_point;
|
use fail::fail_point;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
@@ -12,6 +12,8 @@ use std::path::{Path, PathBuf};
|
|||||||
use std::result;
|
use std::result;
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
|
use super::FileHandle;
|
||||||
|
|
||||||
/// Writer associated with the `RAMDirectory`
|
/// Writer associated with the `RAMDirectory`
|
||||||
///
|
///
|
||||||
/// The Writer just writes a buffer.
|
/// The Writer just writes a buffer.
|
||||||
@@ -80,17 +82,17 @@ impl TerminatingWrite for VecWriter {
|
|||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
struct InnerDirectory {
|
struct InnerDirectory {
|
||||||
fs: HashMap<PathBuf, ReadOnlySource>,
|
fs: HashMap<PathBuf, FileSlice>,
|
||||||
watch_router: WatchCallbackList,
|
watch_router: WatchCallbackList,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl InnerDirectory {
|
impl InnerDirectory {
|
||||||
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
|
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
|
||||||
let data = ReadOnlySource::new(Vec::from(data));
|
let data = FileSlice::from(data.to_vec());
|
||||||
self.fs.insert(path, data).is_some()
|
self.fs.insert(path, data).is_some()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
|
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
|
||||||
self.fs
|
self.fs
|
||||||
.get(path)
|
.get(path)
|
||||||
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
|
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
|
||||||
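The RAM directory now stores each file as a FileSlice built from the written bytes. A small sketch of that construction and of reading it back, assuming the FileSlice re-export shown in the mod.rs hunk earlier in this diff:

use tantivy::directory::FileSlice;

fn file_slice_demo() -> std::io::Result<()> {
    let slice = FileSlice::from(b"abcdef".to_vec());
    // `read_bytes` materializes an OwnedBytes view over the whole slice.
    let bytes = slice.read_bytes()?;
    assert_eq!(bytes.as_slice(), b"abcdef");
    Ok(())
}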
@@ -144,27 +146,56 @@ impl RAMDirectory {
|
|||||||
pub fn total_mem_usage(&self) -> usize {
|
pub fn total_mem_usage(&self) -> usize {
|
||||||
self.fs.read().unwrap().total_mem_usage()
|
self.fs.read().unwrap().total_mem_usage()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Write a copy of all of the files saved in the RAMDirectory in the target `Directory`.
|
||||||
|
///
|
||||||
|
/// Files are all written using the `Directory::write` meaning, even if they were
|
||||||
|
/// written using the `atomic_write` api.
|
||||||
|
///
|
||||||
|
/// If an error is encounterred, files may be persisted partially.
|
||||||
|
pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> {
|
||||||
|
let wlock = self.fs.write().unwrap();
|
||||||
|
for (path, file) in wlock.fs.iter() {
|
||||||
|
let mut dest_wrt = dest.open_write(path)?;
|
||||||
|
dest_wrt.write_all(file.read_bytes()?.as_slice())?;
|
||||||
|
dest_wrt.terminate()?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Directory for RAMDirectory {
|
impl Directory for RAMDirectory {
|
||||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
|
||||||
|
let file_slice = self.open_read(path)?;
|
||||||
|
Ok(Box::new(file_slice))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||||
self.fs.read().unwrap().open_read(path)
|
self.fs.read().unwrap().open_read(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||||
fail_point!("RAMDirectory::delete", |_| {
|
fail_point!("RAMDirectory::delete", |_| {
|
||||||
use crate::directory::error::IOError;
|
Err(DeleteError::IOError {
|
||||||
let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
|
io_error: io::Error::from(io::ErrorKind::Other),
|
||||||
Err(DeleteError::from(io_error))
|
filepath: path.to_path_buf(),
|
||||||
|
})
|
||||||
});
|
});
|
||||||
self.fs.write().unwrap().delete(path)
|
self.fs.write().unwrap().delete(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn exists(&self, path: &Path) -> bool {
|
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
|
||||||
self.fs.read().unwrap().exists(path)
|
Ok(self
|
||||||
|
.fs
|
||||||
|
.read()
|
||||||
|
.map_err(|e| OpenReadError::IOError {
|
||||||
|
io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
|
||||||
|
filepath: path.to_path_buf(),
|
||||||
|
})?
|
||||||
|
.exists(path))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||||
let mut fs = self.fs.write().unwrap();
|
let mut fs = self.fs.write().unwrap();
|
||||||
let path_buf = PathBuf::from(path);
|
let path_buf = PathBuf::from(path);
|
||||||
let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
|
let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
|
||||||
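The persist() method added in the hunk above copies every file of a RAMDirectory into another Directory. A hedged sketch of the typical use, building in memory and then flushing to an on-disk directory; constructors are the ones used elsewhere in this diff:

use tantivy::directory::{MmapDirectory, RAMDirectory};
use std::path::Path;

fn flush_to_disk(ram_directory: &RAMDirectory, output_dir: &Path) -> tantivy::Result<()> {
    let disk_directory = MmapDirectory::open(output_dir)?;
    // Every file is re-written through Directory::open_write, even those
    // originally created with atomic_write.
    ram_directory.persist(&disk_directory)?;
    Ok(())
}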
@@ -178,24 +209,27 @@ impl Directory for RAMDirectory {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
|
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
|
||||||
Ok(self.open_read(path)?.as_slice().to_owned())
|
let bytes =
|
||||||
|
self.open_read(path)?
|
||||||
|
.read_bytes()
|
||||||
|
.map_err(|io_error| OpenReadError::IOError {
|
||||||
|
io_error,
|
||||||
|
filepath: path.to_path_buf(),
|
||||||
|
})?;
|
||||||
|
Ok(bytes.as_slice().to_owned())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||||
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
|
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
|
||||||
io::ErrorKind::Other,
|
io::ErrorKind::Other,
|
||||||
msg.unwrap_or_else(|| "Undefined".to_string())
|
msg.unwrap_or_else(|| "Undefined".to_string())
|
||||||
)));
|
)));
|
||||||
let path_buf = PathBuf::from(path);
|
let path_buf = PathBuf::from(path);
|
||||||
|
|
||||||
// Reserve the path to prevent calls to .write() to succeed.
|
self.fs.write().unwrap().write(path_buf, data);
|
||||||
self.fs.write().unwrap().write(path_buf.clone(), &[]);
|
|
||||||
|
|
||||||
let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
|
if path == *META_FILEPATH {
|
||||||
vec_writer.write_all(data)?;
|
let _ = self.fs.write().unwrap().watch_router.broadcast();
|
||||||
vec_writer.flush()?;
|
|
||||||
if path == Path::new(&*META_FILEPATH) {
|
|
||||||
self.fs.write().unwrap().watch_router.broadcast();
|
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
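A minimal sketch (not part of the diff, names assumed from an external caller's point of view) of how the reworked API reads after this hunk: Directory methods now take &self, atomic_write stores the payload directly, and open_read returns a FileSlice whose bytes are materialized through read_bytes().

    // Sketch only: illustrates the &self-based Directory API shown above.
    use std::path::Path;
    use tantivy::Directory;

    fn roundtrip(directory: &dyn Directory) -> tantivy::Result<()> {
        let path = Path::new("example.bin");
        directory.atomic_write(path, b"hello")?;              // no longer requires &mut self
        assert!(directory.exists(path)?);                     // exists() now returns a Result
        let bytes = directory.open_read(path)?.read_bytes()?; // FileSlice -> owned bytes
        assert_eq!(bytes.as_slice(), b"hello");
        Ok(())
    }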
@@ -204,3 +238,28 @@ impl Directory for RAMDirectory {
         Ok(self.fs.write().unwrap().watch(watch_callback))
     }
 }
 
+#[cfg(test)]
+mod tests {
+    use super::RAMDirectory;
+    use crate::Directory;
+    use std::io::Write;
+    use std::path::Path;
+
+    #[test]
+    fn test_persist() {
+        let msg_atomic: &'static [u8] = b"atomic is the way";
+        let msg_seq: &'static [u8] = b"sequential is the way";
+        let path_atomic: &'static Path = Path::new("atomic");
+        let path_seq: &'static Path = Path::new("seq");
+        let directory = RAMDirectory::create();
+        assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
+        let mut wrt = directory.open_write(path_seq).unwrap();
+        assert!(wrt.write_all(msg_seq).is_ok());
+        assert!(wrt.flush().is_ok());
+        let directory_copy = RAMDirectory::create();
+        assert!(directory.persist(&directory_copy).is_ok());
+        assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
+        assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
+    }
+}

@@ -1,131 +0,0 @@
-use crate::common::HasLen;
-use stable_deref_trait::{CloneStableDeref, StableDeref};
-use std::ops::Deref;
-use std::sync::Arc;
-
-pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-
-/// Read object that represents files in tantivy.
-///
-/// These read objects are only in charge to deliver
-/// the data in the form of a constant read-only `&[u8]`.
-/// Whatever happens to the directory file, the data
-/// hold by this object should never be altered or destroyed.
-pub struct ReadOnlySource {
-    data: Arc<BoxedData>,
-    start: usize,
-    stop: usize,
-}
-
-unsafe impl StableDeref for ReadOnlySource {}
-unsafe impl CloneStableDeref for ReadOnlySource {}
-
-impl Deref for ReadOnlySource {
-    type Target = [u8];
-
-    fn deref(&self) -> &[u8] {
-        self.as_slice()
-    }
-}
-
-impl From<Arc<BoxedData>> for ReadOnlySource {
-    fn from(data: Arc<BoxedData>) -> Self {
-        let len = data.len();
-        ReadOnlySource {
-            data,
-            start: 0,
-            stop: len,
-        }
-    }
-}
-
-impl ReadOnlySource {
-    pub(crate) fn new<D>(data: D) -> ReadOnlySource
-    where
-        D: Deref<Target = [u8]> + Send + Sync + 'static,
-    {
-        let len = data.len();
-        ReadOnlySource {
-            data: Arc::new(Box::new(data)),
-            start: 0,
-            stop: len,
-        }
-    }
-
-    /// Creates an empty ReadOnlySource
-    pub fn empty() -> ReadOnlySource {
-        ReadOnlySource::new(&[][..])
-    }
-
-    /// Returns the data underlying the ReadOnlySource object.
-    pub fn as_slice(&self) -> &[u8] {
-        &self.data[self.start..self.stop]
-    }
-
-    /// Splits into 2 `ReadOnlySource`, at the offset given
-    /// as an argument.
-    pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
-        let left = self.slice(0, addr);
-        let right = self.slice_from(addr);
-        (left, right)
-    }
-
-    /// Creates a ReadOnlySource that is just a
-    /// view over a slice of the data.
-    ///
-    /// Keep in mind that any living slice extends
-    /// the lifetime of the original ReadOnlySource,
-    ///
-    /// For instance, if `ReadOnlySource` wraps 500MB
-    /// worth of data in anonymous memory, and only a
-    /// 1KB slice is remaining, the whole `500MBs`
-    /// are retained in memory.
-    pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
-        assert!(
-            start <= stop,
-            "Requested negative slice [{}..{}]",
-            start,
-            stop
-        );
-        assert!(stop <= self.len());
-        ReadOnlySource {
-            data: self.data.clone(),
-            start: self.start + start,
-            stop: self.start + stop,
-        }
-    }
-
-    /// Like `.slice(...)` but enforcing only the `from`
-    /// boundary.
-    ///
-    /// Equivalent to `.slice(from_offset, self.len())`
-    pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
-        self.slice(from_offset, self.len())
-    }
-
-    /// Like `.slice(...)` but enforcing only the `to`
-    /// boundary.
-    ///
-    /// Equivalent to `.slice(0, to_offset)`
-    pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
-        self.slice(0, to_offset)
-    }
-}
-
-impl HasLen for ReadOnlySource {
-    fn len(&self) -> usize {
-        self.stop - self.start
-    }
-}
-
-impl Clone for ReadOnlySource {
-    fn clone(&self) -> Self {
-        self.slice_from(0)
-    }
-}
-
-impl From<Vec<u8>> for ReadOnlySource {
-    fn from(data: Vec<u8>) -> ReadOnlySource {
-        ReadOnlySource::new(data)
-    }
-}
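The slice semantics documented in the removed type are worth spelling out: a sub-slice clones the Arc, so it keeps the entire underlying buffer alive. A small sketch of that documented behavior, using only the API of the removed file (illustration of the old API, not part of the diff):

    // Sketch only: the retention behavior described in the removed docs.
    let source = ReadOnlySource::from(vec![0u8; 500 * 1024 * 1024]); // 500MB buffer
    let tiny = source.slice(0, 1024);                                // 1KB view
    drop(source);
    // `tiny` still holds an Arc to the full 500MB allocation until it is dropped.
    assert_eq!(tiny.as_slice().len(), 1024);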
@@ -1,93 +1,182 @@
 use super::*;
+use futures::channel::oneshot;
+use futures::executor::block_on;
 use std::io::Write;
 use std::mem;
 use std::path::{Path, PathBuf};
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::atomic::{AtomicBool, AtomicUsize};
 use std::sync::Arc;
-use std::thread;
-use std::time;
 use std::time::Duration;
 
-#[test]
-fn test_ram_directory() {
-    let mut ram_directory = RAMDirectory::create();
-    test_directory(&mut ram_directory);
+#[cfg(feature = "mmap")]
+mod mmap_directory_tests {
+    use crate::directory::MmapDirectory;
+
+    type DirectoryImpl = MmapDirectory;
+
+    fn make_directory() -> DirectoryImpl {
+        MmapDirectory::create_from_tempdir().unwrap()
+    }
+
+    #[test]
+    fn test_simple() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_simple(&directory)
+    }
+
+    #[test]
+    fn test_write_create_the_file() {
+        let directory = make_directory();
+        super::test_write_create_the_file(&directory);
+    }
+
+    #[test]
+    fn test_rewrite_forbidden() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_rewrite_forbidden(&directory)?;
+        Ok(())
+    }
+
+    #[test]
+    fn test_directory_delete() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_directory_delete(&directory)?;
+        Ok(())
+    }
+
+    #[test]
+    fn test_lock_non_blocking() {
+        let directory = make_directory();
+        super::test_lock_non_blocking(&directory);
+    }
+
+    #[test]
+    fn test_lock_blocking() {
+        let directory = make_directory();
+        super::test_lock_blocking(&directory);
+    }
+
+    #[test]
+    fn test_watch() {
+        let directory = make_directory();
+        super::test_watch(&directory);
+    }
 }
 
-#[test]
-#[cfg(feature = "mmap")]
-fn test_mmap_directory() {
-    let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
-    test_directory(&mut mmap_directory);
+mod ram_directory_tests {
+    use crate::directory::RAMDirectory;
+
+    type DirectoryImpl = RAMDirectory;
+
+    fn make_directory() -> DirectoryImpl {
+        RAMDirectory::default()
+    }
+
+    #[test]
+    fn test_simple() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_simple(&directory)
+    }
+
+    #[test]
+    fn test_write_create_the_file() {
+        let directory = make_directory();
+        super::test_write_create_the_file(&directory);
+    }
+
+    #[test]
+    fn test_rewrite_forbidden() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_rewrite_forbidden(&directory)?;
+        Ok(())
+    }
+
+    #[test]
+    fn test_directory_delete() -> crate::Result<()> {
+        let directory = make_directory();
+        super::test_directory_delete(&directory)?;
+        Ok(())
+    }
+
+    #[test]
+    fn test_lock_non_blocking() {
+        let directory = make_directory();
+        super::test_lock_non_blocking(&directory);
+    }
+
+    #[test]
+    fn test_lock_blocking() {
+        let directory = make_directory();
+        super::test_lock_blocking(&directory);
+    }
+
+    #[test]
+    fn test_watch() {
+        let directory = make_directory();
+        super::test_watch(&directory);
+    }
 }
 
 #[test]
 #[should_panic]
 fn ram_directory_panics_if_flush_forgotten() {
     let test_path: &'static Path = Path::new("some_path_for_test");
-    let mut ram_directory = RAMDirectory::create();
+    let ram_directory = RAMDirectory::create();
     let mut write_file = ram_directory.open_write(test_path).unwrap();
     assert!(write_file.write_all(&[4]).is_ok());
 }
 
-fn test_simple(directory: &mut dyn Directory) {
+fn test_simple(directory: &dyn Directory) -> crate::Result<()> {
     let test_path: &'static Path = Path::new("some_path_for_test");
-    {
-        let mut write_file = directory.open_write(test_path).unwrap();
-        assert!(directory.exists(test_path));
-        write_file.write_all(&[4]).unwrap();
-        write_file.write_all(&[3]).unwrap();
-        write_file.write_all(&[7, 3, 5]).unwrap();
-        write_file.flush().unwrap();
-    }
-    {
-        let read_file = directory.open_read(test_path).unwrap();
-        let data: &[u8] = &*read_file;
-        assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
-    }
+    let mut write_file = directory.open_write(test_path)?;
+    assert!(directory.exists(test_path).unwrap());
+    write_file.write_all(&[4])?;
+    write_file.write_all(&[3])?;
+    write_file.write_all(&[7, 3, 5])?;
+    write_file.flush()?;
+    let read_file = directory.open_read(test_path)?.read_bytes()?;
+    assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]);
+    mem::drop(read_file);
     assert!(directory.delete(test_path).is_ok());
-    assert!(!directory.exists(test_path));
+    assert!(!directory.exists(test_path).unwrap());
+    Ok(())
 }
 
-fn test_rewrite_forbidden(directory: &mut dyn Directory) {
+fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> {
     let test_path: &'static Path = Path::new("some_path_for_test");
-    {
-        directory.open_write(test_path).unwrap();
-        assert!(directory.exists(test_path));
-    }
-    {
-        assert!(directory.open_write(test_path).is_err());
-    }
+    directory.open_write(test_path)?;
+    assert!(directory.exists(test_path).unwrap());
+    assert!(directory.open_write(test_path).is_err());
     assert!(directory.delete(test_path).is_ok());
+    Ok(())
 }
 
-fn test_write_create_the_file(directory: &mut dyn Directory) {
+fn test_write_create_the_file(directory: &dyn Directory) {
     let test_path: &'static Path = Path::new("some_path_for_test");
     {
         assert!(directory.open_read(test_path).is_err());
         let _w = directory.open_write(test_path).unwrap();
-        assert!(directory.exists(test_path));
+        assert!(directory.exists(test_path).unwrap());
         assert!(directory.open_read(test_path).is_ok());
         assert!(directory.delete(test_path).is_ok());
     }
 }
 
-fn test_directory_delete(directory: &mut dyn Directory) {
+fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
     let test_path: &'static Path = Path::new("some_path_for_test");
     assert!(directory.open_read(test_path).is_err());
-    let mut write_file = directory.open_write(&test_path).unwrap();
-    write_file.write_all(&[1, 2, 3, 4]).unwrap();
-    write_file.flush().unwrap();
+    let mut write_file = directory.open_write(&test_path)?;
+    write_file.write_all(&[1, 2, 3, 4])?;
+    write_file.flush()?;
     {
-        let read_handle = directory.open_read(&test_path).unwrap();
-        assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
+        let read_handle = directory.open_read(&test_path)?.read_bytes()?;
+        assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
         // Mapped files can't be deleted on Windows
         if !cfg!(windows) {
             assert!(directory.delete(&test_path).is_ok());
-            assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
+            assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
         }
 
         assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
     }
 
@@ -97,53 +186,40 @@ fn test_directory_delete(directory: &mut dyn Directory) {
 
     assert!(directory.open_read(&test_path).is_err());
     assert!(directory.delete(&test_path).is_err());
+    Ok(())
 }
 
-fn test_directory(directory: &mut dyn Directory) {
-    test_simple(directory);
-    test_rewrite_forbidden(directory);
-    test_write_create_the_file(directory);
-    test_directory_delete(directory);
-    test_lock_non_blocking(directory);
-    test_lock_blocking(directory);
-    test_watch(directory);
-}
-
-fn test_watch(directory: &mut dyn Directory) {
+fn test_watch(directory: &dyn Directory) {
     let counter: Arc<AtomicUsize> = Default::default();
-    let counter_clone = counter.clone();
-    let watch_callback = Box::new(move || {
-        counter_clone.fetch_add(1, Ordering::SeqCst);
-    });
-    assert!(directory
-        .atomic_write(Path::new("meta.json"), b"random_test_data")
-        .is_ok());
-    thread::sleep(Duration::new(0, 10_000));
-    assert_eq!(0, counter.load(Ordering::SeqCst));
-
-    let watch_handle = directory.watch(watch_callback).unwrap();
-    for i in 0..10 {
-        assert_eq!(i, counter.load(Ordering::SeqCst));
-        assert!(directory
-            .atomic_write(Path::new("meta.json"), b"random_test_data_2")
-            .is_ok());
-        for _ in 0..1_000 {
-            if counter.load(Ordering::SeqCst) > i {
-                break;
-            }
-            thread::sleep(Duration::from_millis(10));
-        }
-        assert_eq!(i + 1, counter.load(Ordering::SeqCst));
-    }
-    mem::drop(watch_handle);
+    let (tx, rx) = crossbeam::channel::unbounded();
+    let timeout = Duration::from_millis(500);
+
+    let handle = directory
+        .watch(WatchCallback::new(move || {
+            let val = counter.fetch_add(1, SeqCst);
+            tx.send(val + 1).unwrap();
+        }))
+        .unwrap();
 
     assert!(directory
-        .atomic_write(Path::new("meta.json"), b"random_test_data")
+        .atomic_write(Path::new("meta.json"), b"foo")
         .is_ok());
-    thread::sleep(Duration::from_millis(200));
-    assert_eq!(10, counter.load(Ordering::SeqCst));
+    assert_eq!(rx.recv_timeout(timeout), Ok(1));
+
+    assert!(directory
+        .atomic_write(Path::new("meta.json"), b"bar")
+        .is_ok());
+    assert_eq!(rx.recv_timeout(timeout), Ok(2));
+
+    mem::drop(handle);
+
+    assert!(directory
+        .atomic_write(Path::new("meta.json"), b"qux")
+        .is_ok());
+    assert!(rx.recv_timeout(timeout).is_err());
 }
 
-fn test_lock_non_blocking(directory: &mut dyn Directory) {
+fn test_lock_non_blocking(directory: &dyn Directory) {
     {
         let lock_a_res = directory.acquire_lock(&Lock {
             filepath: PathBuf::from("a.lock"),
@@ -168,15 +244,19 @@ fn test_lock_non_blocking(directory: &mut dyn Directory) {
     assert!(lock_a_res.is_ok());
 }
 
-fn test_lock_blocking(directory: &mut dyn Directory) {
+fn test_lock_blocking(directory: &dyn Directory) {
     let lock_a_res = directory.acquire_lock(&Lock {
         filepath: PathBuf::from("a.lock"),
         is_blocking: true,
     });
     assert!(lock_a_res.is_ok());
+    let in_thread = Arc::new(AtomicBool::default());
+    let in_thread_clone = in_thread.clone();
+    let (sender, receiver) = oneshot::channel();
     std::thread::spawn(move || {
         //< lock_a_res is sent to the thread.
-        std::thread::sleep(time::Duration::from_millis(10));
+        in_thread_clone.store(true, SeqCst);
+        let _just_sync = block_on(receiver);
         // explicitely droping lock_a_res. It would have been sufficient to just force it
         // to be part of the move, but the intent seems clearer that way.
         drop(lock_a_res);
@@ -189,14 +269,18 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
     });
     assert!(lock_a_res.is_err());
     }
-    {
-        // the blocking call should wait for at least 10ms.
-        let start = time::Instant::now();
-        let lock_a_res = directory.acquire_lock(&Lock {
+    let directory_clone = directory.box_clone();
+    let (sender2, receiver2) = oneshot::channel();
+    let join_handle = std::thread::spawn(move || {
+        assert!(sender2.send(()).is_ok());
+        let lock_a_res = directory_clone.acquire_lock(&Lock {
             filepath: PathBuf::from("a.lock"),
             is_blocking: true,
         });
+        assert!(in_thread.load(SeqCst));
         assert!(lock_a_res.is_ok());
-        assert!(start.elapsed().subsec_millis() >= 10);
-    }
+    });
+    assert!(block_on(receiver2).is_ok());
+    assert!(sender.send(()).is_ok());
+    assert!(join_handle.join().is_ok());
 }

@@ -1,9 +1,23 @@
+use futures::channel::oneshot;
+use futures::{Future, TryFutureExt};
 use std::sync::Arc;
 use std::sync::RwLock;
 use std::sync::Weak;
 
-/// Type alias for callbacks registered when watching files of a `Directory`.
-pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
+/// Cloneable wrapper for callbacks registered when watching files of a `Directory`.
+#[derive(Clone)]
+pub struct WatchCallback(Arc<dyn Fn() + Sync + Send>);
+
+impl WatchCallback {
+    /// Wraps a `Fn()` to create a WatchCallback.
+    pub fn new<F: Fn() + Sync + Send + 'static>(op: F) -> Self {
+        WatchCallback(Arc::new(op))
+    }
+
+    fn call(&self) {
+        self.0()
+    }
+}
 
 /// Helper struct to implement the watch method in `Directory` implementations.
 ///
@@ -22,22 +36,36 @@ pub struct WatchCallbackList {
 #[derive(Clone)]
 pub struct WatchHandle(Arc<WatchCallback>);
+
+impl WatchHandle {
+    /// Create a WatchHandle handle.
+    pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
+        WatchHandle(watch_callback)
+    }
+
+    /// Returns an empty watch handle.
+    ///
+    /// This function is only useful when implementing a readonly directory.
+    pub fn empty() -> WatchHandle {
+        WatchHandle::new(Arc::new(WatchCallback::new(|| {})))
+    }
+}
 
 impl WatchCallbackList {
-    /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
+    /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
     pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
         let watch_callback_arc = Arc::new(watch_callback);
         let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
         self.router.write().unwrap().push(watch_callback_weak);
-        WatchHandle(watch_callback_arc)
+        WatchHandle::new(watch_callback_arc)
     }
 
-    fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
-        let mut callbacks = vec![];
+    fn list_callback(&self) -> Vec<WatchCallback> {
+        let mut callbacks: Vec<WatchCallback> = vec![];
         let mut router_wlock = self.router.write().unwrap();
         let mut i = 0;
         while i < router_wlock.len() {
             if let Some(watch) = router_wlock[i].upgrade() {
-                callbacks.push(watch);
+                callbacks.push(watch.as_ref().clone());
                 i += 1;
             } else {
                 router_wlock.swap_remove(i);
@@ -47,14 +75,21 @@ impl WatchCallbackList {
     }
 
     /// Triggers all callbacks
-    pub fn broadcast(&self) {
+    pub fn broadcast(&self) -> impl Future<Output = ()> {
         let callbacks = self.list_callback();
+        let (sender, receiver) = oneshot::channel();
+        let result = receiver.unwrap_or_else(|_| ());
+        if callbacks.is_empty() {
+            let _ = sender.send(());
+            return result;
+        }
         let spawn_res = std::thread::Builder::new()
             .name("watch-callbacks".to_string())
             .spawn(move || {
                 for callback in callbacks {
-                    callback();
+                    callback.call();
                 }
+                let _ = sender.send(());
             });
         if let Err(err) = spawn_res {
             error!(
@@ -62,44 +97,42 @@ impl WatchCallbackList {
                 err
             );
         }
+        result
     }
 }
 
 #[cfg(test)]
 mod tests {
-    use crate::directory::WatchCallbackList;
+    use crate::directory::{WatchCallback, WatchCallbackList};
+    use futures::executor::block_on;
     use std::mem;
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::Arc;
-    use std::thread;
-    use std::time::Duration;
-
-    const WAIT_TIME: u64 = 20;
 
     #[test]
     fn test_watch_event_router_simple() {
         let watch_event_router = WatchCallbackList::default();
         let counter: Arc<AtomicUsize> = Default::default();
         let counter_clone = counter.clone();
-        let inc_callback = Box::new(move || {
+        let inc_callback = WatchCallback::new(move || {
             counter_clone.fetch_add(1, Ordering::SeqCst);
         });
-        watch_event_router.broadcast();
+        block_on(watch_event_router.broadcast());
        assert_eq!(0, counter.load(Ordering::SeqCst));
         let handle_a = watch_event_router.subscribe(inc_callback);
-        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(watch_event_router.broadcast());
         assert_eq!(1, counter.load(Ordering::SeqCst));
-        watch_event_router.broadcast();
-        watch_event_router.broadcast();
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(async {
+            (
+                watch_event_router.broadcast().await,
+                watch_event_router.broadcast().await,
+                watch_event_router.broadcast().await,
+            )
+        });
         assert_eq!(4, counter.load(Ordering::SeqCst));
         mem::drop(handle_a);
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(watch_event_router.broadcast());
         assert_eq!(4, counter.load(Ordering::SeqCst));
     }
 
@@ -109,26 +142,26 @@ mod tests {
         let counter: Arc<AtomicUsize> = Default::default();
         let inc_callback = |inc: usize| {
             let counter_clone = counter.clone();
-            Box::new(move || {
+            WatchCallback::new(move || {
                 counter_clone.fetch_add(inc, Ordering::SeqCst);
             })
         };
         let handle_a = watch_event_router.subscribe(inc_callback(1));
         let handle_a2 = watch_event_router.subscribe(inc_callback(10));
-        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        watch_event_router.broadcast();
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(async {
+            futures::join!(
+                watch_event_router.broadcast(),
+                watch_event_router.broadcast()
+            )
+        });
         assert_eq!(22, counter.load(Ordering::SeqCst));
         mem::drop(handle_a);
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(watch_event_router.broadcast());
         assert_eq!(32, counter.load(Ordering::SeqCst));
         mem::drop(handle_a2);
-        watch_event_router.broadcast();
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(watch_event_router.broadcast());
+        block_on(watch_event_router.broadcast());
         assert_eq!(32, counter.load(Ordering::SeqCst));
     }
 
@@ -137,19 +170,20 @@ mod tests {
         let watch_event_router = WatchCallbackList::default();
         let counter: Arc<AtomicUsize> = Default::default();
         let counter_clone = counter.clone();
-        let inc_callback = Box::new(move || {
+        let inc_callback = WatchCallback::new(move || {
             counter_clone.fetch_add(1, Ordering::SeqCst);
         });
         let handle_a = watch_event_router.subscribe(inc_callback);
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        watch_event_router.broadcast();
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        block_on(async {
+            let future1 = watch_event_router.broadcast();
+            let future2 = watch_event_router.broadcast();
+            futures::join!(future1, future2)
+        });
         assert_eq!(2, counter.load(Ordering::SeqCst));
-        thread::sleep(Duration::from_millis(WAIT_TIME));
         mem::drop(handle_a);
-        watch_event_router.broadcast();
-        thread::sleep(Duration::from_millis(WAIT_TIME));
+        let _ = watch_event_router.broadcast();
+        block_on(watch_event_router.broadcast());
         assert_eq!(2, counter.load(Ordering::SeqCst));
     }
 }
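Since broadcast() now returns a future that resolves once every registered callback has run, callers that want the old synchronous behaviour simply block on it, as the updated tests above do. A minimal sketch of the new contract (not part of the diff):

    // Sketch only: subscribing a callback and waiting for a broadcast to complete.
    use futures::executor::block_on;

    let router = WatchCallbackList::default();
    let handle = router.subscribe(WatchCallback::new(|| println!("meta.json changed")));
    block_on(router.broadcast()); // resolves after the callback has been called
    drop(handle);                 // dropping the handle unsubscribes the callback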

src/docset.rs (141 changed lines)
@@ -1,58 +1,48 @@
-use crate::common::BitSet;
 use crate::fastfield::DeleteBitSet;
 use crate::DocId;
 use std::borrow::Borrow;
 use std::borrow::BorrowMut;
-use std::cmp::Ordering;
 
-/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
-#[derive(PartialEq, Eq, Debug)]
-pub enum SkipResult {
-    /// target was in the docset
-    Reached,
-    /// target was not in the docset, skipping stopped as a greater element was found
-    OverStep,
-    /// the docset was entirely consumed without finding the target, nor any
-    /// element greater than the target.
-    End,
-}
+/// Sentinel value returned when a DocSet has been entirely consumed.
+///
+/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
+/// to compare [u32; 4].
+pub const TERMINATED: DocId = std::i32::MAX as u32;
 
 /// Represents an iterable set of sorted doc ids.
-pub trait DocSet {
+pub trait DocSet: Send {
     /// Goes to the next element.
-    /// `.advance(...)` needs to be called a first time to point to the correct
-    /// element.
-    fn advance(&mut self) -> bool;
+    ///
+    /// The DocId of the next element is returned.
+    /// In other words we should always have :
+    /// ```ignore
+    /// let doc = docset.advance();
+    /// assert_eq!(doc, docset.doc());
+    /// ```
+    ///
+    /// If we reached the end of the DocSet, TERMINATED should be returned.
+    ///
+    /// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
+    /// be returned.
+    /// TODO Test existing docsets.
+    fn advance(&mut self) -> DocId;
 
-    /// After skipping, position the iterator in such a way that `.doc()`
-    /// will return a value greater than or equal to target.
+    /// Advances the DocSet forward until reaching the target, or going to the
+    /// lowest DocId greater than the target.
     ///
-    /// SkipResult expresses whether the `target value` was reached, overstepped,
-    /// or if the `DocSet` was entirely consumed without finding any value
-    /// greater or equal to the `target`.
+    /// If the end of the DocSet is reached, TERMINATED is returned.
     ///
-    /// WARNING: Calling skip always advances the docset.
-    /// More specifically, if the docset is already positionned on the target
-    /// skipping will advance to the next position and return SkipResult::Overstep.
+    /// Calling `.seek(target)` on a terminated DocSet is legal. Implementation
+    /// of DocSet should support it.
     ///
-    /// If `.skip_next()` oversteps, then the docset must be positionned correctly
-    /// on an existing document. In other words, `.doc()` should return the first document
-    /// greater than `DocId`.
-    fn skip_next(&mut self, target: DocId) -> SkipResult {
-        if !self.advance() {
-            return SkipResult::End;
-        }
-        loop {
-            match self.doc().cmp(&target) {
-                Ordering::Less => {
-                    if !self.advance() {
-                        return SkipResult::End;
-                    }
-                }
-                Ordering::Equal => return SkipResult::Reached,
-                Ordering::Greater => return SkipResult::OverStep,
-            }
+    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
+    fn seek(&mut self, target: DocId) -> DocId {
+        let mut doc = self.doc();
+        debug_assert!(doc <= target);
+        while doc < target {
+            doc = self.advance();
         }
+        doc
     }
 
     /// Fills a given mutable buffer with the next doc ids from the
@@ -71,38 +61,38 @@ pub trait DocSet {
     /// use case where batching. The normal way to
     /// go through the `DocId`'s is to call `.advance()`.
     fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
+        if self.doc() == TERMINATED {
+            return 0;
+        }
         for (i, buffer_val) in buffer.iter_mut().enumerate() {
-            if self.advance() {
-                *buffer_val = self.doc();
-            } else {
-                return i;
+            *buffer_val = self.doc();
+            if self.advance() == TERMINATED {
+                return i + 1;
             }
         }
         buffer.len()
     }
 
     /// Returns the current document
+    /// Right after creating a new DocSet, the docset points to the first document.
+    ///
+    /// If the DocSet is empty, .doc() should return `TERMINATED`.
     fn doc(&self) -> DocId;
 
     /// Returns a best-effort hint of the
     /// length of the docset.
     fn size_hint(&self) -> u32;
 
-    /// Appends all docs to a `bitset`.
-    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
-        while self.advance() {
-            bitset.insert(self.doc());
-        }
-    }
-
     /// Returns the number documents matching.
     /// Calling this method consumes the `DocSet`.
     fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
         let mut count = 0u32;
-        while self.advance() {
-            if !delete_bitset.is_deleted(self.doc()) {
+        let mut doc = self.doc();
+        while doc != TERMINATED {
+            if !delete_bitset.is_deleted(doc) {
                 count += 1u32;
             }
+            doc = self.advance();
         }
         count
     }
@@ -114,22 +104,50 @@ pub trait DocSet {
     /// given by `count()`.
     fn count_including_deleted(&mut self) -> u32 {
         let mut count = 0u32;
-        while self.advance() {
+        let mut doc = self.doc();
+        while doc != TERMINATED {
             count += 1u32;
+            doc = self.advance();
         }
         count
     }
 }
 
+impl<'a> DocSet for &'a mut dyn DocSet {
+    fn advance(&mut self) -> u32 {
+        (**self).advance()
+    }
+
+    fn seek(&mut self, target: DocId) -> DocId {
+        (**self).seek(target)
+    }
+
+    fn doc(&self) -> u32 {
+        (**self).doc()
+    }
+
+    fn size_hint(&self) -> u32 {
+        (**self).size_hint()
+    }
+
+    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
+        (**self).count(delete_bitset)
+    }
+
+    fn count_including_deleted(&mut self) -> u32 {
+        (**self).count_including_deleted()
+    }
+}
+
 impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
-    fn advance(&mut self) -> bool {
+    fn advance(&mut self) -> DocId {
         let unboxed: &mut TDocSet = self.borrow_mut();
         unboxed.advance()
     }
 
-    fn skip_next(&mut self, target: DocId) -> SkipResult {
+    fn seek(&mut self, target: DocId) -> DocId {
         let unboxed: &mut TDocSet = self.borrow_mut();
-        unboxed.skip_next(target)
+        unboxed.seek(target)
     }
 
     fn doc(&self) -> DocId {
@@ -151,9 +169,4 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
         let unboxed: &mut TDocSet = self.borrow_mut();
         unboxed.count_including_deleted()
     }
-
-    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
-        let unboxed: &mut TDocSet = self.borrow_mut();
-        unboxed.append_to_bitset(bitset);
-    }
 }
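The new contract replaces the boolean advance()/SkipResult pair with plain DocIds and a single sentinel, so a consumer loop mirrors the count() implementation shown above. A short sketch (not part of the diff):

    // Sketch only: draining a DocSet under the TERMINATED-based contract.
    fn collect_docs(docset: &mut dyn DocSet) -> Vec<DocId> {
        let mut docs = Vec::new();
        let mut doc = docset.doc();    // a freshly created DocSet already points at its first doc
        while doc != TERMINATED {
            docs.push(doc);
            doc = docset.advance();    // returns TERMINATED once the set is exhausted
        }
        docs
    }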
src/error.rs (117 changed lines)
@@ -2,22 +2,27 @@
 
 use std::io;
 
-use crate::directory::error::LockError;
-use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
+use crate::directory::error::{Incompatibility, LockError};
 use crate::fastfield::FastFieldNotAvailableError;
 use crate::query;
-use crate::schema;
-use serde_json;
+use crate::{
+    directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
+    schema,
+};
 use std::fmt;
 use std::path::PathBuf;
 use std::sync::PoisonError;
 
+/// Represents a `DataCorruption` error.
+///
+/// When facing data corruption, tantivy actually panic or return this error.
 pub struct DataCorruption {
     filepath: Option<PathBuf>,
     comment: String,
 }
 
 impl DataCorruption {
+    /// Creates a `DataCorruption` Error.
     pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
         DataCorruption {
             filepath: Some(filepath),
@@ -25,10 +30,11 @@ impl DataCorruption {
         }
     }
 
-    pub fn comment_only(comment: String) -> DataCorruption {
+    /// Creates a `DataCorruption` Error, when the filepath is irrelevant.
+    pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
         DataCorruption {
             filepath: None,
-            comment,
+            comment: comment.to_string(),
         }
     }
 }
@@ -44,42 +50,48 @@ impl fmt::Debug for DataCorruption {
     }
 }
 
-/// The library's failure based error enum
-#[derive(Debug, Fail)]
+/// The library's error enum
+#[derive(Debug, Error)]
 pub enum TantivyError {
-    /// Path does not exist.
-    #[fail(display = "Path does not exist: '{:?}'", _0)]
-    PathDoesNotExist(PathBuf),
-    /// File already exists, this is a problem when we try to write into a new file.
-    #[fail(display = "File already exists: '{:?}'", _0)]
-    FileAlreadyExists(PathBuf),
+    /// Failed to open the directory.
+    #[error("Failed to open the directory: '{0:?}'")]
+    OpenDirectoryError(#[from] OpenDirectoryError),
+    /// Failed to open a file for read.
+    #[error("Failed to open file for read: '{0:?}'")]
+    OpenReadError(#[from] OpenReadError),
+    /// Failed to open a file for write.
+    #[error("Failed to open file for write: '{0:?}'")]
+    OpenWriteError(#[from] OpenWriteError),
     /// Index already exists in this directory
-    #[fail(display = "Index already exists")]
+    #[error("Index already exists")]
     IndexAlreadyExists,
     /// Failed to acquire file lock
-    #[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
+    #[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
     LockFailure(LockError, Option<String>),
     /// IO Error.
-    #[fail(display = "An IO error occurred: '{}'", _0)]
-    IOError(#[cause] IOError),
+    #[error("An IO error occurred: '{0}'")]
+    IOError(#[from] io::Error),
     /// Data corruption.
-    #[fail(display = "{:?}", _0)]
+    #[error("Data corrupted: '{0:?}'")]
     DataCorruption(DataCorruption),
     /// A thread holding the locked panicked and poisoned the lock.
-    #[fail(display = "A thread holding the locked panicked and poisoned the lock")]
+    #[error("A thread holding the locked panicked and poisoned the lock")]
     Poisoned,
     /// Invalid argument was passed by the user.
-    #[fail(display = "An invalid argument was passed: '{}'", _0)]
+    #[error("An invalid argument was passed: '{0}'")]
    InvalidArgument(String),
     /// An Error happened in one of the thread.
-    #[fail(display = "An error occurred in a thread: '{}'", _0)]
+    #[error("An error occurred in a thread: '{0}'")]
     ErrorInThread(String),
     /// An Error appeared related to the schema.
-    #[fail(display = "Schema error: '{}'", _0)]
+    #[error("Schema error: '{0}'")]
     SchemaError(String),
     /// System error. (e.g.: We failed spawning a new thread)
-    #[fail(display = "System error.'{}'", _0)]
+    #[error("System error.'{0}'")]
     SystemError(String),
+    /// Index incompatible with current version of tantivy
+    #[error("{0:?}")]
+    IncompatibleIndex(Incompatibility),
 }
 
 impl From<DataCorruption> for TantivyError {
@@ -87,31 +99,17 @@ impl From<DataCorruption> for TantivyError {
         TantivyError::DataCorruption(data_corruption)
     }
 }
 
 impl From<FastFieldNotAvailableError> for TantivyError {
     fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
         TantivyError::SchemaError(format!("{}", fastfield_error))
     }
 }
 
 impl From<LockError> for TantivyError {
     fn from(lock_error: LockError) -> TantivyError {
         TantivyError::LockFailure(lock_error, None)
     }
 }
 
-impl From<IOError> for TantivyError {
-    fn from(io_error: IOError) -> TantivyError {
-        TantivyError::IOError(io_error)
-    }
-}
-
-impl From<io::Error> for TantivyError {
-    fn from(io_error: io::Error) -> TantivyError {
-        TantivyError::IOError(io_error.into())
-    }
-}
-
 impl From<query::QueryParserError> for TantivyError {
     fn from(parsing_error: query::QueryParserError) -> TantivyError {
         TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
@@ -124,12 +122,9 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
     }
 }
 
-impl From<OpenReadError> for TantivyError {
-    fn from(error: OpenReadError) -> TantivyError {
-        match error {
-            OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
-            OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
-        }
+impl From<chrono::ParseError> for TantivyError {
+    fn from(err: chrono::ParseError) -> TantivyError {
+        TantivyError::InvalidArgument(err.to_string())
     }
 }
 
@@ -139,34 +134,14 @@ impl From<schema::DocParsingError> for TantivyError {
     }
 }
 
-impl From<OpenWriteError> for TantivyError {
-    fn from(error: OpenWriteError) -> TantivyError {
-        match error {
-            OpenWriteError::FileAlreadyExists(filepath) => {
-                TantivyError::FileAlreadyExists(filepath)
-            }
-            OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
-        }
-    }
-}
-
-impl From<OpenDirectoryError> for TantivyError {
-    fn from(error: OpenDirectoryError) -> TantivyError {
-        match error {
-            OpenDirectoryError::DoesNotExist(directory_path) => {
-                TantivyError::PathDoesNotExist(directory_path)
-            }
-            OpenDirectoryError::NotADirectory(directory_path) => {
-                TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
-            }
-            OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
-        }
-    }
-}
-
 impl From<serde_json::Error> for TantivyError {
     fn from(error: serde_json::Error) -> TantivyError {
-        let io_err = io::Error::from(error);
-        TantivyError::IOError(io_err.into())
+        TantivyError::IOError(error.into())
+    }
+}
+
+impl From<rayon::ThreadPoolBuildError> for TantivyError {
+    fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
+        TantivyError::SystemError(error.to_string())
     }
 }
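With thiserror's #[from] attributes on the new variants, the hand-written From impls removed above are generated automatically, so error propagation with ? still works. A small sketch of the effect (hypothetical caller, not part of the diff):

    // Sketch only: #[from] derives `impl From<OpenReadError> for TantivyError`,
    // so `?` converts the directory-level error into the crate-level error directly.
    fn read_meta(directory: &dyn tantivy::Directory) -> Result<Vec<u8>, tantivy::TantivyError> {
        let bytes = directory.atomic_read(std::path::Path::new("meta.json"))?; // OpenReadError -> TantivyError
        Ok(bytes)
    }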
@@ -6,31 +6,114 @@ pub use self::writer::BytesFastFieldWriter;
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use crate::schema::Schema;
|
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value};
|
||||||
use crate::Index;
|
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED};
|
||||||
|
use crate::{DocAddress, DocSet, Index, Searcher, Term};
|
||||||
|
use std::ops::Deref;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_bytes() {
|
fn test_bytes() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let field = schema_builder.add_bytes_field("bytesfield");
|
let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
|
index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3]));
|
||||||
index_writer.add_document(doc!(field=>vec![]));
|
index_writer.add_document(doc!(bytes_field=>vec![]));
|
||||||
index_writer.add_document(doc!(field=>vec![255u8]));
|
index_writer.add_document(doc!(bytes_field=>vec![255u8]));
|
||||||
index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
|
index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9]));
|
||||||
index_writer.add_document(doc!(field=>vec![0u8; 1000]));
|
index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000]));
|
||||||
assert!(index_writer.commit().is_ok());
|
index_writer.commit()?;
|
||||||
let searcher = index.reader().unwrap().searcher();
|
let searcher = index.reader()?.searcher();
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
|
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap();
|
||||||
|
|
||||||
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
|
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
|
||||||
assert!(bytes_reader.get_bytes(1).is_empty());
|
assert!(bytes_reader.get_bytes(1).is_empty());
|
||||||
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
|
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
|
||||||
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
|
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
|
||||||
let long = vec![0u8; 1000];
|
let long = vec![0u8; 1000];
|
||||||
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
|
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_index_for_test<T: Into<BytesOptions>>(
|
||||||
|
byte_options: T,
|
||||||
|
) -> crate::Result<impl Deref<Target = Searcher>> {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(
|
||||||
|
field => b"tantivy".as_ref(),
|
||||||
|
field => b"lucene".as_ref()
|
||||||
|
));
|
||||||
|
index_writer.commit()?;
|
||||||
|
Ok(index.reader()?.searcher())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_stored_bytes() -> crate::Result<()> {
|
||||||
|
let searcher = create_index_for_test(STORED)?;
|
||||||
|
assert_eq!(searcher.num_docs(), 1);
|
||||||
|
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||||
|
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||||
|
let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
|
||||||
|
assert_eq!(values.len(), 2);
|
||||||
|
let values_bytes: Vec<&[u8]> = values
|
||||||
|
.into_iter()
|
||||||
|
.flat_map(|value| value.bytes_value())
|
||||||
|
.collect();
|
||||||
|
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_non_stored_bytes() -> crate::Result<()> {
|
||||||
|
let searcher = create_index_for_test(INDEXED)?;
|
||||||
|
assert_eq!(searcher.num_docs(), 1);
|
||||||
|
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||||
|
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||||
|
assert!(retrieved_doc.get_first(field).is_none());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_index_bytes() -> crate::Result<()> {
|
||||||
|
let searcher = create_index_for_test(INDEXED)?;
|
||||||
|
assert_eq!(searcher.num_docs(), 1);
|
||||||
|
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||||
|
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||||
|
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||||
|
let term_weight = term_query.specialized_weight(&searcher, true)?;
|
||||||
|
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
|
||||||
|
assert_eq!(term_scorer.doc(), 0u32);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_non_index_bytes() -> crate::Result<()> {
|
||||||
|
let searcher = create_index_for_test(STORED)?;
|
||||||
|
assert_eq!(searcher.num_docs(), 1);
|
||||||
|
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||||
|
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||||
|
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||||
|
let term_weight_err = term_query.specialized_weight(&searcher, false);
|
||||||
|
assert!(matches!(
|
||||||
|
term_weight_err,
|
||||||
|
Err(crate::TantivyError::SchemaError(_))
|
||||||
|
));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
|
||||||
|
let searcher = create_index_for_test(FAST)?;
|
||||||
|
assert_eq!(searcher.num_docs(), 1);
|
||||||
|
let fast_fields = searcher.segment_reader(0u32).fast_fields();
|
||||||
|
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||||
|
let fast_field_reader = fast_fields.bytes(field).unwrap();
|
||||||
|
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
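For orientation, a minimal usage sketch of the bytes fast field API exercised by the tests above. It is a sketch, not part of this diff: the heap size and assertions are illustrative, but each call mirrors one that appears in the changed test code.

    use tantivy::schema::{Schema, FAST};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        // Declare a bytes field configured as a fast field (FAST converts into BytesOptions).
        let mut schema_builder = Schema::builder();
        let bytes_field = schema_builder.add_bytes_field("string_bytes", FAST);
        let index = Index::create_in_ram(schema_builder.build());

        // Index a document carrying a raw byte payload, then commit.
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
        index_writer.add_document(doc!(bytes_field => b"tantivy".as_ref()));
        index_writer.commit()?;

        // Per-segment reader: get_bytes(doc) returns the stored slice.
        let searcher = index.reader()?.searcher();
        let bytes_reader = searcher
            .segment_reader(0)
            .fast_fields()
            .bytes(bytes_field)
            .unwrap();
        assert_eq!(bytes_reader.get_bytes(0), b"tantivy");
        Ok(())
    }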
@@ -1,6 +1,5 @@
-use owning_ref::OwningRef;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
-use crate::directory::ReadOnlySource;
 use crate::fastfield::FastFieldReader;
 use crate::DocId;

@@ -17,16 +16,16 @@ use crate::DocId;
 #[derive(Clone)]
 pub struct BytesFastFieldReader {
 idx_reader: FastFieldReader<u64>,
-values: OwningRef<ReadOnlySource, [u8]>,
+values: OwnedBytes,
 }

 impl BytesFastFieldReader {
 pub(crate) fn open(
 idx_reader: FastFieldReader<u64>,
-values_source: ReadOnlySource,
+values_file: FileSlice,
-) -> BytesFastFieldReader {
+) -> crate::Result<BytesFastFieldReader> {
-let values = OwningRef::new(values_source).map(|source| &source[..]);
+let values = values_file.read_bytes()?;
-BytesFastFieldReader { idx_reader, values }
+Ok(BytesFastFieldReader { idx_reader, values })
 }

 fn range(&self, doc: DocId) -> (usize, usize) {
@@ -38,7 +37,7 @@ impl BytesFastFieldReader
 /// Returns the bytes associated to the given `doc`
 pub fn get_bytes(&self, doc: DocId) -> &[u8] {
 let (start, stop) = self.range(doc);
-&self.values[start..stop]
+&self.values.as_slice()[start..stop]
 }

 /// Returns the overall number of bytes in this bytes fast field.
@@ -49,16 +49,10 @@ impl BytesFastFieldWriter
 /// matching field values present in the document.
 pub fn add_document(&mut self, doc: &Document) {
 self.next_doc();
-for field_value in doc.field_values() {
+for field_value in doc.get_all(self.field) {
-if field_value.field() == self.field {
+if let Value::Bytes(ref bytes) = field_value {
-if let Value::Bytes(ref bytes) = *field_value.value() {
+self.vals.extend_from_slice(bytes);
-self.vals.extend_from_slice(bytes);
+return;
-} else {
-panic!(
-"Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
-self.field, field_value
-);
-}
 }
 }
 }
@@ -76,21 +70,18 @@ impl BytesFastFieldWriter

 /// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
 pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
-{
+// writing the offset index
-// writing the offset index
+let mut doc_index_serializer =
-let mut doc_index_serializer =
+serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
-serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
+for &offset in &self.doc_index {
-for &offset in &self.doc_index {
+doc_index_serializer.add_val(offset)?;
-doc_index_serializer.add_val(offset)?;
-}
-doc_index_serializer.add_val(self.vals.len() as u64)?;
-doc_index_serializer.close_field()?;
-}
-{
-// writing the values themselves
-let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
-value_serializer.write_all(&self.vals)?;
-}
 }
+doc_index_serializer.add_val(self.vals.len() as u64)?;
+doc_index_serializer.close_field()?;
+// writing the values themselves
+serializer
+.new_bytes_fast_field_with_idx(self.field, 1)
+.write_all(&self.vals)?;
 Ok(())
 }
 }
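To make the writer change above concrete: the bytes fast field is serialized as a u64 offset index (fast field idx 0) followed by the concatenated value bytes (idx 1), and get_bytes slices values[idx[doc]..idx[doc + 1]]. A standalone sketch of that lookup with made-up data (illustration only, not tantivy code):

    fn get_bytes<'a>(values: &'a [u8], offsets: &[u64], doc: usize) -> &'a [u8] {
        // offsets[doc]..offsets[doc + 1] delimits the bytes of document `doc`.
        let start = offsets[doc] as usize;
        let stop = offsets[doc + 1] as usize;
        &values[start..stop]
    }

    fn main() {
        // Concatenated payloads for three documents plus the offset index
        // (one trailing offset equal to the total length, as the serializer writes).
        let values: Vec<u8> = vec![0, 1, 2, 3, 255];
        let offsets: Vec<u64> = vec![0, 4, 4, 5]; // document 1 has no bytes
        assert_eq!(get_bytes(&values, &offsets, 0), &[0u8, 1, 2, 3]);
        assert!(get_bytes(&values, &offsets, 1).is_empty());
        assert_eq!(get_bytes(&values, &offsets, 2), &[255u8]);
    }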
@@ -1,17 +1,22 @@
-use crate::common::HasLen;
+use crate::common::{BitSet, HasLen};
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
 use crate::directory::WritePtr;
 use crate::space_usage::ByteCount;
 use crate::DocId;
-use bit_set::BitSet;
 use std::io;
 use std::io::Write;

 /// Write a delete `BitSet`
 ///
 /// where `delete_bitset` is the set of deleted `DocId`.
-pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
+/// Warning: this function does not call terminate. The caller is in charge of
-let max_doc = delete_bitset.capacity();
+/// closing the writer properly.
+pub fn write_delete_bitset(
+delete_bitset: &BitSet,
+max_doc: u32,
+writer: &mut WritePtr,
+) -> io::Result<()> {
 let mut byte = 0u8;
 let mut shift = 0u8;
 for doc in 0..max_doc {
@@ -29,28 +34,47 @@ pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io:
 if max_doc % 8 > 0 {
 writer.write_all(&[byte])?;
 }
-writer.flush()
+Ok(())
 }

 /// Set of deleted `DocId`s.
 #[derive(Clone)]
 pub struct DeleteBitSet {
-data: ReadOnlySource,
+data: OwnedBytes,
 len: usize,
 }

 impl DeleteBitSet {
-/// Opens a delete bitset given its data source.
+#[cfg(test)]
-pub fn open(data: ReadOnlySource) -> DeleteBitSet {
+pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
-let num_deleted: usize = data
+use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
+use std::path::Path;
+assert!(docs.iter().all(|&doc| doc < max_doc));
+let mut bitset = BitSet::with_max_value(max_doc);
+for &doc in docs {
+bitset.insert(doc);
+}
+let directory = RAMDirectory::create();
+let path = Path::new("dummydeletebitset");
+let mut wrt = directory.open_write(path).unwrap();
+write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
+wrt.terminate().unwrap();
+let file = directory.open_read(path).unwrap();
+Self::open(file).unwrap()
+}

+/// Opens a delete bitset given its file.
+pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> {
+let bytes = file.read_bytes()?;
+let num_deleted: usize = bytes
 .as_slice()
 .iter()
 .map(|b| b.count_ones() as usize)
 .sum();
-DeleteBitSet {
+Ok(DeleteBitSet {
-data,
+data: bytes,
 len: num_deleted,
-}
+})
 }

 /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
@@ -62,7 +86,7 @@ impl DeleteBitSet {
 #[inline(always)]
 pub fn is_deleted(&self, doc: DocId) -> bool {
 let byte_offset = doc / 8u32;
-let b: u8 = (*self.data)[byte_offset as usize];
+let b: u8 = self.data.as_slice()[byte_offset as usize];
 let shift = (doc & 7u32) as u8;
 b & (1u8 << shift) != 0
 }
@@ -81,45 +105,35 @@ impl HasLen for DeleteBitSet {

 #[cfg(test)]
 mod tests {
-use super::*;
+use super::DeleteBitSet;
-use crate::directory::*;
+use crate::common::HasLen;
-use bit_set::BitSet;
-use std::path::PathBuf;

-fn test_delete_bitset_helper(bitset: &BitSet) {
+#[test]
-let test_path = PathBuf::from("test");
+fn test_delete_bitset_empty() {
-let mut directory = RAMDirectory::create();
+let delete_bitset = DeleteBitSet::for_test(&[], 10);
-{
+for doc in 0..10 {
-let mut writer = directory.open_write(&*test_path).unwrap();
+assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
-write_delete_bitset(bitset, &mut writer).unwrap();
-}
-{
-let source = directory.open_read(&test_path).unwrap();
-let delete_bitset = DeleteBitSet::open(source);
-let n = bitset.capacity();
-for doc in 0..n {
-assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
-}
-assert_eq!(delete_bitset.len(), bitset.len());
 }
+assert_eq!(delete_bitset.len(), 0);
 }

 #[test]
 fn test_delete_bitset() {
-{
+let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
-let mut bitset = BitSet::with_capacity(10);
+assert!(delete_bitset.is_alive(0));
-bitset.insert(1);
+assert!(delete_bitset.is_deleted(1));
-bitset.insert(9);
+assert!(delete_bitset.is_alive(2));
-test_delete_bitset_helper(&bitset);
+assert!(delete_bitset.is_alive(3));
-}
+assert!(delete_bitset.is_alive(4));
-{
+assert!(delete_bitset.is_alive(5));
-let mut bitset = BitSet::with_capacity(8);
+assert!(delete_bitset.is_alive(6));
-bitset.insert(1);
+assert!(delete_bitset.is_alive(6));
-bitset.insert(2);
+assert!(delete_bitset.is_alive(7));
-bitset.insert(3);
+assert!(delete_bitset.is_alive(8));
-bitset.insert(5);
+assert!(delete_bitset.is_deleted(9));
-bitset.insert(7);
+for doc in 0..10 {
-test_delete_bitset_helper(&bitset);
+assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
 }
+assert_eq!(delete_bitset.len(), 2);
 }
 }
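A standalone illustration (not tantivy code) of the bit packing used by write_delete_bitset and is_deleted above: document doc is stored in byte doc / 8, at bit doc & 7, least-significant bit first.

    fn main() {
        let max_doc = 10u32;
        let deleted = [1u32, 9];

        // Pack one bit per deleted document, as the writer does
        // (a trailing partial byte gets flushed as well).
        let mut bitset = vec![0u8; ((max_doc as usize) + 7) / 8];
        for &doc in &deleted {
            bitset[(doc / 8) as usize] |= 1u8 << (doc & 7);
        }
        assert_eq!(bitset, vec![0b0000_0010u8, 0b0000_0010]);

        // is_deleted mirrors the packing by testing the same bit.
        let is_deleted = |doc: u32| bitset[(doc / 8) as usize] & (1u8 << (doc & 7)) != 0;
        assert!(is_deleted(1) && is_deleted(9));
        assert!(!is_deleted(0) && !is_deleted(8));
    }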
@@ -4,8 +4,8 @@ use std::result;
 /// `FastFieldNotAvailableError` is returned when the
 /// user requested for a fast field reader, and the field was not
 /// defined in the schema as a fast field.
-#[derive(Debug, Fail)]
+#[derive(Debug, Error)]
-#[fail(display = "Fast field not available: '{:?}'", field_name)]
+#[error("Fast field not available: '{field_name:?}'")]
 pub struct FastFieldNotAvailableError {
 field_name: String,
 }
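The hunk above swaps the failure-style derive for the attribute syntax of the thiserror crate. A minimal standalone sketch of that pattern, with an invented error type rather than the crate's own:

    use thiserror::Error;

    // #[error(...)] generates the Display impl that #[fail(display = ...)] used to provide.
    #[derive(Debug, Error)]
    #[error("Fast field not available: '{field_name:?}'")]
    pub struct FieldNotAvailable {
        pub field_name: String,
    }

    fn main() {
        let err = FieldNotAvailable { field_name: "price".to_string() };
        assert_eq!(err.to_string(), "Fast field not available: '\"price\"'");
    }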
@@ -1,4 +1,5 @@
-use super::MultiValueIntFastFieldReader;
+use super::MultiValuedFastFieldReader;
+use crate::error::DataCorruption;
 use crate::schema::Facet;
 use crate::termdict::TermDictionary;
 use crate::termdict::TermOrdinal;
@@ -19,7 +20,7 @@ use std::str;
 /// list of facets. This ordinal is segment local and
 /// only makes sense for a given segment.
 pub struct FacetReader {
-term_ords: MultiValueIntFastFieldReader<u64>,
+term_ords: MultiValuedFastFieldReader<u64>,
 term_dict: TermDictionary,
 buffer: Vec<u8>,
 }
@@ -28,12 +29,12 @@ impl FacetReader {
 /// Creates a new `FacetReader`.
 ///
 /// A facet reader just wraps :
-/// - a `MultiValueIntFastFieldReader` that makes it possible to
+/// - a `MultiValuedFastFieldReader` that makes it possible to
 /// access the list of facet ords for a given document.
 /// - a `TermDictionary` that helps associating a facet to
 /// an ordinal and vice versa.
 pub fn new(
-term_ords: MultiValueIntFastFieldReader<u64>,
+term_ords: MultiValuedFastFieldReader<u64>,
 term_dict: TermDictionary,
 ) -> FacetReader {
 FacetReader {
@@ -62,18 +63,166 @@ impl FacetReader {
 &mut self,
 facet_ord: TermOrdinal,
 output: &mut Facet,
-) -> Result<(), str::Utf8Error> {
+) -> crate::Result<()> {
 let found_term = self
 .term_dict
-.ord_to_term(facet_ord as u64, &mut self.buffer);
+.ord_to_term(facet_ord as u64, &mut self.buffer)?;
 assert!(found_term, "Term ordinal {} no found.", facet_ord);
-let facet_str = str::from_utf8(&self.buffer[..])?;
+let facet_str = str::from_utf8(&self.buffer[..])
+.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
 output.set_facet_str(facet_str);
 Ok(())
 }

 /// Return the list of facet ordinals associated to a document.
-pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
+pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
 self.term_ords.get_vals(doc, output);
 }
 }
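A usage sketch of the FacetReader shown above, mirroring the test code added further down in this diff; the field name, heap size, and the expected ordinal are taken from those tests and are illustrative only.

    use tantivy::schema::{Facet, SchemaBuilder, INDEXED};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = SchemaBuilder::default();
        let facet_field = schema_builder.add_facet_field("facet", INDEXED);
        let index = Index::create_in_ram(schema_builder.build());

        let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
        index_writer.add_document(doc!(facet_field => Facet::from_text("/a/b")));
        index_writer.commit()?;

        // One FacetReader per segment: facet_ords fills in the ordinals of a document's facets.
        let searcher = index.reader()?.searcher();
        let facet_reader = searcher
            .segment_reader(0u32)
            .facet_reader(facet_field)
            .unwrap();
        let mut facet_ords = Vec::new();
        facet_reader.facet_ords(0u32, &mut facet_ords);
        assert_eq!(&facet_ords, &[2u64]);
        Ok(())
    }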
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use crate::Index;
|
||||||
|
use crate::{
|
||||||
|
schema::{Facet, FacetOptions, SchemaBuilder, Value, INDEXED, STORED},
|
||||||
|
DocAddress, Document,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_facet_only_indexed() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = SchemaBuilder::default();
|
||||||
|
let facet_field = schema_builder.add_facet_field("facet", INDEXED);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||||
|
index_writer.commit()?;
|
||||||
|
let searcher = index.reader()?.searcher();
|
||||||
|
let facet_reader = searcher
|
||||||
|
.segment_reader(0u32)
|
||||||
|
.facet_reader(facet_field)
|
||||||
|
.unwrap();
|
||||||
|
let mut facet_ords = Vec::new();
|
||||||
|
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||||
|
assert_eq!(&facet_ords, &[2u64]);
|
||||||
|
let doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||||
|
let value = doc.get_first(facet_field).and_then(Value::path);
|
||||||
|
assert_eq!(value, None);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_facet_only_stored() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = SchemaBuilder::default();
|
||||||
|
let facet_field = schema_builder.add_facet_field("facet", STORED);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||||
|
index_writer.commit()?;
|
||||||
|
let searcher = index.reader()?.searcher();
|
||||||
|
let facet_reader = searcher
|
||||||
|
.segment_reader(0u32)
|
||||||
|
.facet_reader(facet_field)
|
||||||
|
.unwrap();
|
||||||
|
let mut facet_ords = Vec::new();
|
||||||
|
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||||
|
assert!(facet_ords.is_empty());
|
||||||
|
let doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||||
|
let value = doc.get_first(facet_field).and_then(Value::path);
|
||||||
|
assert_eq!(value, Some("/a/b".to_string()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_facet_stored_and_indexed() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = SchemaBuilder::default();
|
||||||
|
let facet_field = schema_builder.add_facet_field("facet", STORED | INDEXED);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||||
|
index_writer.commit()?;
|
||||||
|
let searcher = index.reader()?.searcher();
|
||||||
|
let facet_reader = searcher
|
||||||
|
.segment_reader(0u32)
|
||||||
|
.facet_reader(facet_field)
|
||||||
|
.unwrap();
|
||||||
|
let mut facet_ords = Vec::new();
|
||||||
|
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||||
|
assert_eq!(&facet_ords, &[2u64]);
|
||||||
|
let doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||||
|
let value = doc.get_first(facet_field).and_then(Value::path);
|
||||||
|
assert_eq!(value, Some("/a/b".to_string()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_facet_neither_stored_and_indexed() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = SchemaBuilder::default();
|
||||||
|
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||||
|
index_writer.commit()?;
|
||||||
|
let searcher = index.reader()?.searcher();
|
||||||
|
let facet_reader = searcher
|
||||||
|
.segment_reader(0u32)
|
||||||
|
.facet_reader(facet_field)
|
||||||
|
.unwrap();
|
||||||
|
let mut facet_ords = Vec::new();
|
||||||
|
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||||
|
assert!(facet_ords.is_empty());
|
||||||
|
let doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||||
|
let value = doc.get_first(facet_field).and_then(Value::path);
|
||||||
|
assert_eq!(value, None);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = SchemaBuilder::default();
|
||||||
|
let facet_field = schema_builder.add_facet_field("facet", INDEXED);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||||
|
index_writer.add_document(Document::default());
|
||||||
|
index_writer.commit()?;
|
||||||
|
let searcher = index.reader()?.searcher();
|
||||||
|
let facet_reader = searcher
|
||||||
|
.segment_reader(0u32)
|
||||||
|
.facet_reader(facet_field)
|
||||||
|
.unwrap();
|
||||||
|
let mut facet_ords = Vec::new();
|
||||||
|
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||||
|
assert_eq!(&facet_ords, &[2u64]);
|
||||||
|
facet_reader.facet_ords(1u32, &mut facet_ords);
|
||||||
|
assert!(facet_ords.is_empty());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = SchemaBuilder::default();
|
||||||
|
let facet_field = schema_builder.add_facet_field("facet", INDEXED);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
|
index_writer.add_document(Document::default());
|
||||||
|
index_writer.add_document(Document::default());
|
||||||
|
index_writer.commit()?;
|
||||||
|
let searcher = index.reader()?.searcher();
|
||||||
|
let facet_reader = searcher
|
||||||
|
.segment_reader(0u32)
|
||||||
|
.facet_reader(facet_field)
|
||||||
|
.unwrap();
|
||||||
|
let mut facet_ords = Vec::new();
|
||||||
|
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||||
|
assert!(facet_ords.is_empty());
|
||||||
|
facet_reader.facet_ords(1u32, &mut facet_ords);
|
||||||
|
assert!(facet_ords.is_empty());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ pub use self::delete::write_delete_bitset;
|
|||||||
pub use self::delete::DeleteBitSet;
|
pub use self::delete::DeleteBitSet;
|
||||||
pub use self::error::{FastFieldNotAvailableError, Result};
|
pub use self::error::{FastFieldNotAvailableError, Result};
|
||||||
pub use self::facet_reader::FacetReader;
|
pub use self::facet_reader::FacetReader;
|
||||||
pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter};
|
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
|
||||||
pub use self::reader::FastFieldReader;
|
pub use self::reader::FastFieldReader;
|
||||||
pub use self::readers::FastFieldReaders;
|
pub use self::readers::FastFieldReaders;
|
||||||
pub use self::serializer::FastFieldSerializer;
|
pub use self::serializer::FastFieldSerializer;
|
||||||
@@ -37,6 +37,10 @@ use crate::common;
|
|||||||
use crate::schema::Cardinality;
|
use crate::schema::Cardinality;
|
||||||
use crate::schema::FieldType;
|
use crate::schema::FieldType;
|
||||||
use crate::schema::Value;
|
use crate::schema::Value;
|
||||||
|
use crate::{
|
||||||
|
chrono::{NaiveDateTime, Utc},
|
||||||
|
schema::Type,
|
||||||
|
};
|
||||||
|
|
||||||
mod bytes;
|
mod bytes;
|
||||||
mod delete;
|
mod delete;
|
||||||
@@ -49,7 +53,7 @@ mod serializer;
|
|||||||
mod writer;
|
mod writer;
|
||||||
|
|
||||||
/// Trait for types that are allowed for fast fields: (u64, i64 and f64).
|
/// Trait for types that are allowed for fast fields: (u64, i64 and f64).
|
||||||
pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
|
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
|
||||||
/// Converts a value from u64
|
/// Converts a value from u64
|
||||||
///
|
///
|
||||||
/// Internally all fast field values are encoded as u64.
|
/// Internally all fast field values are encoded as u64.
|
||||||
@@ -69,6 +73,15 @@ pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
|
|||||||
/// Cast value to `u64`.
|
/// Cast value to `u64`.
|
||||||
/// The value is just reinterpreted in memory.
|
/// The value is just reinterpreted in memory.
|
||||||
fn as_u64(&self) -> u64;
|
fn as_u64(&self) -> u64;
|
||||||
|
|
||||||
|
/// Build a default value. This default value is never used, so the value does not
|
||||||
|
/// really matter.
|
||||||
|
fn make_zero() -> Self {
|
||||||
|
Self::from_u64(0i64.to_u64())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the `schema::Type` for this FastValue.
|
||||||
|
fn to_type() -> Type;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for u64 {
|
impl FastValue for u64 {
|
||||||
@@ -83,7 +96,7 @@ impl FastValue for u64 {
|
|||||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||||
match *field_type {
|
match *field_type {
|
||||||
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||||
FieldType::HierarchicalFacet => Some(Cardinality::MultiValues),
|
FieldType::HierarchicalFacet(_) => Some(Cardinality::MultiValues),
|
||||||
_ => None,
|
_ => None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -91,6 +104,10 @@ impl FastValue for u64 {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
*self
|
*self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn to_type() -> Type {
|
||||||
|
Type::U64
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for i64 {
|
impl FastValue for i64 {
|
||||||
@@ -112,6 +129,10 @@ impl FastValue for i64 {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
*self as u64
|
*self as u64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn to_type() -> Type {
|
||||||
|
Type::I64
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for f64 {
|
impl FastValue for f64 {
|
||||||
@@ -133,6 +154,36 @@ impl FastValue for f64 {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
self.to_bits()
|
self.to_bits()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn to_type() -> Type {
|
||||||
|
Type::F64
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FastValue for crate::DateTime {
|
||||||
|
fn from_u64(timestamp_u64: u64) -> Self {
|
||||||
|
let timestamp_i64 = i64::from_u64(timestamp_u64);
|
||||||
|
crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_u64(&self) -> u64 {
|
||||||
|
self.timestamp().to_u64()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||||
|
match *field_type {
|
||||||
|
FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn as_u64(&self) -> u64 {
|
||||||
|
self.timestamp().as_u64()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_type() -> Type {
|
||||||
|
Type::Date
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn value_to_u64(value: &Value) -> u64 {
|
fn value_to_u64(value: &Value) -> u64 {
|
||||||
@@ -140,6 +191,7 @@ fn value_to_u64(value: &Value) -> u64 {
|
|||||||
Value::U64(ref val) => *val,
|
Value::U64(ref val) => *val,
|
||||||
Value::I64(ref val) => common::i64_to_u64(*val),
|
Value::I64(ref val) => common::i64_to_u64(*val),
|
||||||
Value::F64(ref val) => common::f64_to_u64(*val),
|
Value::F64(ref val) => common::f64_to_u64(*val),
|
||||||
|
Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
|
||||||
_ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
|
_ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -151,10 +203,13 @@ mod tests {
|
|||||||
use crate::common::CompositeFile;
|
use crate::common::CompositeFile;
|
||||||
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
||||||
use crate::fastfield::FastFieldReader;
|
use crate::fastfield::FastFieldReader;
|
||||||
use crate::schema::Document;
|
use crate::merge_policy::NoMergePolicy;
|
||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::schema::FAST;
|
use crate::schema::FAST;
|
||||||
|
use crate::schema::{Document, IntOptions};
|
||||||
|
use crate::{Index, SegmentId, SegmentReader};
|
||||||
|
use common::HasLen;
|
||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
use rand::prelude::SliceRandom;
|
use rand::prelude::SliceRandom;
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
@@ -179,9 +234,15 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_small() {
|
pub fn test_fastfield_i64_u64() {
|
||||||
|
let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
|
||||||
|
assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_intfastfield_small() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
@@ -194,27 +255,24 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
{
|
assert_eq!(file.len(), 36 as usize);
|
||||||
assert_eq!(source.len(), 36 as usize);
|
let composite_file = CompositeFile::open(&file)?;
|
||||||
}
|
let file = composite_file.open_read(*FIELD).unwrap();
|
||||||
{
|
let fast_field_reader = FastFieldReader::<u64>::open(file)?;
|
||||||
let composite_file = CompositeFile::open(&source).unwrap();
|
assert_eq!(fast_field_reader.get(0), 13u64);
|
||||||
let field_source = composite_file.open_read(*FIELD).unwrap();
|
assert_eq!(fast_field_reader.get(1), 14u64);
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(field_source);
|
assert_eq!(fast_field_reader.get(2), 2u64);
|
||||||
assert_eq!(fast_field_reader.get(0), 13u64);
|
Ok(())
|
||||||
assert_eq!(fast_field_reader.get(1), 14u64);
|
|
||||||
assert_eq!(fast_field_reader.get(2), 2u64);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_large() {
|
fn test_intfastfield_large() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write)?;
|
||||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>4u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>4u64));
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
|
||||||
@@ -225,19 +283,15 @@ mod tests {
|
|||||||
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>215u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>215u64));
|
||||||
fast_field_writers
|
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
||||||
.serialize(&mut serializer, &HashMap::new())
|
serializer.close()?;
|
||||||
.unwrap();
|
|
||||||
serializer.close().unwrap();
|
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path)?;
|
||||||
|
assert_eq!(file.len(), 61 as usize);
|
||||||
{
|
{
|
||||||
assert_eq!(source.len(), 61 as usize);
|
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||||
}
|
|
||||||
{
|
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||||
assert_eq!(fast_field_reader.get(0), 4u64);
|
assert_eq!(fast_field_reader.get(0), 4u64);
|
||||||
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
|
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
|
||||||
assert_eq!(fast_field_reader.get(2), 3_052u64);
|
assert_eq!(fast_field_reader.get(2), 3_052u64);
|
||||||
@@ -248,12 +302,13 @@ mod tests {
|
|||||||
assert_eq!(fast_field_reader.get(7), 1_501u64);
|
assert_eq!(fast_field_reader.get(7), 1_501u64);
|
||||||
assert_eq!(fast_field_reader.get(8), 215u64);
|
assert_eq!(fast_field_reader.get(8), 215u64);
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_null_amplitude() {
|
fn test_intfastfield_null_amplitude() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
|
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
@@ -267,24 +322,23 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
|
assert_eq!(file.len(), 34 as usize);
|
||||||
{
|
{
|
||||||
assert_eq!(source.len(), 34 as usize);
|
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||||
}
|
|
||||||
{
|
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||||
for doc in 0..10_000 {
|
for doc in 0..10_000 {
|
||||||
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_large_numbers() {
|
fn test_intfastfield_large_numbers() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
|
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
@@ -300,14 +354,12 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
|
assert_eq!(file.len(), 80042 as usize);
|
||||||
{
|
{
|
||||||
assert_eq!(source.len(), 80042 as usize);
|
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||||
}
|
|
||||||
{
|
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||||
assert_eq!(fast_field_reader.get(0), 0u64);
|
assert_eq!(fast_field_reader.get(0), 0u64);
|
||||||
for doc in 1..10_001 {
|
for doc in 1..10_001 {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -316,12 +368,13 @@ mod tests {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_signed_intfastfield() {
|
fn test_signed_intfastfield() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
|
|
||||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||||
@@ -340,14 +393,12 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
|
assert_eq!(file.len(), 17709 as usize);
|
||||||
{
|
{
|
||||||
assert_eq!(source.len(), 17709 as usize);
|
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||||
}
|
|
||||||
{
|
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
|
||||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
||||||
|
|
||||||
assert_eq!(fast_field_reader.min_value(), -100i64);
|
assert_eq!(fast_field_reader.min_value(), -100i64);
|
||||||
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
||||||
@@ -360,12 +411,13 @@ mod tests {
|
|||||||
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
|
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_signed_intfastfield_default_val() {
|
fn test_signed_intfastfield_default_val() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
@@ -382,13 +434,14 @@ mod tests {
|
|||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
||||||
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warning: this generates the same permutation at each call
|
// Warning: this generates the same permutation at each call
|
||||||
@@ -399,28 +452,26 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_permutation() {
|
fn test_intfastfield_permutation() -> crate::Result<()> {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let permutation = generate_permutation();
|
let permutation = generate_permutation();
|
||||||
let n = permutation.len();
|
let n = permutation.len();
|
||||||
let mut directory = RAMDirectory::create();
|
let directory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write)?;
|
||||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||||
for &x in &permutation {
|
for &x in &permutation {
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||||
}
|
}
|
||||||
fast_field_writers
|
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
||||||
.serialize(&mut serializer, &HashMap::new())
|
serializer.close()?;
|
||||||
.unwrap();
|
|
||||||
serializer.close().unwrap();
|
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path)?;
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||||
|
|
||||||
let mut a = 0u64;
|
let mut a = 0u64;
|
||||||
for _ in 0..n {
|
for _ in 0..n {
|
||||||
@@ -428,6 +479,94 @@ mod tests {
|
|||||||
a = fast_field_reader.get(a as u32);
|
a = fast_field_reader.get(a as u32);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_merge_missing_date_fast_field() {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let date_field = schema_builder.add_date_field("date", FAST);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||||
|
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
index_writer.add_document(doc!());
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let segment_ids: Vec<SegmentId> = reader
|
||||||
|
.searcher()
|
||||||
|
.segment_readers()
|
||||||
|
.iter()
|
||||||
|
.map(SegmentReader::segment_id)
|
||||||
|
.collect();
|
||||||
|
assert_eq!(segment_ids.len(), 2);
|
||||||
|
let merge_future = index_writer.merge(&segment_ids[..]);
|
||||||
|
let merge_res = futures::executor::block_on(merge_future);
|
||||||
|
assert!(merge_res.is_ok());
|
||||||
|
assert!(reader.reload().is_ok());
|
||||||
|
assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_default_datetime() {
|
||||||
|
assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_datefastfield() {
|
||||||
|
use crate::fastfield::FastValue;
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let date_field = schema_builder.add_date_field("date", FAST);
|
||||||
|
let multi_date_field = schema_builder.add_date_field(
|
||||||
|
"multi_date",
|
||||||
|
IntOptions::default().set_fast(Cardinality::MultiValues),
|
||||||
|
);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||||
|
index_writer.add_document(doc!(
|
||||||
|
date_field => crate::DateTime::from_u64(1i64.to_u64()),
|
||||||
|
multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
|
||||||
|
multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
|
||||||
|
));
|
||||||
|
index_writer.add_document(doc!(
|
||||||
|
date_field => crate::DateTime::from_u64(4i64.to_u64())
|
||||||
|
));
|
||||||
|
index_writer.add_document(doc!(
|
||||||
|
multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
|
||||||
|
multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
|
||||||
|
));
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
assert_eq!(searcher.segment_readers().len(), 1);
|
||||||
|
let segment_reader = searcher.segment_reader(0);
|
||||||
|
let fast_fields = segment_reader.fast_fields();
|
||||||
|
let date_fast_field = fast_fields.date(date_field).unwrap();
|
||||||
|
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
|
||||||
|
let mut dates = vec![];
|
||||||
|
{
|
||||||
|
assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
|
||||||
|
dates_fast_field.get_vals(0u32, &mut dates);
|
||||||
|
assert_eq!(dates.len(), 2);
|
||||||
|
assert_eq!(dates[0].timestamp(), 2i64);
|
||||||
|
assert_eq!(dates[1].timestamp(), 3i64);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
|
||||||
|
dates_fast_field.get_vals(1u32, &mut dates);
|
||||||
|
assert!(dates.is_empty());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
|
||||||
|
dates_fast_field.get_vals(2u32, &mut dates);
|
||||||
|
assert_eq!(dates.len(), 2);
|
||||||
|
assert_eq!(dates[0].timestamp(), 5i64);
|
||||||
|
assert_eq!(dates[1].timestamp(), 6i64);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -473,7 +612,7 @@ mod bench {
|
|||||||
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let permutation = generate_permutation();
|
let permutation = generate_permutation();
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
@@ -486,11 +625,11 @@ mod bench {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
|
||||||
|
|
||||||
b.iter(|| {
|
b.iter(|| {
|
||||||
let n = test::black_box(7000u32);
|
let n = test::black_box(7000u32);
|
||||||
@@ -507,7 +646,7 @@ mod bench {
|
|||||||
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let permutation = generate_permutation();
|
let permutation = generate_permutation();
|
||||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
let directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
@@ -520,11 +659,11 @@ mod bench {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let source = directory.open_read(&path).unwrap();
|
let file = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
|
||||||
|
|
||||||
b.iter(|| {
|
b.iter(|| {
|
||||||
let n = test::black_box(1000u32);
|
let n = test::black_box(1000u32);
|
||||||
|
|||||||
@@ -1,22 +1,21 @@
|
|||||||
mod reader;
|
mod reader;
|
||||||
mod writer;
|
mod writer;
|
||||||
|
|
||||||
pub use self::reader::MultiValueIntFastFieldReader;
|
pub use self::reader::MultiValuedFastFieldReader;
|
||||||
pub use self::writer::MultiValueIntFastFieldWriter;
|
pub use self::writer::MultiValuedFastFieldWriter;
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
use time;
|
|
||||||
|
|
||||||
use self::time::Duration;
|
|
||||||
use crate::collector::TopDocs;
|
use crate::collector::TopDocs;
|
||||||
use crate::query::QueryParser;
|
use crate::query::QueryParser;
|
||||||
use crate::schema::Cardinality;
|
use crate::schema::Cardinality;
|
||||||
use crate::schema::Facet;
|
use crate::schema::Facet;
|
||||||
use crate::schema::IntOptions;
|
use crate::schema::IntOptions;
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
|
use crate::schema::INDEXED;
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
|
use chrono::Duration;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_multivalued_u64() {
|
fn test_multivalued_u64() {
|
||||||
@@ -27,7 +26,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
||||||
index_writer.add_document(doc!());
|
index_writer.add_document(doc!());
|
||||||
index_writer.add_document(doc!(field=>4u64));
|
index_writer.add_document(doc!(field=>4u64));
|
||||||
@@ -66,7 +65,7 @@ mod tests {
|
|||||||
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_for_tests().unwrap();
 let first_time_stamp = chrono::Utc::now();
 index_writer.add_document(
 doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
@@ -102,6 +101,7 @@ mod tests {
 .get_first(date_field)
 .expect("cannot find value")
 .date_value()
+.unwrap()
 .timestamp(),
 first_time_stamp.timestamp()
 );
@@ -110,7 +110,7 @@ mod tests {
 .get_first(time_i)
 .expect("cannot find value")
 .i64_value(),
-1i64
+Some(1i64)
 );
 }
 }
@@ -133,6 +133,7 @@ mod tests {
 .get_first(date_field)
 .expect("cannot find value")
 .date_value()
+.unwrap()
 .timestamp(),
 two_secs_ahead.timestamp()
 );
@@ -141,7 +142,7 @@ mod tests {
 .get_first(time_i)
 .expect("cannot find value")
 .i64_value(),
-3i64
+Some(3i64)
 );
 }
 }
@@ -188,7 +189,7 @@ mod tests {
 );
 let schema = schema_builder.build();
 let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_for_tests().unwrap();
 index_writer.add_document(doc!(field=> 1i64, field => 3i64));
 index_writer.add_document(doc!());
 index_writer.add_document(doc!(field=> -4i64));
@@ -199,31 +200,23 @@ mod tests {
 let segment_reader = searcher.segment_reader(0);
 let mut vals = Vec::new();
 let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
-{
-multi_value_reader.get_vals(2, &mut vals);
-assert_eq!(&vals, &[-4i64]);
-}
-{
-multi_value_reader.get_vals(0, &mut vals);
-assert_eq!(&vals, &[1i64, 3i64]);
-}
-{
-multi_value_reader.get_vals(1, &mut vals);
-assert!(vals.is_empty());
-}
-{
-multi_value_reader.get_vals(3, &mut vals);
-assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
-}
+multi_value_reader.get_vals(2, &mut vals);
+assert_eq!(&vals, &[-4i64]);
+multi_value_reader.get_vals(0, &mut vals);
+assert_eq!(&vals, &[1i64, 3i64]);
+multi_value_reader.get_vals(1, &mut vals);
+assert!(vals.is_empty());
+multi_value_reader.get_vals(3, &mut vals);
+assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
 }
 #[test]
 #[ignore]
 fn test_many_facets() {
 let mut schema_builder = Schema::builder();
-let field = schema_builder.add_facet_field("facetfield");
+let field = schema_builder.add_facet_field("facetfield", INDEXED);
 let schema = schema_builder.build();
 let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+let mut index_writer = index.writer_for_tests().unwrap();
 for i in 0..100_000 {
 index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
 }

@@ -1,3 +1,5 @@
+use std::ops::Range;
+
 use crate::fastfield::{FastFieldReader, FastValue};
 use crate::DocId;

@@ -10,49 +12,42 @@ use crate::DocId;
 /// The `idx_reader` associated, for each document, the index of its first value.
 ///
 #[derive(Clone)]
-pub struct MultiValueIntFastFieldReader<Item: FastValue> {
+pub struct MultiValuedFastFieldReader<Item: FastValue> {
 idx_reader: FastFieldReader<u64>,
 vals_reader: FastFieldReader<Item>,
 }

-impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
+impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
 pub(crate) fn open(
 idx_reader: FastFieldReader<u64>,
 vals_reader: FastFieldReader<Item>,
-) -> MultiValueIntFastFieldReader<Item> {
-MultiValueIntFastFieldReader {
+) -> MultiValuedFastFieldReader<Item> {
+MultiValuedFastFieldReader {
 idx_reader,
 vals_reader,
 }
 }

-pub(crate) fn into_u64s_reader(self) -> MultiValueIntFastFieldReader<u64> {
-MultiValueIntFastFieldReader {
-idx_reader: self.idx_reader,
-vals_reader: self.vals_reader.into_u64_reader(),
-}
-}
-
 /// Returns `(start, stop)`, such that the values associated
 /// to the given document are `start..stop`.
-fn range(&self, doc: DocId) -> (u64, u64) {
+fn range(&self, doc: DocId) -> Range<u64> {
 let start = self.idx_reader.get(doc);
 let stop = self.idx_reader.get(doc + 1);
-(start, stop)
+start..stop
 }

 /// Returns the array of values associated to the given `doc`.
 pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
-let (start, stop) = self.range(doc);
-let len = (stop - start) as usize;
-vals.resize(len, Item::default());
-self.vals_reader.get_range_u64(start, &mut vals[..]);
+let range = self.range(doc);
+let len = (range.end - range.start) as usize;
+vals.resize(len, Item::make_zero());
+self.vals_reader.get_range_u64(range.start, &mut vals[..]);
 }

 /// Returns the number of values associated with the document `DocId`.
 pub fn num_vals(&self, doc: DocId) -> usize {
-let (start, stop) = self.range(doc);
-(stop - start) as usize
+let range = self.range(doc);
+(range.end - range.start) as usize
 }

 /// Returns the overall number of values in this field .
@@ -65,16 +60,16 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
 mod tests {

 use crate::core::Index;
-use crate::schema::{Facet, Schema};
+use crate::schema::{Facet, Schema, INDEXED};

 #[test]
 fn test_multifastfield_reader() {
 let mut schema_builder = Schema::builder();
-let facet_field = schema_builder.add_facet_field("facets");
+let facet_field = schema_builder.add_facet_field("facets", INDEXED);
 let schema = schema_builder.build();
 let index = Index::create_in_ram(schema);
 let mut index_writer = index
-.writer_with_num_threads(1, 30_000_000)
+.writer_for_tests()
 .expect("Failed to create index writer.");
 index_writer.add_document(doc!(
 facet_field => Facet::from("/category/cat2"),

@@ -6,8 +6,8 @@ use crate::schema::{Document, Field};
 use crate::termdict::TermOrdinal;
 use crate::DocId;
 use fnv::FnvHashMap;
-use itertools::Itertools;
 use std::io;
+use std::iter::once;

 /// Writer for multi-valued (as in, more than one value per document)
 /// int fast field.
@@ -19,7 +19,7 @@ use std::io;
 /// in your schema
 /// - add your document simply by calling `.add_document(...)`.
 ///
-/// The `MultiValueIntFastFieldWriter` can be acquired from the
+/// The `MultiValuedFastFieldWriter` can be acquired from the
 /// fastfield writer, by calling [`.get_multivalue_writer(...)`](./struct.FastFieldsWriter.html#method.get_multivalue_writer).
 ///
 /// Once acquired, writing is done by calling calls to
@@ -30,17 +30,17 @@ use std::io;
 /// This makes it possible to push unordered term ids,
 /// during indexing and remap them to their respective
 /// term ids when the segment is getting serialized.
-pub struct MultiValueIntFastFieldWriter {
+pub struct MultiValuedFastFieldWriter {
 field: Field,
 vals: Vec<UnorderedTermId>,
 doc_index: Vec<u64>,
 is_facet: bool,
 }

-impl MultiValueIntFastFieldWriter {
+impl MultiValuedFastFieldWriter {
 /// Creates a new `IntFastFieldWriter`
 pub(crate) fn new(field: Field, is_facet: bool) -> Self {
-MultiValueIntFastFieldWriter {
+MultiValuedFastFieldWriter {
 field,
 vals: Vec::new(),
 doc_index: Vec::new(),
@@ -48,7 +48,7 @@ impl MultiValueIntFastFieldWriter {
 }
 }

-/// Access the field associated to the `MultiValueIntFastFieldWriter`
+/// Access the field associated to the `MultiValuedFastFieldWriter`
 pub fn field(&self) -> Field {
 self.field
 }
@@ -126,33 +126,30 @@ impl MultiValueIntFastFieldWriter {
 1,
 )?;

-let last_interval = (
-self.doc_index.last().cloned().unwrap(),
-self.vals.len() as u64,
-);
+let last_interval =
+self.doc_index.last().cloned().unwrap() as usize..self.vals.len();

 let mut doc_vals: Vec<u64> = Vec::with_capacity(100);
-for (start, stop) in self
+for range in self
 .doc_index
 .windows(2)
-.map(|interval| (interval[0], interval[1]))
-.chain(Some(last_interval).into_iter())
-.map(|(start, stop)| (start as usize, stop as usize))
+.map(|interval| interval[0] as usize..interval[1] as usize)
+.chain(once(last_interval))
 {
 doc_vals.clear();
-let remapped_vals = self.vals[start..stop]
+let remapped_vals = self.vals[range]
 .iter()
 .map(|val| *mapping.get(val).expect("Missing term ordinal"));
 doc_vals.extend(remapped_vals);
-doc_vals.sort();
+doc_vals.sort_unstable();
 for &val in &doc_vals {
 value_serializer.add_val(val)?;
 }
 }
 }
 None => {
-let val_min_max = self.vals.iter().cloned().minmax();
-let (val_min, val_max) = val_min_max.into_option().unwrap_or((0u64, 0u64));
+let val_min_max = crate::common::minmax(self.vals.iter().cloned());
+let (val_min, val_max) = val_min_max.unwrap_or((0u64, 0u64));
 value_serializer =
 serializer.new_u64_fast_field_with_idx(self.field, val_min, val_max, 1)?;
 for &val in &self.vals {

@@ -3,13 +3,12 @@ use crate::common::bitpacker::BitUnpacker;
 use crate::common::compute_num_bits;
 use crate::common::BinarySerializable;
 use crate::common::CompositeFile;
-use crate::directory::ReadOnlySource;
+use crate::directory::FileSlice;
 use crate::directory::{Directory, RAMDirectory, WritePtr};
 use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
 use crate::schema::Schema;
 use crate::schema::FAST;
 use crate::DocId;
-use owning_ref::OwningRef;
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::path::Path;
@@ -20,43 +19,27 @@ use std::path::Path;
 /// fast field is required.
 #[derive(Clone)]
 pub struct FastFieldReader<Item: FastValue> {
-bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
+bit_unpacker: BitUnpacker,
 min_value_u64: u64,
 max_value_u64: u64,
 _phantom: PhantomData<Item>,
 }

 impl<Item: FastValue> FastFieldReader<Item> {
-/// Opens a fast field given a source.
-pub fn open(data: ReadOnlySource) -> Self {
-let min_value: u64;
-let amplitude: u64;
-{
-let mut cursor = data.as_slice();
-min_value =
-u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
-amplitude =
-u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
-}
+/// Opens a fast field given a file.
+pub fn open(file: FileSlice) -> crate::Result<Self> {
+let mut bytes = file.read_bytes()?;
+let min_value = u64::deserialize(&mut bytes)?;
+let amplitude = u64::deserialize(&mut bytes)?;
 let max_value = min_value + amplitude;
 let num_bits = compute_num_bits(amplitude);
-let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
-let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
-FastFieldReader {
+let bit_unpacker = BitUnpacker::new(bytes, num_bits);
+Ok(FastFieldReader {
 min_value_u64: min_value,
 max_value_u64: max_value,
 bit_unpacker,
 _phantom: PhantomData,
-}
-}
-
-pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
-FastFieldReader {
-bit_unpacker: self.bit_unpacker,
-min_value_u64: self.min_value_u64,
-max_value_u64: self.max_value_u64,
-_phantom: PhantomData,
-}
+})
 }

 /// Return the value associated to the given document.
@@ -135,7 +118,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
 let field = schema_builder.add_u64_field("field", FAST);
 let schema = schema_builder.build();
 let path = Path::new("__dummy__");
-let mut directory: RAMDirectory = RAMDirectory::create();
+let directory: RAMDirectory = RAMDirectory::create();
 {
 let write: WritePtr = directory
 .open_write(path)
@@ -157,12 +140,11 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
 serializer.close().unwrap();
 }

-let source = directory.open_read(path).expect("Failed to open the file");
-let composite_file =
-CompositeFile::open(&source).expect("Failed to read the composite file");
-let field_source = composite_file
+let file = directory.open_read(path).expect("Failed to open the file");
+let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file");
+let field_file = composite_file
 .open_read(field)
 .expect("File component not found");
-FastFieldReader::open(field_source)
+FastFieldReader::open(field_file).unwrap()
 }
 }

@@ -1,31 +1,27 @@
 use crate::common::CompositeFile;
-use crate::fastfield::BytesFastFieldReader;
-use crate::fastfield::MultiValueIntFastFieldReader;
+use crate::directory::FileSlice;
+use crate::fastfield::MultiValuedFastFieldReader;
+use crate::fastfield::{BytesFastFieldReader, FastValue};
 use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
 use crate::schema::{Cardinality, Field, FieldType, Schema};
 use crate::space_usage::PerFieldSpaceUsage;
-use crate::Result;
-use std::collections::HashMap;
+use crate::TantivyError;

 /// Provides access to all of the FastFieldReader.
 ///
 /// Internally, `FastFieldReaders` have preloaded fast field readers,
 /// and just wraps several `HashMap`.
+#[derive(Clone)]
 pub struct FastFieldReaders {
-fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
-fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
-fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
-fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
-fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
-fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
-fast_bytes: HashMap<Field, BytesFastFieldReader>,
+schema: Schema,
 fast_fields_composite: CompositeFile,
 }
+#[derive(Eq, PartialEq, Debug)]
 enum FastType {
 I64,
 U64,
 F64,
+Date,
 }

 fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
@@ -39,190 +35,181 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
 FieldType::F64(options) => options
 .get_fastfield_cardinality()
 .map(|cardinality| (FastType::F64, cardinality)),
-FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
+FieldType::Date(options) => options
+.get_fastfield_cardinality()
+.map(|cardinality| (FastType::Date, cardinality)),
+FieldType::HierarchicalFacet(_) => Some((FastType::U64, Cardinality::MultiValues)),
 _ => None,
 }
 }

 impl FastFieldReaders {
-pub(crate) fn load_all(
-schema: &Schema,
-fast_fields_composite: &CompositeFile,
-) -> Result<FastFieldReaders> {
-let mut fast_field_readers = FastFieldReaders {
-fast_field_i64: Default::default(),
-fast_field_u64: Default::default(),
-fast_field_f64: Default::default(),
-fast_field_i64s: Default::default(),
-fast_field_u64s: Default::default(),
-fast_field_f64s: Default::default(),
-fast_bytes: Default::default(),
-fast_fields_composite: fast_fields_composite.clone(),
-};
-for (field, field_entry) in schema.fields() {
-let field_type = field_entry.field_type();
-if field_type == &FieldType::Bytes {
-let idx_reader = fast_fields_composite
-.open_read_with_idx(field, 0)
-.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
-.map(FastFieldReader::open)?;
-let data = fast_fields_composite
-.open_read_with_idx(field, 1)
-.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
-fast_field_readers
-.fast_bytes
-.insert(field, BytesFastFieldReader::open(idx_reader, data));
-} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
-match cardinality {
-Cardinality::SingleValue => {
-if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
-match fast_type {
-FastType::U64 => {
-let fast_field_reader = FastFieldReader::open(fast_field_data);
-fast_field_readers
-.fast_field_u64
-.insert(field, fast_field_reader);
-}
-FastType::I64 => {
-fast_field_readers.fast_field_i64.insert(
-field,
-FastFieldReader::open(fast_field_data.clone()),
-);
-}
-FastType::F64 => {
-fast_field_readers.fast_field_f64.insert(
-field,
-FastFieldReader::open(fast_field_data.clone()),
-);
-}
-}
-} else {
-return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
-}
-}
-Cardinality::MultiValues => {
-let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
-let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
-if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
-let idx_reader = FastFieldReader::open(fast_field_idx);
-match fast_type {
-FastType::I64 => {
-let vals_reader = FastFieldReader::open(fast_field_data);
-let multivalued_int_fast_field =
-MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-fast_field_readers
-.fast_field_i64s
-.insert(field, multivalued_int_fast_field);
-}
-FastType::U64 => {
-let vals_reader = FastFieldReader::open(fast_field_data);
-let multivalued_int_fast_field =
-MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-fast_field_readers
-.fast_field_u64s
-.insert(field, multivalued_int_fast_field);
-}
-FastType::F64 => {
-let vals_reader = FastFieldReader::open(fast_field_data);
-let multivalued_int_fast_field =
-MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-fast_field_readers
-.fast_field_f64s
-.insert(field, multivalued_int_fast_field);
-}
-}
-} else {
-return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
-}
-}
-}
-}
+pub(crate) fn new(schema: Schema, fast_fields_composite: CompositeFile) -> FastFieldReaders {
+FastFieldReaders {
+fast_fields_composite,
+schema,
 }
-Ok(fast_field_readers)
 }

 pub(crate) fn space_usage(&self) -> PerFieldSpaceUsage {
 self.fast_fields_composite.space_usage()
 }

+fn fast_field_data(&self, field: Field, idx: usize) -> crate::Result<FileSlice> {
+self.fast_fields_composite
+.open_read_with_idx(field, idx)
+.ok_or_else(|| {
+let field_name = self.schema.get_field_entry(field).name();
+TantivyError::SchemaError(format!("Field({}) data was not found", field_name))
+})
+}
+
+fn check_type(
+&self,
+field: Field,
+expected_fast_type: FastType,
+expected_cardinality: Cardinality,
+) -> crate::Result<()> {
+let field_entry = self.schema.get_field_entry(field);
+let (fast_type, cardinality) =
+type_and_cardinality(field_entry.field_type()).ok_or_else(|| {
+crate::TantivyError::SchemaError(format!(
+"Field {:?} is not a fast field.",
+field_entry.name()
+))
+})?;
+if fast_type != expected_fast_type {
+return Err(crate::TantivyError::SchemaError(format!(
+"Field {:?} is of type {:?}, expected {:?}.",
+field_entry.name(),
+fast_type,
+expected_fast_type
+)));
+}
+if cardinality != expected_cardinality {
+return Err(crate::TantivyError::SchemaError(format!(
+"Field {:?} is of cardinality {:?}, expected {:?}.",
+field_entry.name(),
+cardinality,
+expected_cardinality
+)));
+}
+Ok(())
+}
+
+pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
+&self,
+field: Field,
+) -> crate::Result<FastFieldReader<TFastValue>> {
+let fast_field_slice = self.fast_field_data(field, 0)?;
+FastFieldReader::open(fast_field_slice)
+}
+
+pub(crate) fn typed_fast_field_multi_reader<TFastValue: FastValue>(
+&self,
+field: Field,
+) -> crate::Result<MultiValuedFastFieldReader<TFastValue>> {
+let fast_field_slice_idx = self.fast_field_data(field, 0)?;
+let fast_field_slice_vals = self.fast_field_data(field, 1)?;
+let idx_reader = FastFieldReader::open(fast_field_slice_idx)?;
+let vals_reader: FastFieldReader<TFastValue> =
+FastFieldReader::open(fast_field_slice_vals)?;
+Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
+}
+
 /// Returns the `u64` fast field reader reader associated to `field`.
 ///
 /// If `field` is not a u64 fast field, this method returns `None`.
-pub fn u64(&self, field: Field) -> Option<FastFieldReader<u64>> {
-self.fast_field_u64.get(&field).cloned()
+pub fn u64(&self, field: Field) -> crate::Result<FastFieldReader<u64>> {
+self.check_type(field, FastType::U64, Cardinality::SingleValue)?;
+self.typed_fast_field_reader(field)
 }

-/// If the field is a u64-fast field return the associated reader.
-/// If the field is a i64-fast field, return the associated u64 reader. Values are
-/// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping. ///
+/// Returns the `u64` fast field reader reader associated to `field`, regardless of whether the given
+/// field is effectively of type `u64` or not.
 ///
-///TODO should it also be lenient with f64?
-///
-/// This method is useful when merging segment reader.
-pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
-if let Some(u64_ff_reader) = self.u64(field) {
-return Some(u64_ff_reader);
-}
-if let Some(i64_ff_reader) = self.i64(field) {
-return Some(i64_ff_reader.into_u64_reader());
-}
-None
+/// If not, the fastfield reader will returns the u64-value associated to the original FastValue.
+pub fn u64_lenient(&self, field: Field) -> crate::Result<FastFieldReader<u64>> {
+self.typed_fast_field_reader(field)
 }

 /// Returns the `i64` fast field reader reader associated to `field`.
 ///
 /// If `field` is not a i64 fast field, this method returns `None`.
-pub fn i64(&self, field: Field) -> Option<FastFieldReader<i64>> {
-self.fast_field_i64.get(&field).cloned()
+pub fn i64(&self, field: Field) -> crate::Result<FastFieldReader<i64>> {
+self.check_type(field, FastType::I64, Cardinality::SingleValue)?;
+self.typed_fast_field_reader(field)
+}
+
+/// Returns the `i64` fast field reader reader associated to `field`.
+///
+/// If `field` is not a i64 fast field, this method returns `None`.
+pub fn date(&self, field: Field) -> crate::Result<FastFieldReader<crate::DateTime>> {
+self.check_type(field, FastType::Date, Cardinality::SingleValue)?;
+self.typed_fast_field_reader(field)
 }

 /// Returns the `f64` fast field reader reader associated to `field`.
 ///
 /// If `field` is not a f64 fast field, this method returns `None`.
-pub fn f64(&self, field: Field) -> Option<FastFieldReader<f64>> {
-self.fast_field_f64.get(&field).cloned()
+pub fn f64(&self, field: Field) -> crate::Result<FastFieldReader<f64>> {
+self.check_type(field, FastType::F64, Cardinality::SingleValue)?;
+self.typed_fast_field_reader(field)
 }

 /// Returns a `u64s` multi-valued fast field reader reader associated to `field`.
 ///
 /// If `field` is not a u64 multi-valued fast field, this method returns `None`.
-pub fn u64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
-self.fast_field_u64s.get(&field).cloned()
-}
-
-/// If the field is a u64s-fast field return the associated reader.
-/// If the field is a i64s-fast field, return the associated u64s reader. Values are
-/// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping.
-///
-/// This method is useful when merging segment reader.
-pub(crate) fn u64s_lenient(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
-if let Some(u64s_ff_reader) = self.u64s(field) {
-return Some(u64s_ff_reader);
-}
-if let Some(i64s_ff_reader) = self.i64s(field) {
-return Some(i64s_ff_reader.into_u64s_reader());
-}
-None
+pub fn u64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
+self.check_type(field, FastType::U64, Cardinality::MultiValues)?;
+self.typed_fast_field_multi_reader(field)
 }

 /// Returns a `i64s` multi-valued fast field reader reader associated to `field`.
 ///
 /// If `field` is not a i64 multi-valued fast field, this method returns `None`.
-pub fn i64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<i64>> {
-self.fast_field_i64s.get(&field).cloned()
+pub fn i64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<i64>> {
+self.check_type(field, FastType::I64, Cardinality::MultiValues)?;
+self.typed_fast_field_multi_reader(field)
 }

 /// Returns a `f64s` multi-valued fast field reader reader associated to `field`.
 ///
 /// If `field` is not a f64 multi-valued fast field, this method returns `None`.
-pub fn f64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<f64>> {
-self.fast_field_f64s.get(&field).cloned()
+pub fn f64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<f64>> {
+self.check_type(field, FastType::F64, Cardinality::MultiValues)?;
+self.typed_fast_field_multi_reader(field)
+}
+
+/// Returns a `crate::DateTime` multi-valued fast field reader reader associated to `field`.
+///
+/// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
+pub fn dates(
+&self,
+field: Field,
+) -> crate::Result<MultiValuedFastFieldReader<crate::DateTime>> {
+self.check_type(field, FastType::Date, Cardinality::MultiValues)?;
+self.typed_fast_field_multi_reader(field)
 }

 /// Returns the `bytes` fast field reader associated to `field`.
 ///
 /// If `field` is not a bytes fast field, returns `None`.
-pub fn bytes(&self, field: Field) -> Option<BytesFastFieldReader> {
-self.fast_bytes.get(&field).cloned()
+pub fn bytes(&self, field: Field) -> crate::Result<BytesFastFieldReader> {
+let field_entry = self.schema.get_field_entry(field);
+if let FieldType::Bytes(bytes_option) = field_entry.field_type() {
+if !bytes_option.is_fast() {
+return Err(crate::TantivyError::SchemaError(format!(
+"Field {:?} is not a fast field.",
+field_entry.name()
+)));
+}
+let fast_field_idx_file = self.fast_field_data(field, 0)?;
+let idx_reader = FastFieldReader::open(fast_field_idx_file)?;
+let data = self.fast_field_data(field, 1)?;
+BytesFastFieldReader::open(idx_reader, data)
+} else {
+Err(FastFieldNotAvailableError::new(field_entry).into())
+}
 }
 }

@@ -66,9 +66,9 @@ impl FastFieldSerializer {
 &mut self,
 field: Field,
 idx: usize,
-) -> io::Result<FastBytesFieldSerializer<'_, CountingWriter<WritePtr>>> {
+) -> FastBytesFieldSerializer<'_, CountingWriter<WritePtr>> {
 let field_write = self.composite_write.for_field_with_idx(field, idx);
-FastBytesFieldSerializer::open(field_write)
+FastBytesFieldSerializer { write: field_write }
 }

 /// Closes the serializer
@@ -132,10 +132,6 @@ pub struct FastBytesFieldSerializer<'a, W: Write> {
 }

 impl<'a, W: Write> FastBytesFieldSerializer<'a, W> {
-fn open(write: &'a mut W) -> io::Result<FastBytesFieldSerializer<'a, W>> {
-Ok(FastBytesFieldSerializer { write })
-}
-
 pub fn write_all(&mut self, vals: &[u8]) -> io::Result<()> {
 self.write.write_all(vals)
 }

@@ -1,10 +1,10 @@
-use super::multivalued::MultiValueIntFastFieldWriter;
+use super::multivalued::MultiValuedFastFieldWriter;
 use crate::common;
 use crate::common::BinarySerializable;
 use crate::common::VInt;
 use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
 use crate::postings::UnorderedTermId;
-use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
+use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
 use crate::termdict::TermOrdinal;
 use fnv::FnvHashMap;
 use std::collections::HashMap;
@@ -13,10 +13,18 @@ use std::io;
 /// The fastfieldswriter regroup all of the fast field writers.
 pub struct FastFieldsWriter {
 single_value_writers: Vec<IntFastFieldWriter>,
-multi_values_writers: Vec<MultiValueIntFastFieldWriter>,
+multi_values_writers: Vec<MultiValuedFastFieldWriter>,
 bytes_value_writers: Vec<BytesFastFieldWriter>,
 }

+fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
+match *field_entry.field_type() {
+FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
+FieldType::F64(_) => common::f64_to_u64(0.0f64),
+_ => 0u64,
+}
+}
+
 impl FastFieldsWriter {
 /// Create all `FastFieldWriter` required by the schema.
 pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
@@ -25,35 +33,34 @@ impl FastFieldsWriter {
 let mut bytes_value_writers = Vec::new();

 for (field, field_entry) in schema.fields() {
-let default_value = match *field_entry.field_type() {
-FieldType::I64(_) => common::i64_to_u64(0i64),
-FieldType::F64(_) => common::f64_to_u64(0.0f64),
-_ => 0u64,
-};
-match *field_entry.field_type() {
+match field_entry.field_type() {
 FieldType::I64(ref int_options)
 | FieldType::U64(ref int_options)
-| FieldType::F64(ref int_options) => {
+| FieldType::F64(ref int_options)
+| FieldType::Date(ref int_options) => {
 match int_options.get_fastfield_cardinality() {
 Some(Cardinality::SingleValue) => {
 let mut fast_field_writer = IntFastFieldWriter::new(field);
+let default_value = fast_field_default_value(field_entry);
 fast_field_writer.set_val_if_missing(default_value);
 single_value_writers.push(fast_field_writer);
 }
 Some(Cardinality::MultiValues) => {
-let fast_field_writer = MultiValueIntFastFieldWriter::new(field, false);
+let fast_field_writer = MultiValuedFastFieldWriter::new(field, false);
 multi_values_writers.push(fast_field_writer);
 }
 None => {}
 }
 }
-FieldType::HierarchicalFacet => {
-let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
+FieldType::HierarchicalFacet(_) => {
+let fast_field_writer = MultiValuedFastFieldWriter::new(field, true);
 multi_values_writers.push(fast_field_writer);
 }
-FieldType::Bytes => {
-let fast_field_writer = BytesFastFieldWriter::new(field);
-bytes_value_writers.push(fast_field_writer);
+FieldType::Bytes(bytes_option) => {
+if bytes_option.is_fast() {
+let fast_field_writer = BytesFastFieldWriter::new(field);
+bytes_value_writers.push(fast_field_writer);
+}
 }
 _ => {}
 }
@@ -80,7 +87,7 @@ impl FastFieldsWriter {
 pub fn get_multivalue_writer(
 &mut self,
 field: Field,
-) -> Option<&mut MultiValueIntFastFieldWriter> {
+) -> Option<&mut MultiValuedFastFieldWriter> {
 // TODO optimize
 self.multi_values_writers
 .iter_mut()
@@ -121,6 +128,7 @@ impl FastFieldsWriter {
 for field_writer in &self.single_value_writers {
 field_writer.serialize(serializer)?;
 }

 for field_writer in &self.multi_values_writers {
 let field = field_writer.field();
 field_writer.serialize(serializer, mapping.get(&field))?;

@@ -21,7 +21,7 @@ mod reader;
 mod serializer;
 mod writer;

-pub use self::reader::FieldNormReader;
+pub use self::reader::{FieldNormReader, FieldNormReaders};
 pub use self::serializer::FieldNormsSerializer;
 pub use self::writer::FieldNormsWriter;

@@ -1,6 +1,47 @@
 use super::{fieldnorm_to_id, id_to_fieldnorm};
-use crate::directory::ReadOnlySource;
+use crate::common::CompositeFile;
+use crate::directory::FileSlice;
+use crate::directory::OwnedBytes;
+use crate::schema::Field;
+use crate::space_usage::PerFieldSpaceUsage;
 use crate::DocId;
+use std::sync::Arc;
+
+/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
+/// field) of all indexed fields in the index.
+///
+/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
+/// `fieldnorm_id`.
+/// The mapping from `fieldnorm` to `fieldnorm_id` is given by monotonic.
+#[derive(Clone)]
+pub struct FieldNormReaders {
+data: Arc<CompositeFile>,
+}
+
+impl FieldNormReaders {
+/// Creates a field norm reader.
+pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> {
+let data = CompositeFile::open(&file)?;
+Ok(FieldNormReaders {
+data: Arc::new(data),
+})
+}
+
+/// Returns the FieldNormReader for a specific field.
+pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> {
+if let Some(file) = self.data.open_read(field) {
+let fieldnorm_reader = FieldNormReader::open(file)?;
+Ok(Some(fieldnorm_reader))
+} else {
+Ok(None)
+}
+}
+
+/// Return a break down of the space usage per field.
+pub fn space_usage(&self) -> PerFieldSpaceUsage {
+self.data.space_usage()
+}
+}
+
 /// Reads the fieldnorm associated to a document.
 /// The fieldnorm represents the length associated to
@@ -19,14 +60,57 @@ use crate::DocId;
 /// Apart from compression, this scale also makes it possible to
 /// precompute computationally expensive functions of the fieldnorm
 /// in a very short array.
-pub struct FieldNormReader {
-data: ReadOnlySource,
+#[derive(Clone)]
+pub struct FieldNormReader(ReaderImplEnum);
+
+impl From<ReaderImplEnum> for FieldNormReader {
+fn from(reader_enum: ReaderImplEnum) -> FieldNormReader {
+FieldNormReader(reader_enum)
+}
+}
+
+#[derive(Clone)]
+enum ReaderImplEnum {
+FromData(OwnedBytes),
+Const {
+num_docs: u32,
+fieldnorm_id: u8,
+fieldnorm: u32,
+},
 }

 impl FieldNormReader {
-/// Opens a field norm reader given its data source.
-pub fn open(data: ReadOnlySource) -> Self {
-FieldNormReader { data }
+/// Creates a `FieldNormReader` with a constant fieldnorm.
+///
+/// The fieldnorm will be subjected to compression as if it was coming
+/// from an array-backed fieldnorm reader.
+pub fn constant(num_docs: u32, fieldnorm: u32) -> FieldNormReader {
+let fieldnorm_id = fieldnorm_to_id(fieldnorm);
+let fieldnorm = id_to_fieldnorm(fieldnorm_id);
+ReaderImplEnum::Const {
+num_docs,
+fieldnorm_id,
+fieldnorm,
+}
+.into()
+}
+
+/// Opens a field norm reader given its file.
+pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
+let data = fieldnorm_file.read_bytes()?;
+Ok(FieldNormReader::new(data))
+}
+
+fn new(data: OwnedBytes) -> Self {
+ReaderImplEnum::FromData(data).into()
+}
+
+/// Returns the number of documents in this segment.
+pub fn num_docs(&self) -> u32 {
+match &self.0 {
+ReaderImplEnum::FromData(data) => data.len() as u32,
+ReaderImplEnum::Const { num_docs, .. } => *num_docs,
+}
 }

 /// Returns the `fieldnorm` associated to a doc id.
@@ -39,15 +123,25 @@ impl FieldNormReader {
 /// The fieldnorm is effectively decoded from the
 /// `fieldnorm_id` by doing a simple table lookup.
 pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
-let fieldnorm_id = self.fieldnorm_id(doc_id);
-id_to_fieldnorm(fieldnorm_id)
+match &self.0 {
+ReaderImplEnum::FromData(data) => {
+let fieldnorm_id = data.as_slice()[doc_id as usize];
+id_to_fieldnorm(fieldnorm_id)
+}
+ReaderImplEnum::Const { fieldnorm, .. } => *fieldnorm,
+}
 }

 /// Returns the `fieldnorm_id` associated to a document.
 #[inline(always)]
 pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
-let fielnorms_data = self.data.as_slice();
-fielnorms_data[doc_id as usize]
+match &self.0 {
+ReaderImplEnum::FromData(data) => {
+let fieldnorm_id = data.as_slice()[doc_id as usize];
+fieldnorm_id
+}
+ReaderImplEnum::Const { fieldnorm_id, .. } => *fieldnorm_id,
+}
 }

 /// Converts a `fieldnorm_id` into a fieldnorm.
@@ -62,18 +156,48 @@ impl FieldNormReader {
 pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
 fieldnorm_to_id(fieldnorm)
 }

+#[cfg(test)]
+pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
+let field_norms_id = field_norms
+.iter()
+.cloned()
+.map(FieldNormReader::fieldnorm_to_id)
+.collect::<Vec<u8>>();
+let field_norms_data = OwnedBytes::new(field_norms_id);
+FieldNormReader::new(field_norms_data)
+}
 }

 #[cfg(test)]
-impl From<Vec<u32>> for FieldNormReader {
-fn from(field_norms: Vec<u32>) -> FieldNormReader {
-let field_norms_id = field_norms
-.into_iter()
-.map(FieldNormReader::fieldnorm_to_id)
-.collect::<Vec<u8>>();
-let field_norms_data = ReadOnlySource::from(field_norms_id);
-FieldNormReader {
-data: field_norms_data,
-}
+mod tests {
+use crate::fieldnorm::FieldNormReader;
+
+#[test]
+fn test_from_fieldnorms_array() {
+let fieldnorms = &[1, 2, 3, 4, 1_000_000];
+let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
+assert_eq!(fieldnorm_reader.num_docs(), 5);
+assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
+assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
+assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
+assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
+assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
+}
+
+#[test]
+fn test_const_fieldnorm_reader_small_fieldnorm_id() {
+let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 10u32);
+assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
+assert_eq!(fieldnorm_reader.fieldnorm(0u32), 10u32);
+assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 10u8);
+}
+
+#[test]
+fn test_const_fieldnorm_reader_large_fieldnorm_id() {
+let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 300u32);
+assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
+assert_eq!(fieldnorm_reader.fieldnorm(0u32), 280u32);
+assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 72u8);
 }
 }

@@ -4,7 +4,7 @@ use super::fieldnorm_to_id;
 use super::FieldNormsSerializer;
 use crate::schema::Field;
 use crate::schema::Schema;
-use std::io;
+use std::{io, iter};

 /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
 /// of each document for each field with field norms.
@@ -44,7 +44,9 @@ impl FieldNormsWriter {
 .unwrap_or(0);
 FieldNormsWriter {
 fields,
-fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
+fieldnorms_buffer: iter::repeat_with(Vec::new)
+.take(max_field)
+.collect::<Vec<_>>(),
 }
 }

@@ -78,11 +80,12 @@ impl FieldNormsWriter {
 }

 /// Serialize the seen fieldnorm values to the serializer for all fields.
-pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
+pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
 for &field in self.fields.iter() {
 let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
 fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
 }
+fieldnorms_serializer.close()?;
 Ok(())
 }
 }

|
|||||||
use rand::thread_rng;
|
|
||||||
use std::collections::HashSet;
|
|
||||||
|
|
||||||
use crate::schema::*;
|
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::Searcher;
|
use crate::Searcher;
|
||||||
|
use crate::{doc, schema::*};
|
||||||
|
use rand::thread_rng;
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
|
use std::collections::HashSet;
|
||||||
|
|
||||||
fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
|
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
|
||||||
assert!(searcher.segment_readers().len() < 20);
|
assert!(searcher.segment_readers().len() < 20);
|
||||||
assert_eq!(searcher.num_docs() as usize, vals.len());
|
assert_eq!(searcher.num_docs() as usize, vals.len());
|
||||||
|
for segment_reader in searcher.segment_readers() {
|
||||||
|
let store_reader = segment_reader.get_store_reader()?;
|
||||||
|
for doc_id in 0..segment_reader.max_doc() {
|
||||||
|
let _doc = store_reader.get(doc_id)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore]
|
#[ignore]
|
||||||
fn test_indexing() {
|
fn test_functional_store() -> crate::Result<()> {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
|
||||||
|
let id_field = schema_builder.add_u64_field("id", INDEXED | STORED);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let reader = index.reader()?;
|
||||||
|
|
||||||
|
let mut rng = thread_rng();
|
||||||
|
|
||||||
|
let mut index_writer = index.writer_with_num_threads(3, 12_000_000)?;
|
||||||
|
|
||||||
|
let mut doc_set: Vec<u64> = Vec::new();
|
||||||
|
|
||||||
|
let mut doc_id = 0u64;
|
||||||
|
for iteration in 0..500 {
|
||||||
|
dbg!(iteration);
|
||||||
|
let num_docs: usize = rng.gen_range(0..4);
|
||||||
|
if doc_set.len() >= 1 {
|
||||||
|
let doc_to_remove_id = rng.gen_range(0..doc_set.len());
|
||||||
|
let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
|
||||||
|
index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
|
||||||
|
}
|
||||||
|
for _ in 0..num_docs {
|
||||||
|
doc_set.push(doc_id);
|
||||||
|
index_writer.add_document(doc!(id_field=>doc_id));
|
||||||
|
doc_id += 1;
|
||||||
|
}
|
||||||
|
index_writer.commit()?;
|
||||||
|
reader.reload()?;
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
check_index_content(&searcher, &doc_set)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
 #[test]
 #[ignore]
 fn test_functional_indexing() -> crate::Result<()> {
     let mut schema_builder = Schema::builder();
     let id_field = schema_builder.add_u64_field("id", INDEXED);
     let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
     let schema = schema_builder.build();

-    let index = Index::create_from_tempdir(schema).unwrap();
+    let index = Index::create_from_tempdir(schema)?;
-    let reader = index.reader().unwrap();
+    let reader = index.reader()?;

     let mut rng = thread_rng();

-    let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
+    let mut index_writer = index.writer_with_num_threads(3, 120_000_000)?;

     let mut committed_docs: HashSet<u64> = HashSet::new();
     let mut uncommitted_docs: HashSet<u64> = HashSet::new();

     for _ in 0..200 {
-        let random_val = rng.gen_range(0, 20);
+        let random_val = rng.gen_range(0..20);
         if random_val == 0 {
-            index_writer.commit().expect("Commit failed");
+            index_writer.commit()?;
             committed_docs.extend(&uncommitted_docs);
             uncommitted_docs.clear();
-            reader.reload().unwrap();
+            reader.reload()?;
             let searcher = reader.searcher();
             // check that everything is correct.
-            check_index_content(&searcher, &committed_docs);
+            check_index_content(
+                &searcher,
+                &committed_docs.iter().cloned().collect::<Vec<u64>>(),
+            )?;
         } else {
             if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
                 let doc_id_term = Term::from_field_u64(id_field, random_val);
@@ -55,4 +103,5 @@ fn test_indexing() {
             }
         }
     }
+    Ok(())
 }
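Both functional tests above hand their assertions to check_index_content, whose body is not part of this diff. The following is only a rough sketch of what such a helper could look like; the signature mirrors the call sites above, but the body (a plain count check) is an assumption, not the crate's actual test helper.

// Hypothetical sketch of the helper called by the functional tests above.
// It only checks the document count; the real helper may verify the stored ids too.
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
    assert_eq!(searcher.num_docs() as usize, vals.len());
    Ok(())
}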

@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
 use crate::Opstamp;
 use std::mem;
 use std::ops::DerefMut;
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, RwLock, Weak};

 // The DeleteQueue is similar in conceptually to a multiple
 // consumer single producer broadcast channel.
@@ -14,14 +14,15 @@ use std::sync::{Arc, RwLock};
 //
 // New consumer can be created in two ways
 // - calling `delete_queue.cursor()` returns a cursor, that
-//   will include all future delete operation (and no past operations).
+//   will include all future delete operation (and some or none
+//   of the past operations... The client is in charge of checking the opstamps.).
 // - cloning an existing cursor returns a new cursor, that
 //   is at the exact same position, and can now advance independently
 //   from the original cursor.
 #[derive(Default)]
 struct InnerDeleteQueue {
     writer: Vec<DeleteOperation>,
-    last_block: Option<Arc<Block>>,
+    last_block: Weak<Block>,
 }

 #[derive(Clone)]
@@ -32,21 +33,31 @@ pub struct DeleteQueue {
 impl DeleteQueue {
     // Creates a new delete queue.
     pub fn new() -> DeleteQueue {
-        let delete_queue = DeleteQueue {
+        DeleteQueue {
             inner: Arc::default(),
-        };
-
-        let next_block = NextBlock::from(delete_queue.clone());
-
-        {
-            let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
-            delete_queue_wlock.last_block = Some(Arc::new(Block {
-                operations: Arc::default(),
-                next: next_block,
-            }));
         }
+    }

-        delete_queue
+    fn get_last_block(&self) -> Arc<Block> {
+        {
+            // try get the last block with simply acquiring the read lock.
+            let rlock = self.inner.read().unwrap();
+            if let Some(block) = rlock.last_block.upgrade() {
+                return block;
+            }
+        }
+        // It failed. Let's double check after acquiring the write, as someone could have called
+        // `get_last_block` right after we released the rlock.
+        let mut wlock = self.inner.write().unwrap();
+        if let Some(block) = wlock.last_block.upgrade() {
+            return block;
+        }
+        let block = Arc::new(Block {
+            operations: Arc::new([]),
+            next: NextBlock::from(self.clone()),
+        });
+        wlock.last_block = Arc::downgrade(&block);
+        block
     }

     // Creates a new cursor that makes it possible to
@@ -54,17 +65,7 @@ impl DeleteQueue {
     //
     // Past delete operations are not accessible.
     pub fn cursor(&self) -> DeleteCursor {
-        let last_block = self
-            .inner
-            .read()
-            .expect("Read lock poisoned when opening delete queue cursor")
-            .last_block
-            .clone()
-            .expect(
-                "Failed to unwrap last_block. This should never happen
-                as the Option<> is only here to make
-                initialization possible",
-            );
+        let last_block = self.get_last_block();
         let operations_len = last_block.operations.len();
         DeleteCursor {
             block: last_block,
@@ -100,23 +101,19 @@ impl DeleteQueue {
             .write()
             .expect("Failed to acquire write lock on delete queue writer");

-        let delete_operations;
-        {
-            let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
-            if writer.is_empty() {
-                return None;
-            }
-            delete_operations = mem::replace(writer, vec![]);
+        if self_wlock.writer.is_empty() {
+            return None;
         }

-        let next_block = NextBlock::from(self.clone());
-        {
-            self_wlock.last_block = Some(Arc::new(Block {
-                operations: Arc::new(delete_operations),
-                next: next_block,
-            }));
-        }
-        self_wlock.last_block.clone()
+        let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
+        let new_block = Arc::new(Block {
+            operations: Arc::from(delete_operations.into_boxed_slice()),
+            next: NextBlock::from(self.clone()),
+        });
+        self_wlock.last_block = Arc::downgrade(&new_block);
+        Some(new_block)
     }
 }

@@ -170,7 +167,7 @@ impl NextBlock {
 }

 struct Block {
-    operations: Arc<Vec<DeleteOperation>>,
+    operations: Arc<[DeleteOperation]>,
     next: NextBlock,
 }

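The comments at the top of this file describe the delete queue as, conceptually, a single-producer multi-consumer broadcast: a cursor obtained from cursor() starts at the current tail but may also replay some past operations, so consumers must check opstamps themselves, while cloning a cursor yields an independent reader at the same position. A minimal usage sketch of those semantics follows; cursor(), get() and advance() appear in this diff, but the push call, the id_field, and the surrounding setup are assumptions for illustration only.

// Sketch only: illustrates the cursor semantics described in the comments above.
let delete_queue = DeleteQueue::new();
let mut cursor_a = delete_queue.cursor();
delete_queue.push(DeleteOperation {
    opstamp: 12u64,
    term: Term::from_field_u64(id_field, 3u64), // `id_field` assumed to exist
});
let _cursor_b = cursor_a.clone(); // same position as cursor_a, advances independently
while let Some(delete_op) = cursor_a.get() {
    // The client is in charge of checking opstamps before applying a delete.
    if delete_op.opstamp <= 12u64 {
        // ... apply the delete ...
    }
    cursor_a.advance();
}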

@@ -1,15 +1,16 @@
 use super::operation::{AddOperation, UserOperation};
 use super::segment_updater::SegmentUpdater;
 use super::PreparedCommit;
+use crate::common::BitSet;
 use crate::core::Index;
 use crate::core::Segment;
 use crate::core::SegmentComponent;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::core::SegmentReader;
-use crate::directory::DirectoryLock;
 use crate::directory::TerminatingWrite;
-use crate::docset::DocSet;
+use crate::directory::{DirectoryLock, GarbageCollectionResult};
+use crate::docset::{DocSet, TERMINATED};
 use crate::error::TantivyError;
 use crate::fastfield::write_delete_bitset;
 use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
@@ -23,10 +24,9 @@ use crate::schema::Document;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::Opstamp;
-use crate::Result;
-use bit_set::BitSet;
 use crossbeam::channel;
-use futures::{Canceled, Future};
+use futures::executor::block_on;
+use futures::future::Future;
 use smallvec::smallvec;
 use smallvec::SmallVec;
 use std::mem;
@@ -72,7 +72,7 @@ pub struct IndexWriter {

     heap_size_in_bytes_per_thread: usize,

-    workers_join_handle: Vec<JoinHandle<Result<()>>>,
+    workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,

     operation_receiver: OperationReceiver,
     operation_sender: OperationSender,
@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
     delete_cursor: &mut DeleteCursor,
     doc_opstamps: &DocToOpstampMapping,
     target_opstamp: Opstamp,
-) -> Result<bool> {
+) -> crate::Result<bool> {
     let mut might_have_changed = false;
     while let Some(delete_op) = delete_cursor.get() {
         if delete_op.opstamp > target_opstamp {
@@ -108,83 +108,96 @@ fn compute_deleted_bitset(
         // Limit doc helps identify the first document
         // that may be affected by the delete operation.
         let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
-        let inverted_index = segment_reader.inverted_index(delete_op.term.field());
+        let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
         if let Some(mut docset) =
-            inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
+            inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
         {
-            while docset.advance() {
-                let deleted_doc = docset.doc();
+            let mut deleted_doc = docset.doc();
+            while deleted_doc != TERMINATED {
                 if deleted_doc < limit_doc {
-                    delete_bitset.insert(deleted_doc as usize);
+                    delete_bitset.insert(deleted_doc);
                     might_have_changed = true;
                 }
+                deleted_doc = docset.advance();
             }
         }

         delete_cursor.advance();
     }
     Ok(might_have_changed)
 }

-/// Advance delete for the given segment up
-/// to the target opstamp.
+/// Advance delete for the given segment up to the target opstamp.
+///
+/// Note that there are no guarantee that the resulting `segment_entry` delete_opstamp
+/// is `==` target_opstamp.
+/// For instance, there was no delete operation between the state of the `segment_entry` and
+/// the `target_opstamp`, `segment_entry` is not updated.
 pub(crate) fn advance_deletes(
     mut segment: Segment,
     segment_entry: &mut SegmentEntry,
     target_opstamp: Opstamp,
-) -> Result<()> {
-    {
-        if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
-            // We are already up-to-date here.
-            return Ok(());
-        }
+) -> crate::Result<()> {
+    if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
+        // We are already up-to-date here.
+        return Ok(());
+    }

-        let segment_reader = SegmentReader::open(&segment)?;
+    if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
+        // There has been no `DeleteOperation` between the segment status and `target_opstamp`.
+        return Ok(());
+    }

-        let max_doc = segment_reader.max_doc();
-        let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
-            Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
-            None => BitSet::with_capacity(max_doc as usize),
-        };
+    let segment_reader = SegmentReader::open(&segment)?;

-        let delete_cursor = segment_entry.delete_cursor();
+    let max_doc = segment_reader.max_doc();
+    let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
+        Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
+        None => BitSet::with_max_value(max_doc),
+    };

-        compute_deleted_bitset(
-            &mut delete_bitset,
-            &segment_reader,
-            delete_cursor,
-            &DocToOpstampMapping::None,
-            target_opstamp,
-        )?;
+    let num_deleted_docs_before = segment.meta().num_deleted_docs();

-        // TODO optimize
+    compute_deleted_bitset(
+        &mut delete_bitset,
+        &segment_reader,
+        segment_entry.delete_cursor(),
+        &DocToOpstampMapping::None,
+        target_opstamp,
+    )?;
+
+    // TODO optimize
+    // It should be possible to do something smarter by manipulation bitsets directly
+    // to compute this union.
+    if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
         for doc in 0u32..max_doc {
-            if segment_reader.is_deleted(doc) {
-                delete_bitset.insert(doc as usize);
+            if seg_delete_bitset.is_deleted(doc) {
+                delete_bitset.insert(doc);
             }
         }
-
-        let num_deleted_docs = delete_bitset.len();
-        if num_deleted_docs > 0 {
-            segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
-            let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
-            write_delete_bitset(&delete_bitset, &mut delete_file)?;
-            delete_file.terminate()?;
-        }
     }

+    let num_deleted_docs: u32 = delete_bitset.len() as u32;
+    if num_deleted_docs > num_deleted_docs_before {
+        // There are new deletes. We need to write a new delete file.
+        segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
+        let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
+        write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
+        delete_file.terminate()?;
+    }
+
     segment_entry.set_meta(segment.meta().clone());
     Ok(())
 }

 fn index_documents(
     memory_budget: usize,
-    segment: &Segment,
+    segment: Segment,
     grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
     segment_updater: &mut SegmentUpdater,
     mut delete_cursor: DeleteCursor,
-) -> Result<bool> {
+) -> crate::Result<bool> {
     let schema = segment.schema();
-    let segment_id = segment.id();
     let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
     for document_group in grouped_document_iterator {
         for doc in document_group {
@@ -204,22 +217,32 @@ fn index_documents(
         return Ok(false);
     }

-    let num_docs = segment_writer.max_doc();
+    let max_doc = segment_writer.max_doc();

     // this is ensured by the call to peek before starting
     // the worker thread.
-    assert!(num_docs > 0);
+    assert!(max_doc > 0);

     let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
-    let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);
+
+    let segment_with_max_doc = segment.with_max_doc(max_doc);

     let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());

-    let delete_bitset_opt =
-        apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
+    let delete_bitset_opt = apply_deletes(
+        &segment_with_max_doc,
+        &mut delete_cursor,
+        &doc_opstamps,
+        last_docstamp,
+    )?;

-    let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
-    Ok(segment_updater.add_segment(segment_entry))
+    let segment_entry = SegmentEntry::new(
+        segment_with_max_doc.meta().clone(),
+        delete_cursor,
+        delete_bitset_opt,
+    );
+    block_on(segment_updater.schedule_add_segment(segment_entry))?;
+    Ok(true)
 }

 fn apply_deletes(
@@ -227,7 +250,7 @@ fn apply_deletes(
     mut delete_cursor: &mut DeleteCursor,
     doc_opstamps: &[Opstamp],
     last_docstamp: Opstamp,
-) -> Result<Option<BitSet<u32>>> {
+) -> crate::Result<Option<BitSet>> {
     if delete_cursor.get().is_none() {
         // if there are no delete operation in the queue, no need
         // to even open the segment.
@@ -235,7 +258,9 @@ fn apply_deletes(
     }
     let segment_reader = SegmentReader::open(segment)?;
     let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
-    let mut deleted_bitset = BitSet::with_capacity(segment_reader.max_doc() as usize);
+    let max_doc = segment.meta().max_doc();
+    let mut deleted_bitset = BitSet::with_max_value(max_doc);
     let may_have_deletes = compute_deleted_bitset(
         &mut deleted_bitset,
         &segment_reader,
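The invariant driving compute_deleted_bitset above is that a delete stamped with opstamp T only affects documents whose add opstamp is smaller than T; compute_doc_limit turns that rule into a doc-id bound, which works because opstamps are assigned in increasing order within a segment. A self-contained toy version of that bound follows; it is an illustration under the assumption of a sorted opstamp slice, not the crate's DocToOpstampMapping implementation.

// Toy illustration of the doc-limit rule, assuming `doc_opstamps` is sorted ascending.
fn compute_doc_limit(doc_opstamps: &[u64], delete_opstamp: u64) -> u32 {
    // First doc whose opstamp is >= the delete opstamp: that doc and every later
    // doc were added after the delete, so they must not be affected by it.
    doc_opstamps
        .iter()
        .position(|&opstamp| opstamp >= delete_opstamp)
        .unwrap_or(doc_opstamps.len()) as u32
}

// Example: opstamps [1, 2, 5, 7] with a delete stamped 5 give limit_doc = 2,
// so only doc 0 and doc 1 are candidates for that delete.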
@@ -270,7 +295,7 @@ impl IndexWriter {
|
|||||||
num_threads: usize,
|
num_threads: usize,
|
||||||
heap_size_in_bytes_per_thread: usize,
|
heap_size_in_bytes_per_thread: usize,
|
||||||
directory_lock: DirectoryLock,
|
directory_lock: DirectoryLock,
|
||||||
) -> Result<IndexWriter> {
|
) -> crate::Result<IndexWriter> {
|
||||||
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
|
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
|
||||||
let err_msg = format!(
|
let err_msg = format!(
|
||||||
"The heap size per thread needs to be at least {}.",
|
"The heap size per thread needs to be at least {}.",
|
||||||
@@ -319,12 +344,17 @@ impl IndexWriter {
|
|||||||
Ok(index_writer)
|
Ok(index_writer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn drop_sender(&mut self) {
|
||||||
|
let (sender, _receiver) = channel::bounded(1);
|
||||||
|
self.operation_sender = sender;
|
||||||
|
}
|
||||||
|
|
||||||
/// If there are some merging threads, blocks until they all finish their work and
|
/// If there are some merging threads, blocks until they all finish their work and
|
||||||
/// then drop the `IndexWriter`.
|
/// then drop the `IndexWriter`.
|
||||||
pub fn wait_merging_threads(mut self) -> Result<()> {
|
pub fn wait_merging_threads(mut self) -> crate::Result<()> {
|
||||||
// this will stop the indexing thread,
|
// this will stop the indexing thread,
|
||||||
// dropping the last reference to the segment_updater.
|
// dropping the last reference to the segment_updater.
|
||||||
drop(self.operation_sender);
|
self.drop_sender();
|
||||||
|
|
||||||
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
|
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
|
||||||
for join_handle in former_workers_handles {
|
for join_handle in former_workers_handles {
|
||||||
@@ -335,7 +365,6 @@ impl IndexWriter {
|
|||||||
TantivyError::ErrorInThread("Error in indexing worker thread.".into())
|
TantivyError::ErrorInThread("Error in indexing worker thread.".into())
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
drop(self.workers_join_handle);
|
|
||||||
|
|
||||||
let result = self
|
let result = self
|
||||||
.segment_updater
|
.segment_updater
|
||||||
@@ -350,10 +379,10 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[doc(hidden)]
|
#[doc(hidden)]
|
||||||
pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
|
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
|
||||||
let delete_cursor = self.delete_queue.cursor();
|
let delete_cursor = self.delete_queue.cursor();
|
||||||
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
|
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
|
||||||
self.segment_updater.add_segment(segment_entry);
|
block_on(self.segment_updater.schedule_add_segment(segment_entry))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new segment.
|
/// Creates a new segment.
|
||||||
@@ -370,7 +399,7 @@ impl IndexWriter {
|
|||||||
|
|
||||||
/// Spawns a new worker thread for indexing.
|
/// Spawns a new worker thread for indexing.
|
||||||
/// The thread consumes documents from the pipeline.
|
/// The thread consumes documents from the pipeline.
|
||||||
fn add_indexing_worker(&mut self) -> Result<()> {
|
fn add_indexing_worker(&mut self) -> crate::Result<()> {
|
||||||
let document_receiver_clone = self.operation_receiver.clone();
|
let document_receiver_clone = self.operation_receiver.clone();
|
||||||
let mut segment_updater = self.segment_updater.clone();
|
let mut segment_updater = self.segment_updater.clone();
|
||||||
|
|
||||||
@@ -378,7 +407,7 @@ impl IndexWriter {
|
|||||||
|
|
||||||
let mem_budget = self.heap_size_in_bytes_per_thread;
|
let mem_budget = self.heap_size_in_bytes_per_thread;
|
||||||
let index = self.index.clone();
|
let index = self.index.clone();
|
||||||
let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
|
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
|
||||||
.name(format!("thrd-tantivy-index{}", self.worker_id))
|
.name(format!("thrd-tantivy-index{}", self.worker_id))
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
loop {
|
loop {
|
||||||
@@ -407,7 +436,7 @@ impl IndexWriter {
|
|||||||
let segment = index.new_segment();
|
let segment = index.new_segment();
|
||||||
index_documents(
|
index_documents(
|
||||||
mem_budget,
|
mem_budget,
|
||||||
&segment,
|
segment,
|
||||||
&mut document_iterator,
|
&mut document_iterator,
|
||||||
&mut segment_updater,
|
&mut segment_updater,
|
||||||
delete_cursor.clone(),
|
delete_cursor.clone(),
|
||||||
@@ -420,26 +449,27 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the merge policy.
|
/// Accessor to the merge policy.
|
||||||
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
|
pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
|
||||||
self.segment_updater.get_merge_policy()
|
self.segment_updater.get_merge_policy()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the merge policy.
|
/// Setter for the merge policy.
|
||||||
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
|
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
|
||||||
self.segment_updater.set_merge_policy(merge_policy);
|
self.segment_updater.set_merge_policy(merge_policy);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn start_workers(&mut self) -> Result<()> {
|
fn start_workers(&mut self) -> crate::Result<()> {
|
||||||
for _ in 0..self.num_threads {
|
for _ in 0..self.num_threads {
|
||||||
self.add_indexing_worker()?;
|
self.add_indexing_worker()?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Detects and removes the files that
|
/// Detects and removes the files that are not used by the index anymore.
|
||||||
/// are not used by the index anymore.
|
pub fn garbage_collect_files(
|
||||||
pub fn garbage_collect_files(&mut self) -> Result<()> {
|
&self,
|
||||||
self.segment_updater.garbage_collect_files().wait()
|
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
|
||||||
|
self.segment_updater.schedule_garbage_collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Deletes all documents from the index
|
/// Deletes all documents from the index
|
||||||
@@ -478,7 +508,7 @@ impl IndexWriter {
|
|||||||
/// Ok(())
|
/// Ok(())
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
pub fn delete_all_documents(&mut self) -> Result<Opstamp> {
|
pub fn delete_all_documents(&self) -> crate::Result<Opstamp> {
|
||||||
// Delete segments
|
// Delete segments
|
||||||
self.segment_updater.remove_all_segments();
|
self.segment_updater.remove_all_segments();
|
||||||
// Return new stamp - reverted stamp
|
// Return new stamp - reverted stamp
|
||||||
@@ -492,8 +522,10 @@ impl IndexWriter {
|
|||||||
pub fn merge(
|
pub fn merge(
|
||||||
&mut self,
|
&mut self,
|
||||||
segment_ids: &[SegmentId],
|
segment_ids: &[SegmentId],
|
||||||
) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> {
|
) -> impl Future<Output = crate::Result<SegmentMeta>> {
|
||||||
self.segment_updater.start_merge(segment_ids)
|
let merge_operation = self.segment_updater.make_merge_operation(segment_ids);
|
||||||
|
let segment_updater = self.segment_updater.clone();
|
||||||
|
async move { segment_updater.start_merge(merge_operation)?.await }
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Closes the current document channel send.
|
/// Closes the current document channel send.
|
||||||
@@ -504,6 +536,7 @@ impl IndexWriter {
|
|||||||
/// when no documents are remaining.
|
/// when no documents are remaining.
|
||||||
///
|
///
|
||||||
/// Returns the former segment_ready channel.
|
/// Returns the former segment_ready channel.
|
||||||
|
#[allow(unused_must_use)]
|
||||||
fn recreate_document_channel(&mut self) -> OperationReceiver {
|
fn recreate_document_channel(&mut self) -> OperationReceiver {
|
||||||
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
||||||
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
||||||
@@ -519,13 +552,8 @@ impl IndexWriter {
|
|||||||
/// state as it was after the last commit.
|
/// state as it was after the last commit.
|
||||||
///
|
///
|
||||||
/// The opstamp at the last commit is returned.
|
/// The opstamp at the last commit is returned.
|
||||||
pub fn rollback(&mut self) -> Result<Opstamp> {
|
pub fn rollback(&mut self) -> crate::Result<Opstamp> {
|
||||||
info!("Rolling back to opstamp {}", self.committed_opstamp);
|
info!("Rolling back to opstamp {}", self.committed_opstamp);
|
||||||
self.rollback_impl()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Private, implementation of rollback
|
|
||||||
fn rollback_impl(&mut self) -> Result<Opstamp> {
|
|
||||||
// marks the segment updater as killed. From now on, all
|
// marks the segment updater as killed. From now on, all
|
||||||
// segment updates will be ignored.
|
// segment updates will be ignored.
|
||||||
self.segment_updater.kill();
|
self.segment_updater.kill();
|
||||||
@@ -548,7 +576,7 @@ impl IndexWriter {
|
|||||||
//
|
//
|
||||||
// This will drop the document queue, and the thread
|
// This will drop the document queue, and the thread
|
||||||
// should terminate.
|
// should terminate.
|
||||||
mem::replace(self, new_index_writer);
|
*self = new_index_writer;
|
||||||
|
|
||||||
// Drains the document receiver pipeline :
|
// Drains the document receiver pipeline :
|
||||||
// Workers don't need to index the pending documents.
|
// Workers don't need to index the pending documents.
|
||||||
@@ -581,7 +609,7 @@ impl IndexWriter {
|
|||||||
/// It is also possible to add a payload to the `commit`
|
/// It is also possible to add a payload to the `commit`
|
||||||
/// using this API.
|
/// using this API.
|
||||||
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
|
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
|
||||||
pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
|
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
|
||||||
// Here, because we join all of the worker threads,
|
// Here, because we join all of the worker threads,
|
||||||
// all of the segment update for this commit have been
|
// all of the segment update for this commit have been
|
||||||
// sent.
|
// sent.
|
||||||
@@ -628,7 +656,7 @@ impl IndexWriter {
|
|||||||
/// Commit returns the `opstamp` of the last document
|
/// Commit returns the `opstamp` of the last document
|
||||||
/// that made it in the commit.
|
/// that made it in the commit.
|
||||||
///
|
///
|
||||||
pub fn commit(&mut self) -> Result<Opstamp> {
|
pub fn commit(&mut self) -> crate::Result<Opstamp> {
|
||||||
self.prepare_commit()?.commit()
|
self.prepare_commit()?.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -669,9 +697,6 @@ impl IndexWriter {
|
|||||||
/// The opstamp is an increasing `u64` that can
|
/// The opstamp is an increasing `u64` that can
|
||||||
/// be used by the client to align commits with its own
|
/// be used by the client to align commits with its own
|
||||||
/// document queue.
|
/// document queue.
|
||||||
///
|
|
||||||
/// Currently it represents the number of documents that
|
|
||||||
/// have been added since the creation of the index.
|
|
||||||
pub fn add_document(&self, document: Document) -> Opstamp {
|
pub fn add_document(&self, document: Document) -> Opstamp {
|
||||||
let opstamp = self.stamper.stamp();
|
let opstamp = self.stamper.stamp();
|
||||||
let add_operation = AddOperation { opstamp, document };
|
let add_operation = AddOperation { opstamp, document };
|
||||||
@@ -745,6 +770,16 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||

+impl Drop for IndexWriter {
+    fn drop(&mut self) {
+        self.segment_updater.kill();
+        self.drop_sender();
+        for work in self.workers_join_handle.drain(..) {
+            let _ = work.join();
+        }
+    }
+}

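The Drop implementation above goes through drop_sender (added earlier in this diff) instead of dropping self.operation_sender directly, because drop only has &mut self and cannot move the field out. Swapping in one end of a fresh, unrelated channel achieves the same thing: the previous sender is dropped, the old channel is closed, and the worker threads' receive loop terminates. A standalone sketch of that idea with crossbeam, independent of IndexWriter:

// Minimal sketch of the "swap in a dummy sender to close a channel" idea.
use crossbeam::channel;

struct Pipeline {
    sender: channel::Sender<u32>,
}

impl Pipeline {
    fn drop_sender(&mut self) {
        // The old sender is dropped on assignment; receivers of the old channel
        // then observe it as closed and their iteration ends.
        let (dummy_sender, _dummy_receiver) = channel::bounded(1);
        self.sender = dummy_sender;
    }
}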
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
@@ -754,7 +789,7 @@ mod tests {
|
|||||||
use crate::error::*;
|
use crate::error::*;
|
||||||
use crate::indexer::NoMergePolicy;
|
use crate::indexer::NoMergePolicy;
|
||||||
use crate::query::TermQuery;
|
use crate::query::TermQuery;
|
||||||
use crate::schema::{self, IndexRecordOption};
|
use crate::schema::{self, IndexRecordOption, STRING};
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::ReloadPolicy;
|
use crate::ReloadPolicy;
|
||||||
use crate::Term;
|
use crate::Term;
|
||||||
@@ -765,7 +800,7 @@ mod tests {
|
|||||||
let mut schema_builder = schema::Schema::builder();
|
let mut schema_builder = schema::Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let index_writer = index.writer_for_tests().unwrap();
|
||||||
let operations = vec![
|
let operations = vec![
|
||||||
UserOperation::Add(doc!(text_field=>"a")),
|
UserOperation::Add(doc!(text_field=>"a")),
|
||||||
UserOperation::Add(doc!(text_field=>"b")),
|
UserOperation::Add(doc!(text_field=>"b")),
|
||||||
@@ -774,6 +809,46 @@ mod tests {
|
|||||||
assert_eq!(batch_opstamp1, 2u64);
|
assert_eq!(batch_opstamp1, 2u64);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_no_need_to_rewrite_delete_file_if_no_new_deletes() {
|
||||||
|
let mut schema_builder = schema::Schema::builder();
|
||||||
|
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||||
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
|
|
||||||
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
index_writer.add_document(doc!(text_field => "hello1"));
|
||||||
|
index_writer.add_document(doc!(text_field => "hello2"));
|
||||||
|
assert!(index_writer.commit().is_ok());
|
||||||
|
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
assert_eq!(searcher.segment_readers().len(), 1);
|
||||||
|
assert_eq!(searcher.segment_reader(0u32).num_deleted_docs(), 0);
|
||||||
|
|
||||||
|
index_writer.delete_term(Term::from_field_text(text_field, "hello1"));
|
||||||
|
assert!(index_writer.commit().is_ok());
|
||||||
|
|
||||||
|
assert!(reader.reload().is_ok());
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
assert_eq!(searcher.segment_readers().len(), 1);
|
||||||
|
assert_eq!(searcher.segment_reader(0u32).num_deleted_docs(), 1);
|
||||||
|
|
||||||
|
let previous_delete_opstamp = index.load_metas().unwrap().segments[0].delete_opstamp();
|
||||||
|
|
||||||
|
// All docs containing hello1 have been already removed.
|
||||||
|
// We should not update the delete meta.
|
||||||
|
index_writer.delete_term(Term::from_field_text(text_field, "hello1"));
|
||||||
|
assert!(index_writer.commit().is_ok());
|
||||||
|
|
||||||
|
assert!(reader.reload().is_ok());
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
assert_eq!(searcher.segment_readers().len(), 1);
|
||||||
|
assert_eq!(searcher.segment_reader(0u32).num_deleted_docs(), 1);
|
||||||
|
|
||||||
|
let after_delete_opstamp = index.load_metas().unwrap().segments[0].delete_opstamp();
|
||||||
|
assert_eq!(after_delete_opstamp, previous_delete_opstamp);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_ordered_batched_operations() {
|
fn test_ordered_batched_operations() {
|
||||||
// * one delete for `doc!(field=>"a")`
|
// * one delete for `doc!(field=>"a")`
|
||||||
@@ -789,7 +864,7 @@ mod tests {
|
|||||||
.reload_policy(ReloadPolicy::Manual)
|
.reload_policy(ReloadPolicy::Manual)
|
||||||
.try_into()
|
.try_into()
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
let a_term = Term::from_field_text(text_field, "a");
|
let a_term = Term::from_field_text(text_field, "a");
|
||||||
let b_term = Term::from_field_text(text_field, "b");
|
let b_term = Term::from_field_text(text_field, "b");
|
||||||
let operations = vec![
|
let operations = vec![
|
||||||
@@ -851,8 +926,8 @@ mod tests {
|
|||||||
fn test_lockfile_already_exists_error_msg() {
|
fn test_lockfile_already_exists_error_msg() {
|
||||||
let schema_builder = schema::Schema::builder();
|
let schema_builder = schema::Schema::builder();
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let _index_writer = index.writer_for_tests().unwrap();
|
||||||
match index.writer_with_num_threads(1, 3_000_000) {
|
match index.writer_for_tests() {
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
let err_msg = err.to_string();
|
let err_msg = err.to_string();
|
||||||
assert!(err_msg.contains("already an `IndexWriter`"));
|
assert!(err_msg.contains("already an `IndexWriter`"));
|
||||||
@@ -868,7 +943,7 @@ mod tests {
|
|||||||
let index_writer = index.writer(3_000_000).unwrap();
|
let index_writer = index.writer(3_000_000).unwrap();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
format!("{:?}", index_writer.get_merge_policy()),
|
format!("{:?}", index_writer.get_merge_policy()),
|
||||||
"LogMergePolicy { min_merge_size: 8, min_layer_size: 10000, \
|
"LogMergePolicy { min_merge_size: 8, max_merge_size: 10000000, min_layer_size: 10000, \
|
||||||
level_log_size: 0.75 }"
|
level_log_size: 0.75 }"
|
||||||
);
|
);
|
||||||
let merge_policy = Box::new(NoMergePolicy::default());
|
let merge_policy = Box::new(NoMergePolicy::default());
|
||||||
@@ -904,7 +979,7 @@ mod tests {
|
|||||||
let num_docs_containing = |s: &str| {
|
let num_docs_containing = |s: &str| {
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_text(text_field, s);
|
let term = Term::from_field_text(text_field, s);
|
||||||
searcher.doc_freq(&term)
|
searcher.doc_freq(&term).unwrap()
|
||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -940,7 +1015,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
let num_docs_containing = |s: &str| {
|
let num_docs_containing = |s: &str| {
|
||||||
let term_a = Term::from_field_text(text_field, s);
|
let term_a = Term::from_field_text(text_field, s);
|
||||||
reader.searcher().doc_freq(&term_a)
|
reader.searcher().doc_freq(&term_a).unwrap()
|
||||||
};
|
};
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
@@ -1035,6 +1110,7 @@ mod tests {
|
|||||||
.unwrap()
|
.unwrap()
|
||||||
.searcher()
|
.searcher()
|
||||||
.doc_freq(&term_a)
|
.doc_freq(&term_a)
|
||||||
|
.unwrap()
|
||||||
};
|
};
|
||||||
assert_eq!(num_docs_containing("a"), 0);
|
assert_eq!(num_docs_containing("a"), 0);
|
||||||
assert_eq!(num_docs_containing("b"), 100);
|
assert_eq!(num_docs_containing("b"), 100);
|
||||||
@@ -1054,7 +1130,7 @@ mod tests {
|
|||||||
reader.reload().unwrap();
|
reader.reload().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_text(text_field, s);
|
let term = Term::from_field_text(text_field, s);
|
||||||
searcher.doc_freq(&term)
|
searcher.doc_freq(&term).unwrap()
|
||||||
};
|
};
|
||||||
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
|
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
|
||||||
|
|
||||||
@@ -1105,7 +1181,15 @@ mod tests {
|
|||||||
|
|
||||||
// working with an empty index == no documents
|
// working with an empty index == no documents
|
||||||
let term_b = Term::from_field_text(text_field, "b");
|
let term_b = Term::from_field_text(text_field, "b");
|
||||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
|
assert_eq!(
|
||||||
|
index
|
||||||
|
.reader()
|
||||||
|
.unwrap()
|
||||||
|
.searcher()
|
||||||
|
.doc_freq(&term_b)
|
||||||
|
.unwrap(),
|
||||||
|
0
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1125,7 +1209,15 @@ mod tests {
|
|||||||
|
|
||||||
let term_a = Term::from_field_text(text_field, "a");
|
let term_a = Term::from_field_text(text_field, "a");
|
||||||
// expect the document with that term to be in the index
|
// expect the document with that term to be in the index
|
||||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
assert_eq!(
|
||||||
|
index
|
||||||
|
.reader()
|
||||||
|
.unwrap()
|
||||||
|
.searcher()
|
||||||
|
.doc_freq(&term_a)
|
||||||
|
.unwrap(),
|
||||||
|
1
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1151,7 +1243,15 @@ mod tests {
|
|||||||
// Find original docs in the index
|
// Find original docs in the index
|
||||||
let term_a = Term::from_field_text(text_field, "a");
|
let term_a = Term::from_field_text(text_field, "a");
|
||||||
// expect the document with that term to be in the index
|
// expect the document with that term to be in the index
|
||||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
assert_eq!(
|
||||||
|
index
|
||||||
|
.reader()
|
||||||
|
.unwrap()
|
||||||
|
.searcher()
|
||||||
|
.doc_freq(&term_a)
|
||||||
|
.unwrap(),
|
||||||
|
1
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1179,4 +1279,16 @@ mod tests {
|
|||||||
assert!(clear_again.is_ok());
|
assert!(clear_again.is_ok());
|
||||||
assert!(commit_again.is_ok());
|
assert!(commit_again.is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_index_doc_missing_field() {
|
||||||
|
let mut schema_builder = schema::Schema::builder();
|
||||||
|
let idfield = schema_builder.add_text_field("id", STRING);
|
||||||
|
schema_builder.add_text_field("optfield", STRING);
|
||||||
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
index_writer.add_document(doc!(idfield=>"myid"));
|
||||||
|
let commit = index_writer.commit();
|
||||||
|
assert!(commit.is_ok());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|

@@ -6,12 +6,14 @@ use std::f64;
 const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
 const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
 const DEFAULT_MIN_MERGE_SIZE: usize = 8;
+const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;

-/// `LogMergePolicy` tries tries to merge segments that have a similar number of
+/// `LogMergePolicy` tries to merge segments that have a similar number of
 /// documents.
 #[derive(Debug, Clone)]
 pub struct LogMergePolicy {
     min_merge_size: usize,
+    max_merge_size: usize,
     min_layer_size: u32,
     level_log_size: f64,
 }
@@ -26,6 +28,12 @@ impl LogMergePolicy {
         self.min_merge_size = min_merge_size;
     }

+    /// Set the maximum number docs in a segment for it to be considered for
+    /// merging.
+    pub fn set_max_merge_size(&mut self, max_merge_size: usize) {
+        self.max_merge_size = max_merge_size;
+    }
+
     /// Set the minimum segment size under which all segment belong
     /// to the same level.
     pub fn set_min_layer_size(&mut self, min_layer_size: u32) {
@@ -46,39 +54,44 @@ impl LogMergePolicy {

 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
-        if segments.is_empty() {
-            return Vec::new();
-        }
-
         let mut size_sorted_tuples = segments
             .iter()
             .map(SegmentMeta::num_docs)
+            .filter(|s| s <= &(self.max_merge_size as u32))
             .enumerate()
             .collect::<Vec<(usize, u32)>>();

         size_sorted_tuples.sort_by(|x, y| y.1.cmp(&(x.1)));

+        if size_sorted_tuples.len() <= 1 {
+            return Vec::new();
+        }
+
         let size_sorted_log_tuples: Vec<_> = size_sorted_tuples
             .into_iter()
             .map(|(ind, num_docs)| (ind, f64::from(self.clip_min_size(num_docs)).log2()))
             .collect();

-        let (first_ind, first_score) = size_sorted_log_tuples[0];
-        let mut current_max_log_size = first_score;
-        let mut levels = vec![vec![first_ind]];
-        for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
-            if score < (current_max_log_size - self.level_log_size) {
-                current_max_log_size = score;
-                levels.push(Vec::new());
+        if let Some(&(first_ind, first_score)) = size_sorted_log_tuples.first() {
+            let mut current_max_log_size = first_score;
+            let mut levels = vec![vec![first_ind]];
+            for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
+                if score < (current_max_log_size - self.level_log_size) {
+                    current_max_log_size = score;
+                    levels.push(Vec::new());
+                }
+                levels.last_mut().unwrap().push(ind);
             }
-            levels.last_mut().unwrap().push(ind);
+            levels
+                .iter()
+                .filter(|level| level.len() >= self.min_merge_size)
+                .map(|ind_vec| {
+                    MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect())
+                })
+                .collect()
+        } else {
+            return vec![];
         }
-
-        levels
-            .iter()
-            .filter(|level| level.len() >= self.min_merge_size)
-            .map(|ind_vec| MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect()))
-            .collect()
     }
 }

@@ -86,6 +99,7 @@ impl Default for LogMergePolicy {
     fn default() -> LogMergePolicy {
         LogMergePolicy {
             min_merge_size: DEFAULT_MIN_MERGE_SIZE,
+            max_merge_size: DEFAULT_MAX_MERGE_SIZE,
             min_layer_size: DEFAULT_MIN_LAYER_SIZE,
             level_log_size: DEFAULT_LEVEL_LOG_SIZE,
         }
@@ -104,6 +118,7 @@ mod tests {
     fn test_merge_policy() -> LogMergePolicy {
         let mut log_merge_policy = LogMergePolicy::default();
         log_merge_policy.set_min_merge_size(3);
+        log_merge_policy.set_max_merge_size(100_000);
         log_merge_policy.set_min_layer_size(2);
         log_merge_policy
     }
@@ -141,11 +156,11 @@ mod tests {
             create_random_segment_meta(10),
             create_random_segment_meta(10),
             create_random_segment_meta(10),
-            create_random_segment_meta(1000),
-            create_random_segment_meta(1000),
-            create_random_segment_meta(1000),
-            create_random_segment_meta(10000),
-            create_random_segment_meta(10000),
+            create_random_segment_meta(1_000),
+            create_random_segment_meta(1_000),
+            create_random_segment_meta(1_000),
+            create_random_segment_meta(10_000),
+            create_random_segment_meta(10_000),
             create_random_segment_meta(10),
             create_random_segment_meta(10),
             create_random_segment_meta(10),
@@ -168,6 +183,7 @@ mod tests {
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
         assert_eq!(result_list.len(), 2);
     }

     #[test]
     fn test_log_merge_policy_small_segments() {
         // segments under min_layer_size are merged together
@@ -182,4 +198,30 @@ mod tests {
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
         assert_eq!(result_list.len(), 1);
     }
+
+    #[test]
+    fn test_log_merge_policy_all_segments_too_large_to_merge() {
+        let eight_large_segments: Vec<SegmentMeta> =
+            std::iter::repeat_with(|| create_random_segment_meta(100_001))
+                .take(8)
+                .collect();
+        assert!(test_merge_policy()
+            .compute_merge_candidates(&eight_large_segments)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_large_merge_segments() {
+        let test_input = vec![
+            create_random_segment_meta(1_000_000),
+            create_random_segment_meta(100_001),
+            create_random_segment_meta(100_000),
+            create_random_segment_meta(100_000),
+            create_random_segment_meta(100_000),
+        ];
+        let result_list = test_merge_policy().compute_merge_candidates(&test_input);
+        // Do not include large segments
+        assert_eq!(result_list.len(), 1);
+        assert_eq!(result_list[0].0.len(), 3)
+    }
 }
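The candidate computation above buckets segments by the log2 of their (clipped) doc counts: segments are sorted by size, a new level starts whenever a segment's log-size drops more than level_log_size below the largest log-size of the current level, and levels with at least min_merge_size members become merge candidates. A stripped-down version of just the bucketing step, assuming sizes are already sorted in decreasing order and using a simple max(1) clip instead of the policy's min_layer_size clip:

// Sketch of the level bucketing only; SegmentMeta handling and candidate filtering are omitted.
fn bucket_by_log_size(sorted_sizes_desc: &[u32], level_log_size: f64) -> Vec<Vec<u32>> {
    let mut levels: Vec<Vec<u32>> = Vec::new();
    let mut current_max_log_size = f64::NEG_INFINITY;
    for &size in sorted_sizes_desc {
        let score = f64::from(size.max(1)).log2();
        if levels.is_empty() || score < current_max_log_size - level_log_size {
            current_max_log_size = score;
            levels.push(Vec::new());
        }
        levels.last_mut().unwrap().push(size);
    }
    levels
}

// e.g. sizes [10_000, 10_000, 1_000, 1_000, 10, 10, 10] with level_log_size = 0.75
// split into three levels, mirroring the grouping exercised by the tests above.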

@@ -2,14 +2,23 @@ use crate::Opstamp;
 use crate::SegmentId;
 use census::{Inventory, TrackedObject};
 use std::collections::HashSet;
+use std::ops::Deref;

 #[derive(Default)]
-pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+
+impl Deref for MergeOperationInventory {
+    type Target = Inventory<InnerMergeOperation>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}

 impl MergeOperationInventory {
     pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
         let mut segment_in_merge = HashSet::default();
-        for merge_op in self.0.list() {
+        for merge_op in self.list() {
             for &segment_id in &merge_op.segment_ids {
                 segment_in_merge.insert(segment_id);
             }
@@ -35,13 +44,13 @@ pub struct MergeOperation {
     inner: TrackedObject<InnerMergeOperation>,
 }

-struct InnerMergeOperation {
+pub(crate) struct InnerMergeOperation {
     target_opstamp: Opstamp,
     segment_ids: Vec<SegmentId>,
 }

 impl MergeOperation {
-    pub fn new(
+    pub(crate) fn new(
         inventory: &MergeOperationInventory,
         target_opstamp: Opstamp,
         segment_ids: Vec<SegmentId>,
@@ -51,7 +60,7 @@ impl MergeOperation {
             segment_ids,
         };
         MergeOperation {
-            inner: inventory.0.track(inner_merge_operation),
+            inner: inventory.track(inner_merge_operation),
         }
     }
 }
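Implementing Deref on the newtype is what lets segment_in_merge call self.list() instead of self.0.list(): every inherent method of the wrapped Inventory becomes reachable through the wrapper. A tiny standalone illustration of the pattern (the Inventory here is a stand-in struct, unrelated to the census crate):

// Generic newtype-with-Deref sketch.
use std::ops::Deref;

struct Inventory(Vec<String>);

impl Inventory {
    fn list(&self) -> &[String] {
        &self.0
    }
}

struct InventoryWrapper(Inventory);

impl Deref for InventoryWrapper {
    type Target = Inventory;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn demo(wrapper: &InventoryWrapper) -> usize {
    // `list()` resolves through Deref, exactly like `self.list()` above.
    wrapper.list().len()
}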

(File diff suppressed because it is too large.)

@@ -18,7 +18,7 @@ mod stamper;

 pub use self::index_writer::IndexWriter;
 pub use self::log_merge_policy::LogMergePolicy;
-pub use self::merge_operation::{MergeOperation, MergeOperationInventory};
+pub use self::merge_operation::MergeOperation;
 pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
 pub use self::prepared_commit::PreparedCommit;
 pub use self::segment_entry::SegmentEntry;
@@ -28,3 +28,27 @@ pub use self::segment_writer::SegmentWriter;

 /// Alias for the default merge policy, which is the `LogMergePolicy`.
 pub type DefaultMergePolicy = LogMergePolicy;
+
+#[cfg(feature = "mmap")]
+#[cfg(test)]
+mod tests_mmap {
+    use crate::schema::{self, Schema};
+    use crate::{Index, Term};
+
+    #[test]
+    fn test_advance_delete_bug() {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", schema::TEXT);
+        let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
+        // there must be one deleted document in the segment
+        index_writer.add_document(doc!(text_field=>"b"));
+        index_writer.delete_term(Term::from_field_text(text_field, "b"));
+        // we need enough data to trigger the bug (at least 32 documents)
+        for _ in 0..32 {
+            index_writer.add_document(doc!(text_field=>"c"));
+        }
+        index_writer.commit().unwrap();
+        index_writer.commit().unwrap();
+    }
+}
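Between the new max_merge_size knob on LogMergePolicy and the DefaultMergePolicy alias re-exported here, a caller can cap which segments are ever considered for merging. A hedged usage sketch follows; the import path is assumed from the module layout in this diff and may differ in the published crate, while set_merge_policy and the setters are the methods shown in the hunks above.

// Sketch: capping merge candidates through the default merge policy.
use tantivy::merge_policy::LogMergePolicy; // path assumed; aliased as DefaultMergePolicy

let mut merge_policy = LogMergePolicy::default();
merge_policy.set_min_merge_size(8);
merge_policy.set_max_merge_size(10_000_000); // segments with more docs are left alone
index_writer.set_merge_policy(Box::new(merge_policy));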

@@ -9,6 +9,15 @@ pub struct DeleteOperation {
     pub term: Term,
 }

+impl Default for DeleteOperation {
+    fn default() -> Self {
+        DeleteOperation {
+            opstamp: 0u64,
+            term: Term::new(),
+        }
+    }
+}
+
 /// Timestamped Add operation.
 #[derive(Eq, PartialEq, Debug)]
 pub struct AddOperation {
@@ -19,6 +28,8 @@ pub struct AddOperation {
 /// UserOperation is an enum type that encapsulates other operation types.
 #[derive(Eq, PartialEq, Debug)]
 pub enum UserOperation {
+    /// Add operation
     Add(Document),
+    /// Delete operation
     Delete(Term),
 }
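UserOperation is what the writer's batched entry point consumes; the test earlier in this diff builds a Vec of them and receives a single opstamp for the whole ordered batch. A sketch of that calling pattern follows; the name and return type of the batch method are assumed from that test, not restated by this hunk.

// Sketch of an ordered, batched submission (batch method name assumed from the tests above).
let operations = vec![
    UserOperation::Add(doc!(text_field => "a")),
    UserOperation::Delete(Term::from_field_text(text_field, "a")),
    UserOperation::Add(doc!(text_field => "b")),
];
let batch_opstamp = index_writer.run(operations); // one opstamp covers the whole batch
index_writer.commit()?;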

@@ -1,6 +1,6 @@
 use super::IndexWriter;
 use crate::Opstamp;
-use crate::Result;
+use futures::executor::block_on;

 /// A prepared commit
 pub struct PreparedCommit<'a> {
@@ -26,15 +26,17 @@ impl<'a> PreparedCommit<'a> {
         self.payload = Some(payload.to_string())
     }

-    pub fn abort(self) -> Result<Opstamp> {
+    pub fn abort(self) -> crate::Result<Opstamp> {
         self.index_writer.rollback()
     }

-    pub fn commit(self) -> Result<Opstamp> {
+    pub fn commit(self) -> crate::Result<Opstamp> {
         info!("committing {}", self.opstamp);
-        self.index_writer
-            .segment_updater()
-            .commit(self.opstamp, self.payload)?;
+        let _ = block_on(
+            self.index_writer
+                .segment_updater()
+                .schedule_commit(self.opstamp, self.payload),
+        );
         Ok(self.opstamp)
     }
 }
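With this change, PreparedCommit::commit() simply schedules the commit on the segment updater and blocks on the returned future; from the caller's side the two-phase flow is unchanged. A short usage sketch of that flow, using only methods visible in this diff (prepare_commit, set_payload, commit); the payload string is illustrative.

// Sketch of the two-phase commit flow; error handling kept minimal.
let mut prepared = index_writer.prepare_commit()?;
prepared.set_payload("checkpoint-42"); // optional payload stored with the commit
let opstamp = prepared.commit()?;      // internally block_on(schedule_commit(..))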

@@ -1,7 +1,7 @@
+use crate::common::BitSet;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::indexer::delete_queue::DeleteCursor;
-use bit_set::BitSet;
 use std::fmt;

 /// A segment entry describes the state of
|||||||
@@ -4,7 +4,6 @@ use crate::core::SegmentMeta;
 use crate::error::TantivyError;
 use crate::indexer::delete_queue::DeleteCursor;
 use crate::indexer::SegmentEntry;
-use crate::Result as TantivyResult;
 use std::collections::hash_set::HashSet;
 use std::fmt::{self, Debug, Formatter};
 use std::sync::RwLock;
@@ -16,6 +15,28 @@ struct SegmentRegisters {
     committed: SegmentRegister,
 }
 
+#[derive(PartialEq, Eq)]
+pub(crate) enum SegmentsStatus {
+    Committed,
+    Uncommitted,
+}
+
+impl SegmentRegisters {
+    /// Check if all the segments are committed or uncommited.
+    ///
+    /// If some segment is missing or segments are in a different state (this should not happen
+    /// if tantivy is used correctly), returns `None`.
+    fn segments_status(&self, segment_ids: &[SegmentId]) -> Option<SegmentsStatus> {
+        if self.uncommitted.contains_all(segment_ids) {
+            Some(SegmentsStatus::Uncommitted)
+        } else if self.committed.contains_all(segment_ids) {
+            Some(SegmentsStatus::Committed)
+        } else {
+            None
+        }
+    }
+}
+
 /// The segment manager stores the list of segments
 /// as well as their state.
 ///
@@ -27,7 +48,7 @@ pub struct SegmentManager {
 }
 
 impl Debug for SegmentManager {
-    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
         let lock = self.read();
         write!(
             f,
@@ -123,7 +144,7 @@ impl SegmentManager {
     /// Returns an error if some segments are missing, or if
     /// the `segment_ids` are not either all committed or all
     /// uncommitted.
-    pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
+    pub fn start_merge(&self, segment_ids: &[SegmentId]) -> crate::Result<Vec<SegmentEntry>> {
         let registers_lock = self.read();
         let mut segment_entries = vec![];
         if registers_lock.uncommitted.contains_all(segment_ids) {
@@ -153,33 +174,35 @@ impl SegmentManager {
         let mut registers_lock = self.write();
         registers_lock.uncommitted.add_segment_entry(segment_entry);
     }
 
-    pub fn end_merge(
+    // Replace a list of segments for their equivalent merged segment.
+    //
+    // Returns true if these segments are committed, false if the merge segments are uncommited.
+    pub(crate) fn end_merge(
         &self,
         before_merge_segment_ids: &[SegmentId],
         after_merge_segment_entry: SegmentEntry,
-    ) {
+    ) -> crate::Result<SegmentsStatus> {
         let mut registers_lock = self.write();
-        let target_register: &mut SegmentRegister = {
-            if registers_lock
-                .uncommitted
-                .contains_all(before_merge_segment_ids)
-            {
-                &mut registers_lock.uncommitted
-            } else if registers_lock
-                .committed
-                .contains_all(before_merge_segment_ids)
-            {
-                &mut registers_lock.committed
-            } else {
-                warn!("couldn't find segment in SegmentManager");
-                return;
-            }
+        let segments_status = registers_lock
+            .segments_status(before_merge_segment_ids)
+            .ok_or_else(|| {
+                warn!("couldn't find segment in SegmentManager");
+                crate::TantivyError::InvalidArgument(
+                    "The segments that were merged could not be found in the SegmentManager. \
+                     This is not necessarily a bug, and can happen after a rollback for instance."
+                        .to_string(),
+                )
+            })?;
+
+        let target_register: &mut SegmentRegister = match segments_status {
+            SegmentsStatus::Uncommitted => &mut registers_lock.uncommitted,
+            SegmentsStatus::Committed => &mut registers_lock.committed,
         };
         for segment_id in before_merge_segment_ids {
             target_register.remove_segment(segment_id);
         }
         target_register.add_segment_entry(after_merge_segment_entry);
+        Ok(segments_status)
     }
 
     pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {
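In the `end_merge` hunk above, the old committed/uncommitted branch is folded into a helper that returns `Option<SegmentsStatus>`, which the caller turns into an error with `ok_or_else` instead of an early `return`. A minimal, self-contained sketch of that shape follows; the `Status` enum and plain `HashSet<u32>` registers are placeholders for this illustration, not tantivy types.

```rust
use std::collections::HashSet;

#[derive(Debug, PartialEq, Eq)]
enum Status {
    Committed,
    Uncommitted,
}

// Returns None when the ids are not all in the same register.
fn status(committed: &HashSet<u32>, uncommitted: &HashSet<u32>, ids: &[u32]) -> Option<Status> {
    if ids.iter().all(|id| uncommitted.contains(id)) {
        Some(Status::Uncommitted)
    } else if ids.iter().all(|id| committed.contains(id)) {
        Some(Status::Committed)
    } else {
        None
    }
}

// The caller upgrades the "not found" case into an error instead of returning early.
fn end_merge(
    committed: &HashSet<u32>,
    uncommitted: &HashSet<u32>,
    ids: &[u32],
) -> Result<Status, String> {
    status(committed, uncommitted, ids)
        .ok_or_else(|| "merged segments not found in the segment manager".to_string())
}

fn main() {
    let committed: HashSet<u32> = [1, 2].into_iter().collect();
    let uncommitted: HashSet<u32> = [3].into_iter().collect();
    assert_eq!(end_merge(&committed, &uncommitted, &[1, 2]), Ok(Status::Committed));
    assert!(end_merge(&committed, &uncommitted, &[1, 3]).is_err());
}
```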
@@ -1,5 +1,3 @@
-use crate::Result;
-
 use crate::core::Segment;
 use crate::core::SegmentComponent;
 use crate::fastfield::FastFieldSerializer;
@@ -10,15 +8,16 @@ use crate::store::StoreWriter;
 /// Segment serializer is in charge of laying out on disk
 /// the data accumulated and sorted by the `SegmentWriter`.
 pub struct SegmentSerializer {
+    segment: Segment,
     store_writer: StoreWriter,
     fast_field_serializer: FastFieldSerializer,
-    fieldnorms_serializer: FieldNormsSerializer,
+    fieldnorms_serializer: Option<FieldNormsSerializer>,
     postings_serializer: InvertedIndexSerializer,
 }
 
 impl SegmentSerializer {
     /// Creates a new `SegmentSerializer`.
-    pub fn for_segment(segment: &mut Segment) -> Result<SegmentSerializer> {
+    pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
         let store_write = segment.open_write(SegmentComponent::STORE)?;
 
         let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
@@ -27,15 +26,20 @@ impl SegmentSerializer {
         let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
         let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
 
-        let postings_serializer = InvertedIndexSerializer::open(segment)?;
+        let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
         Ok(SegmentSerializer {
+            segment,
             store_writer: StoreWriter::new(store_write),
             fast_field_serializer,
-            fieldnorms_serializer,
+            fieldnorms_serializer: Some(fieldnorms_serializer),
             postings_serializer,
         })
     }
 
+    pub fn segment(&self) -> &Segment {
+        &self.segment
+    }
+
     /// Accessor to the `PostingsSerializer`.
     pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
         &mut self.postings_serializer
@@ -46,9 +50,11 @@ impl SegmentSerializer {
         &mut self.fast_field_serializer
     }
 
-    /// Accessor to the field norm serializer.
-    pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer {
-        &mut self.fieldnorms_serializer
+    /// Extract the field norm serializer.
+    ///
+    /// Note the fieldnorms serializer can only be extracted once.
+    pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
+        self.fieldnorms_serializer.take()
     }
 
     /// Accessor to the `StoreWriter`.
@@ -57,11 +63,13 @@ impl SegmentSerializer {
     }
 
     /// Finalize the segment serialization.
-    pub fn close(self) -> Result<()> {
+    pub fn close(mut self) -> crate::Result<()> {
+        if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
+            fieldnorms_serializer.close()?;
+        }
         self.fast_field_serializer.close()?;
         self.postings_serializer.close()?;
         self.store_writer.close()?;
-        self.fieldnorms_serializer.close()?;
         Ok(())
     }
 }
@@ -6,39 +6,33 @@ use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::core::SerializableSegment;
 use crate::core::META_FILEPATH;
-use crate::directory::{Directory, DirectoryClone};
-use crate::error::TantivyError;
+use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
 use crate::indexer::delete_queue::DeleteCursor;
 use crate::indexer::index_writer::advance_deletes;
 use crate::indexer::merge_operation::MergeOperationInventory;
 use crate::indexer::merger::IndexMerger;
+use crate::indexer::segment_manager::SegmentsStatus;
 use crate::indexer::stamper::Stamper;
-use crate::indexer::MergeOperation;
 use crate::indexer::SegmentEntry;
 use crate::indexer::SegmentSerializer;
 use crate::indexer::{DefaultMergePolicy, MergePolicy};
+use crate::indexer::{MergeCandidate, MergeOperation};
 use crate::schema::Schema;
 use crate::Opstamp;
-use crate::Result;
-use futures::oneshot;
-use futures::sync::oneshot::Receiver;
-use futures::Future;
-use futures_cpupool::Builder as CpuPoolBuilder;
-use futures_cpupool::CpuFuture;
-use futures_cpupool::CpuPool;
-use serde_json;
+use futures::channel::oneshot;
+use futures::executor::{ThreadPool, ThreadPoolBuilder};
+use futures::future::Future;
+use futures::future::TryFutureExt;
 use std::borrow::BorrowMut;
-use std::collections::HashMap;
 use std::collections::HashSet;
 use std::io::Write;
-use std::mem;
-use std::ops::DerefMut;
+use std::ops::Deref;
 use std::path::PathBuf;
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use std::sync::RwLock;
-use std::thread;
-use std::thread::JoinHandle;
+const NUM_MERGE_THREADS: usize = 4;
 
 /// Save the index meta file.
 /// This operation is atomic :
@@ -49,7 +43,7 @@ use std::thread::JoinHandle;
 /// and flushed.
 ///
 /// This method is not part of tantivy's public API
-pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
+pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> {
     save_metas(
         &IndexMeta {
             segments: Vec::new(),
@@ -70,7 +64,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
 /// and flushed.
 ///
 /// This method is not part of tantivy's public API
-fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
+fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
     info!("save metas");
     let mut buffer = serde_json::to_vec_pretty(metas)?;
     // Just adding a new line at the end of the buffer.
@@ -89,21 +83,38 @@ fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
 // We voluntarily pass a merge_operation ref to guarantee that
 // the merge_operation is alive during the process
 #[derive(Clone)]
-pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
+pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>);
 
-fn perform_merge(
-    merge_operation: &MergeOperation,
+impl Deref for SegmentUpdater {
+    type Target = InnerSegmentUpdater;
+
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+async fn garbage_collect_files(
+    segment_updater: SegmentUpdater,
+) -> crate::Result<GarbageCollectionResult> {
+    info!("Running garbage collection");
+    let mut index = segment_updater.index.clone();
+    index
+        .directory_mut()
+        .garbage_collect(move || segment_updater.list_files())
+}
+
+/// Merges a list of segments the list of segment givens in the `segment_entries`.
+/// This function happens in the calling thread and is computationally expensive.
+fn merge(
     index: &Index,
     mut segment_entries: Vec<SegmentEntry>,
-) -> Result<SegmentEntry> {
-    let target_opstamp = merge_operation.target_opstamp();
+    target_opstamp: Opstamp,
+) -> crate::Result<SegmentEntry> {
 
     // first we need to apply deletes to our segment.
-    let mut merged_segment = index.new_segment();
+    let merged_segment = index.new_segment();
 
-    // TODO add logging
-    let schema = index.schema();
-
+    // First we apply all of the delet to the merged segment, up to the target opstamp.
     for segment_entry in &mut segment_entries {
         let segment = index.segment(segment_entry.meta().clone());
         advance_deletes(segment, segment_entry, target_opstamp)?;
@@ -117,22 +128,20 @@ fn perform_merge(
         .collect();
 
     // An IndexMerger is like a "view" of our merged segments.
-    let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?;
+    let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
 
-    // ... we just serialize this index merger in our new segment
-    // to merge the two segments.
-    let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
+    // ... we just serialize this index merger in our new segment to merge the two segments.
+    let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
 
     let num_docs = merger.write(segment_serializer)?;
 
-    let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
-
-    let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
-    Ok(after_merge_segment_entry)
+    let merged_segment_id = merged_segment.id();
+    let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
+    Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
 }
 
-struct InnerSegmentUpdater {
+pub(crate) struct InnerSegmentUpdater {
     // we keep a copy of the current active IndexMeta to
     // avoid loading the file everytime we need it in the
    // `SegmentUpdater`.
@@ -140,12 +149,12 @@ struct InnerSegmentUpdater {
    // This should be up to date as all update happen through
    // the unique active `SegmentUpdater`.
    active_metas: RwLock<Arc<IndexMeta>>,
-    pool: CpuPool,
+    pool: ThreadPool,
+    merge_thread_pool: ThreadPool,
+
    index: Index,
    segment_manager: SegmentManager,
-    merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
-    merging_thread_id: AtomicUsize,
-    merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
+    merge_policy: RwLock<Arc<dyn MergePolicy>>,
    killed: AtomicBool,
    stamper: Stamper,
    merge_operations: MergeOperationInventory,
@@ -156,90 +165,118 @@ impl SegmentUpdater {
         index: Index,
         stamper: Stamper,
         delete_cursor: &DeleteCursor,
-    ) -> Result<SegmentUpdater> {
+    ) -> crate::Result<SegmentUpdater> {
         let segments = index.searchable_segment_metas()?;
         let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
-        let pool = CpuPoolBuilder::new()
+        let pool = ThreadPoolBuilder::new()
             .name_prefix("segment_updater")
             .pool_size(1)
-            .create();
+            .create()
+            .map_err(|_| {
+                crate::TantivyError::SystemError(
+                    "Failed to spawn segment updater thread".to_string(),
+                )
+            })?;
+        let merge_thread_pool = ThreadPoolBuilder::new()
+            .name_prefix("merge_thread")
+            .pool_size(NUM_MERGE_THREADS)
+            .create()
+            .map_err(|_| {
+                crate::TantivyError::SystemError(
+                    "Failed to spawn segment merging thread".to_string(),
+                )
+            })?;
         let index_meta = index.load_metas()?;
         Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
             active_metas: RwLock::new(Arc::new(index_meta)),
             pool,
+            merge_thread_pool,
             index,
             segment_manager,
-            merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
-            merging_thread_id: AtomicUsize::default(),
-            merging_threads: RwLock::new(HashMap::new()),
+            merge_policy: RwLock::new(Arc::new(DefaultMergePolicy::default())),
             killed: AtomicBool::new(false),
             stamper,
             merge_operations: Default::default(),
         })))
     }
 
-    pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
-        self.0.merge_policy.read().unwrap().clone()
+    pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
+        self.merge_policy.read().unwrap().clone()
     }
 
     pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
-        let arc_merge_policy = Arc::new(merge_policy);
-        *self.0.merge_policy.write().unwrap() = arc_merge_policy;
+        let arc_merge_policy = Arc::from(merge_policy);
+        *self.merge_policy.write().unwrap() = arc_merge_policy;
     }
 
-    fn get_merging_thread_id(&self) -> usize {
-        self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
-    }
-
-    fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>(
+    fn schedule_future<T: 'static + Send, F: Future<Output = crate::Result<T>> + 'static + Send>(
         &self,
         f: F,
-    ) -> CpuFuture<T, TantivyError> {
-        let me_clone = self.clone();
-        self.0.pool.spawn_fn(move || Ok(f(me_clone)))
+    ) -> impl Future<Output = crate::Result<T>> {
+        let (sender, receiver) = oneshot::channel();
+        if self.is_alive() {
+            self.pool.spawn_ok(async move {
+                let _ = sender.send(f.await);
+            });
+        } else {
+            let _ = sender.send(Err(crate::TantivyError::SystemError(
+                "Segment updater killed".to_string(),
+            )));
+        }
+        receiver.unwrap_or_else(|_| {
+            let err_msg =
+                "A segment_updater future did not success. This should never happen.".to_string();
+            Err(crate::TantivyError::SystemError(err_msg))
+        })
     }
 
-    pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
-        self.run_async(|segment_updater| {
-            segment_updater.0.segment_manager.add_segment(segment_entry);
-            segment_updater.consider_merge_options();
-            true
+    pub fn schedule_add_segment(
+        &self,
+        segment_entry: SegmentEntry,
+    ) -> impl Future<Output = crate::Result<()>> {
+        let segment_updater = self.clone();
+        self.schedule_future(async move {
+            segment_updater.segment_manager.add_segment(segment_entry);
+            segment_updater.consider_merge_options().await;
+            Ok(())
         })
-        .forget();
-        true
     }
 
     /// Orders `SegmentManager` to remove all segments
     pub(crate) fn remove_all_segments(&self) {
-        self.0.segment_manager.remove_all_segments();
+        self.segment_manager.remove_all_segments();
     }
 
     pub fn kill(&mut self) {
-        self.0.killed.store(true, Ordering::Release);
+        self.killed.store(true, Ordering::Release);
     }
 
     pub fn is_alive(&self) -> bool {
-        !self.0.killed.load(Ordering::Acquire)
+        !self.killed.load(Ordering::Acquire)
     }
 
     /// Apply deletes up to the target opstamp to all segments.
     ///
     /// The method returns copies of the segment entries,
     /// updated with the delete information.
-    fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
-        let mut segment_entries = self.0.segment_manager.segment_entries();
+    fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
+        let mut segment_entries = self.segment_manager.segment_entries();
         for segment_entry in &mut segment_entries {
-            let segment = self.0.index.segment(segment_entry.meta().clone());
+            let segment = self.index.segment(segment_entry.meta().clone());
             advance_deletes(segment, segment_entry, target_opstamp)?;
         }
         Ok(segment_entries)
     }
 
-    pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) {
+    pub fn save_metas(
+        &self,
+        opstamp: Opstamp,
+        commit_message: Option<String>,
+    ) -> crate::Result<()> {
         if self.is_alive() {
-            let index = &self.0.index;
+            let index = &self.index;
             let directory = index.directory();
-            let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();
+            let mut commited_segment_metas = self.segment_manager.committed_segment_metas();
 
             // We sort segment_readers by number of documents.
             // This is an heuristic to make multithreading more efficient.
@@ -261,16 +298,18 @@ impl SegmentUpdater {
                 opstamp,
                 payload: commit_message,
             };
-            save_metas(&index_meta, directory.box_clone().borrow_mut())
-                .expect("Could not save metas.");
+            // TODO add context to the error.
+            save_metas(&index_meta, directory.box_clone().borrow_mut())?;
             self.store_meta(&index_meta);
         }
+        Ok(())
     }
 
-    pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> {
-        self.run_async(move |segment_updater| {
-            segment_updater.garbage_collect_files_exec();
-        })
+    pub fn schedule_garbage_collect(
+        &self,
+    ) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
+        let garbage_collect_future = garbage_collect_files(self.clone());
+        self.schedule_future(garbage_collect_future)
     }
 
     /// List the files that are useful to the index.
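The `schedule_future`, `schedule_add_segment`, and `schedule_garbage_collect` methods above all share one shape: an async block is pushed onto a `futures` `ThreadPool`, its result travels back through a oneshot channel, and the caller receives a plain future. The sketch below shows that pattern in isolation, assuming the `futures` 0.3 crate with its thread-pool feature; the `schedule` helper and its `String` error type are illustrative and not the crate's API.

```rust
use futures::channel::oneshot;
use futures::executor::{block_on, ThreadPool, ThreadPoolBuilder};
use futures::future::TryFutureExt;
use std::future::Future;

// Illustrative helper: run `task` on the pool and expose its result as a future.
fn schedule<T, F>(pool: &ThreadPool, task: F) -> impl Future<Output = Result<T, String>>
where
    T: Send + 'static,
    F: Future<Output = Result<T, String>> + Send + 'static,
{
    let (sender, receiver) = oneshot::channel();
    pool.spawn_ok(async move {
        // If the receiver was dropped, the send error is simply ignored.
        let _ = sender.send(task.await);
    });
    // A cancelled sender (task dropped before completion) becomes an error.
    receiver.unwrap_or_else(|_| Err("scheduled task was cancelled".to_string()))
}

fn main() {
    let pool = ThreadPoolBuilder::new().pool_size(1).create().unwrap();
    let value = block_on(schedule(&pool, async { Ok::<_, String>(42) })).unwrap();
    assert_eq!(value, 42);
}
```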
@@ -278,148 +317,130 @@ impl SegmentUpdater {
     /// This does not include lock files, or files that are obsolete
     /// but have not yet been deleted by the garbage collector.
     fn list_files(&self) -> HashSet<PathBuf> {
-        let mut files = HashSet::new();
+        let mut files: HashSet<PathBuf> = self
+            .index
+            .list_all_segment_metas()
+            .into_iter()
+            .flat_map(|segment_meta| segment_meta.list_files())
+            .collect();
         files.insert(META_FILEPATH.to_path_buf());
-        for segment_meta in self.0.index.list_all_segment_metas() {
-            files.extend(segment_meta.list_files());
-        }
         files
     }
 
-    fn garbage_collect_files_exec(&self) {
-        info!("Running garbage collection");
-        let mut index = self.0.index.clone();
-        index.directory_mut().garbage_collect(|| self.list_files());
-    }
-
-    pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> {
-        self.run_async(move |segment_updater| {
-            if segment_updater.is_alive() {
-                let segment_entries = segment_updater
-                    .purge_deletes(opstamp)
-                    .expect("Failed purge deletes");
-                segment_updater.0.segment_manager.commit(segment_entries);
-                segment_updater.save_metas(opstamp, payload);
-                segment_updater.garbage_collect_files_exec();
-                segment_updater.consider_merge_options();
-            }
+    pub fn schedule_commit(
+        &self,
+        opstamp: Opstamp,
+        payload: Option<String>,
+    ) -> impl Future<Output = crate::Result<()>> {
+        let segment_updater: SegmentUpdater = self.clone();
+        self.schedule_future(async move {
+            let segment_entries = segment_updater.purge_deletes(opstamp)?;
+            segment_updater.segment_manager.commit(segment_entries);
+            segment_updater.save_metas(opstamp, payload)?;
+            let _ = garbage_collect_files(segment_updater.clone()).await;
+            segment_updater.consider_merge_options().await;
+            Ok(())
         })
-        .wait()
-    }
-
-    pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
-        let commit_opstamp = self.load_metas().opstamp;
-        let merge_operation = MergeOperation::new(
-            &self.0.merge_operations,
-            commit_opstamp,
-            segment_ids.to_vec(),
-        );
-        self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
-            .wait()?
     }
 
     fn store_meta(&self, index_meta: &IndexMeta) {
-        *self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone());
-    }
-    fn load_metas(&self) -> Arc<IndexMeta> {
-        self.0.active_metas.read().unwrap().clone()
+        *self.active_metas.write().unwrap() = Arc::new(index_meta.clone());
     }
 
+    fn load_metas(&self) -> Arc<IndexMeta> {
+        self.active_metas.read().unwrap().clone()
+    }
+
+    pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
+        let commit_opstamp = self.load_metas().opstamp;
+        MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
+    }
+
+    // Starts a merge operation. This function will block until the merge operation is effectively
+    // started. Note that it does not wait for the merge to terminate.
+    // The calling thread should not be block for a long time, as this only involve waiting for the
+    // `SegmentUpdater` queue which in turns only contains lightweight operations.
+    //
+    // The merge itself happens on a different thread.
+    //
+    // When successful, this function returns a `Future` for a `Result<SegmentMeta>` that represents
+    // the actual outcome of the merge operation.
+    //
+    // It returns an error if for some reason the merge operation could not be started.
+    //
+    // At this point an error is not necessarily the sign of a malfunction.
+    // (e.g. A rollback could have happened, between the instant when the merge operaiton was
+    // suggested and the moment when it ended up being executed.)
+    //
     // `segment_ids` is required to be non-empty.
-    fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> {
+    pub fn start_merge(
+        &self,
+        merge_operation: MergeOperation,
+    ) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
         assert!(
             !merge_operation.segment_ids().is_empty(),
             "Segment_ids cannot be empty."
         );
 
-        let segment_updater_clone = self.clone();
+        let segment_updater = self.clone();
         let segment_entries: Vec<SegmentEntry> = self
-            .0
             .segment_manager
             .start_merge(merge_operation.segment_ids())?;
 
-        // let segment_ids_vec = merge_operation.segment_ids.to_vec();
-
-        let merging_thread_id = self.get_merging_thread_id();
-        info!(
-            "Starting merge thread #{} - {:?}",
-            merging_thread_id,
-            merge_operation.segment_ids()
-        );
-        let (merging_future_send, merging_future_recv) = oneshot();
-
-        // first we need to apply deletes to our segment.
-        let merging_join_handle = thread::Builder::new()
-            .name(format!("mergingthread-{}", merging_thread_id))
-            .spawn(move || {
-                // first we need to apply deletes to our segment.
-                let merge_result = perform_merge(
-                    &merge_operation,
-                    &segment_updater_clone.0.index,
-                    segment_entries,
-                );
-                match merge_result {
-                    Ok(after_merge_segment_entry) => {
-                        let merged_segment_meta = after_merge_segment_entry.meta().clone();
-                        segment_updater_clone
-                            .end_merge(merge_operation, after_merge_segment_entry)
-                            .expect("Segment updater thread is corrupted.");
-
-                        // the future may fail if the listener of the oneshot future
-                        // has been destroyed.
-                        //
-                        // This is not a problem here, so we just ignore any
-                        // possible error.
-                        let _merging_future_res = merging_future_send.send(merged_segment_meta);
-                    }
-                    Err(e) => {
-                        warn!(
-                            "Merge of {:?} was cancelled: {:?}",
-                            merge_operation.segment_ids(),
-                            e
-                        );
-                        // ... cancel merge
-                        if cfg!(test) {
-                            panic!("Merge failed.");
-                        }
-                        // As `merge_operation` will be dropped, the segment in merge state will
-                        // be available for merge again.
-                        // `merging_future_send` will be dropped, sending an error to the future.
-                    }
-                }
-                segment_updater_clone
-                    .0
-                    .merging_threads
-                    .write()
-                    .unwrap()
-                    .remove(&merging_thread_id);
-                Ok(())
-            })
-            .expect("Failed to spawn a thread.");
-        self.0
-            .merging_threads
-            .write()
-            .unwrap()
-            .insert(merging_thread_id, merging_join_handle);
-        Ok(merging_future_recv)
+        info!("Starting merge - {:?}", merge_operation.segment_ids());
+
+        let (merging_future_send, merging_future_recv) =
+            oneshot::channel::<crate::Result<SegmentMeta>>();
+
+        self.merge_thread_pool.spawn_ok(async move {
+            // The fact that `merge_operation` is moved here is important.
+            // Its lifetime is used to track how many merging thread are currently running,
+            // as well as which segment is currently in merge and therefore should not be
+            // candidate for another merge.
+            match merge(
+                &segment_updater.index,
+                segment_entries,
+                merge_operation.target_opstamp(),
+            ) {
+                Ok(after_merge_segment_entry) => {
+                    let segment_meta = segment_updater
+                        .end_merge(merge_operation, after_merge_segment_entry)
+                        .await;
+                    let _send_result = merging_future_send.send(segment_meta);
+                }
+                Err(e) => {
+                    warn!(
+                        "Merge of {:?} was cancelled: {:?}",
+                        merge_operation.segment_ids().to_vec(),
+                        e
+                    );
+                    // ... cancel merge
+                    if cfg!(test) {
+                        panic!("Merge failed.");
+                    }
+                }
+            }
+        });
+
+        Ok(merging_future_recv
+            .unwrap_or_else(|_| Err(crate::TantivyError::SystemError("Merge failed".to_string()))))
     }
 
-    fn consider_merge_options(&self) {
-        let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge();
+    async fn consider_merge_options(&self) {
+        let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
         let (committed_segments, uncommitted_segments) =
-            get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager);
+            get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
 
         // Committed segments cannot be merged with uncommitted_segments.
         // We therefore consider merges using these two sets of segments independently.
         let merge_policy = self.get_merge_policy();
 
-        let current_opstamp = self.0.stamper.stamp();
+        let current_opstamp = self.stamper.stamp();
         let mut merge_candidates: Vec<MergeOperation> = merge_policy
             .compute_merge_candidates(&uncommitted_segments)
             .into_iter()
             .map(|merge_candidate| {
-                MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0)
+                MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
             })
             .collect();
 
@@ -427,25 +448,17 @@ impl SegmentUpdater {
         let committed_merge_candidates = merge_policy
             .compute_merge_candidates(&committed_segments)
             .into_iter()
-            .map(|merge_candidate| {
-                MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0)
-            })
-            .collect::<Vec<_>>();
-        merge_candidates.extend(committed_merge_candidates.into_iter());
+            .map(|merge_candidate: MergeCandidate| {
+                MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
+            });
+        merge_candidates.extend(committed_merge_candidates);
 
         for merge_operation in merge_candidates {
-            match self.start_merge_impl(merge_operation) {
-                Ok(merge_future) => {
-                    if let Err(e) = merge_future.fuse().poll() {
-                        error!("The merge task failed quickly after starting: {:?}", e);
-                    }
-                }
-                Err(err) => {
-                    warn!(
-                        "Starting the merge failed for the following reason. This is not fatal. {}",
-                        err
-                    );
-                }
+            if let Err(err) = self.start_merge(merge_operation) {
+                warn!(
+                    "Starting the merge failed for the following reason. This is not fatal. {}",
+                    err
+                );
             }
         }
     }
@@ -454,15 +467,17 @@ impl SegmentUpdater {
         &self,
         merge_operation: MergeOperation,
         mut after_merge_segment_entry: SegmentEntry,
-    ) -> Result<()> {
-        self.run_async(move |segment_updater| {
+    ) -> impl Future<Output = crate::Result<SegmentMeta>> {
+        let segment_updater = self.clone();
+        let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
+        let end_merge_future = self.schedule_future(async move {
             info!("End merge {:?}", after_merge_segment_entry.meta());
             {
                 let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
                 if let Some(delete_operation) = delete_cursor.get() {
                     let committed_opstamp = segment_updater.load_metas().opstamp;
                     if delete_operation.opstamp < committed_opstamp {
-                        let index = &segment_updater.0.index;
+                        let index = &segment_updater.index;
                         let segment = index.segment(after_merge_segment_entry.meta().clone());
                         if let Err(e) = advance_deletes(
                             segment,
@@ -480,28 +495,33 @@ impl SegmentUpdater {
                            // ... cancel merge
                            // `merge_operations` are tracked. As it is dropped, the
                            // the segment_ids will be available again for merge.
-                            return;
+                            return Err(e);
                         }
                     }
                 }
                 let previous_metas = segment_updater.load_metas();
-                segment_updater
-                    .0
+                let segments_status = segment_updater
                     .segment_manager
-                    .end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
-                segment_updater.consider_merge_options();
-                segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
+                    .end_merge(merge_operation.segment_ids(), after_merge_segment_entry)?;
+
+                if segments_status == SegmentsStatus::Committed {
+                    segment_updater
+                        .save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
+                }
+
+                segment_updater.consider_merge_options().await;
             } // we drop all possible handle to a now useless `SegmentMeta`.
-            segment_updater.garbage_collect_files_exec();
-        })
-        .wait()
+            let _ = garbage_collect_files(segment_updater).await;
+            Ok(())
+        });
+        end_merge_future.map_ok(|_| after_merge_segment_meta)
     }
 
     /// Wait for current merging threads.
     ///
     /// Upon termination of the current merging threads,
     /// merge opportunity may appear.
-    //
+    ///
     /// We keep waiting until the merge policy judges that
     /// no opportunity is available.
     ///
@@ -512,26 +532,9 @@ impl SegmentUpdater {
     ///
     /// Obsolete files will eventually be cleaned up
     /// by the directory garbage collector.
-    pub fn wait_merging_thread(&self) -> Result<()> {
-        loop {
-            let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = {
-                let mut merging_threads = self.0.merging_threads.write().unwrap();
-                mem::replace(merging_threads.deref_mut(), HashMap::new())
-            };
-            if merging_threads.is_empty() {
-                return Ok(());
-            }
-            debug!("wait merging thread {}", merging_threads.len());
-            for (_, merging_thread_handle) in merging_threads {
-                merging_thread_handle
-                    .join()
-                    .map(|_| ())
-                    .map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
-            }
-            // Our merging thread may have queued their completed merged segment.
-            // Let's wait for that too.
-            self.run_async(move |_| {}).wait()?;
-        }
+    pub fn wait_merging_thread(&self) -> crate::Result<()> {
+        self.merge_operations.wait_until_empty();
+        Ok(())
     }
 }
 
@@ -551,7 +554,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
 
         {
@@ -604,7 +607,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
 
        {
            for _ in 0..100 {
@@ -675,7 +678,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
 
        {
            for _ in 0..100 {
@@ -687,7 +690,6 @@ mod tests {
         index_writer.segment_updater().remove_all_segments();
         let seg_vec = index_writer
             .segment_updater()
-            .0
             .segment_manager
             .segment_entries();
         assert!(seg_vec.is_empty());
@@ -2,7 +2,7 @@ use super::operation::AddOperation;
 use crate::core::Segment;
 use crate::core::SerializableSegment;
 use crate::fastfield::FastFieldsWriter;
-use crate::fieldnorm::FieldNormsWriter;
+use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
 use crate::indexer::segment_serializer::SegmentSerializer;
 use crate::postings::compute_table_size;
 use crate::postings::MultiFieldPostingsWriter;
@@ -11,20 +11,16 @@ use crate::schema::Schema;
 use crate::schema::Term;
 use crate::schema::Value;
 use crate::schema::{Field, FieldEntry};
-use crate::tokenizer::BoxedTokenizer;
-use crate::tokenizer::FacetTokenizer;
-use crate::tokenizer::{TokenStream, Tokenizer};
-use crate::DocId;
+use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
+use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
+use crate::tokenizer::{TokenStreamChain, Tokenizer};
 use crate::Opstamp;
-use crate::Result;
-use crate::TantivyError;
-use std::io;
-use std::str;
+use crate::{DocId, SegmentComponent};
 
 /// Computes the initial size of the hash table.
 ///
 /// Returns a number of bit `b`, such that the recommended initial table size is 2^b.
-fn initial_table_size(per_thread_memory_budget: usize) -> Result<usize> {
+fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
     let table_memory_upper_bound = per_thread_memory_budget / 3;
     if let Some(limit) = (10..)
         .take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound)
@@ -32,7 +28,7 @@ fn initial_table_size(per_thread_memory_budget: usize) -> Result<usize> {
     {
         Ok(limit.min(19)) // we cap it at 2^19 = 512K.
     } else {
-        Err(TantivyError::InvalidArgument(
+        Err(crate::TantivyError::InvalidArgument(
             format!("per thread memory budget (={}) is too small. Raise the memory budget or lower the number of threads.", per_thread_memory_budget)))
     }
 }
@@ -49,7 +45,8 @@ pub struct SegmentWriter {
     fast_field_writers: FastFieldsWriter,
     fieldnorms_writer: FieldNormsWriter,
     doc_opstamps: Vec<Opstamp>,
-    tokenizers: Vec<Option<BoxedTokenizer>>,
+    tokenizers: Vec<Option<TextAnalyzer>>,
+    term_buffer: Term,
 }
 
 impl SegmentWriter {
@@ -64,11 +61,12 @@ impl SegmentWriter {
     /// - schema
     pub fn for_segment(
         memory_budget: usize,
-        mut segment: Segment,
+        segment: Segment,
         schema: &Schema,
-    ) -> Result<SegmentWriter> {
+    ) -> crate::Result<SegmentWriter> {
+        let tokenizer_manager = segment.index().tokenizers().clone();
         let table_num_bits = initial_table_size(memory_budget)?;
-        let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
+        let segment_serializer = SegmentSerializer::for_segment(segment)?;
         let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
         let tokenizers = schema
             .fields()
@@ -78,7 +76,7 @@ impl SegmentWriter {
                     .get_indexing_options()
                     .and_then(|text_index_option| {
                         let tokenizer_name = &text_index_option.tokenizer();
-                        segment.index().tokenizers().get(tokenizer_name)
+                        tokenizer_manager.get(tokenizer_name)
                     }),
                 _ => None,
             },
@@ -92,6 +90,7 @@ impl SegmentWriter {
             fast_field_writers: FastFieldsWriter::from_schema(schema),
             doc_opstamps: Vec::with_capacity(1_000),
             tokenizers,
+            term_buffer: Term::new(),
         })
     }
 
@@ -99,7 +98,7 @@ impl SegmentWriter {
     ///
     /// Finalize consumes the `SegmentWriter`, so that it cannot
     /// be used afterwards.
-    pub fn finalize(mut self) -> Result<Vec<u64>> {
+    pub fn finalize(mut self) -> crate::Result<Vec<u64>> {
         self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
         write(
             &self.multifield_postings,
@@ -117,7 +116,11 @@ impl SegmentWriter {
     /// Indexes a new document
     ///
     /// As a user, you should rather use `IndexWriter`'s add_document.
-    pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
+    pub fn add_document(
+        &mut self,
+        add_operation: AddOperation,
+        schema: &Schema,
+    ) -> crate::Result<()> {
         let doc_id = self.max_doc;
         let mut doc = add_operation.document;
         self.doc_opstamps.push(add_operation.opstamp);
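The rewritten `add_document` hunk below collects one token stream per value of a multi-valued text field and keeps a running byte offset so the chained stream keeps tokens from different values apart. The sketch below shows only that offset bookkeeping, with a naive whitespace splitter standing in for the registered analyzer; `chain_offsets` is an illustrative helper, not a tantivy function.

```rust
// For each field value, record the base offset at which its tokens start,
// then advance the running offset by the value's byte length.
fn chain_offsets(values: &[&str]) -> Vec<(usize, String)> {
    let mut total_offset = 0;
    let mut chained = Vec::new();
    for value in values {
        for token in value.split_whitespace() {
            chained.push((total_offset, token.to_string()));
        }
        total_offset += value.len();
    }
    chained
}

fn main() {
    let chained = chain_offsets(&["hello world", "second value"]);
    // Tokens of the second value carry the 11-byte offset of the first value.
    assert_eq!(chained[2], (11, "second".to_string()));
}
```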
@@ -125,111 +128,150 @@ impl SegmentWriter {
         self.fast_field_writers.add_document(&doc);

         for (field, field_values) in doc.get_sorted_field_values() {
-            let field_options = schema.get_field_entry(field);
-            if !field_options.is_indexed() {
+            let field_entry = schema.get_field_entry(field);
+            let make_schema_error = || {
+                crate::TantivyError::SchemaError(format!(
+                    "Expected a {:?} for field {:?}",
+                    field_entry.field_type().value_type(),
+                    field_entry.name()
+                ))
+            };
+            if !field_entry.is_indexed() {
                 continue;
             }
-            match *field_options.field_type() {
-                FieldType::HierarchicalFacet => {
-                    let facets: Vec<&str> = field_values
-                        .iter()
-                        .flat_map(|field_value| match *field_value.value() {
-                            Value::Facet(ref facet) => Some(facet.encoded_str()),
-                            _ => {
-                                panic!("Expected hierarchical facet");
-                            }
-                        })
-                        .collect();
-                    let mut term = Term::for_field(field); // we set the Term
-                    for fake_str in facets {
+            let (term_buffer, multifield_postings) =
+                (&mut self.term_buffer, &mut self.multifield_postings);
+            match *field_entry.field_type() {
+                FieldType::HierarchicalFacet(_) => {
+                    term_buffer.set_field(field);
+                    let facets =
+                        field_values
+                            .iter()
+                            .flat_map(|field_value| match *field_value.value() {
+                                Value::Facet(ref facet) => Some(facet.encoded_str()),
+                                _ => {
+                                    panic!("Expected hierarchical facet");
+                                }
+                            });
+                    for facet_str in facets {
                         let mut unordered_term_id_opt = None;
-                        FacetTokenizer.token_stream(fake_str).process(&mut |token| {
-                            term.set_text(&token.text);
-                            let unordered_term_id =
-                                self.multifield_postings.subscribe(doc_id, &term);
-                            unordered_term_id_opt = Some(unordered_term_id);
-                        });
+                        FacetTokenizer
+                            .token_stream(facet_str)
+                            .process(&mut |token| {
+                                term_buffer.set_text(&token.text);
+                                let unordered_term_id =
+                                    multifield_postings.subscribe(doc_id, &term_buffer);
+                                unordered_term_id_opt = Some(unordered_term_id);
+                            });
                         if let Some(unordered_term_id) = unordered_term_id_opt {
                             self.fast_field_writers
                                 .get_multivalue_writer(field)
-                                .expect("multified writer for facet missing")
+                                .expect("writer for facet missing")
                                 .add_val(unordered_term_id);
                         }
                     }
                 }
                 FieldType::Str(_) => {
-                    let num_tokens = if let Some(ref mut tokenizer) =
-                        self.tokenizers[field.field_id() as usize]
-                    {
-                        let texts: Vec<&str> = field_values
-                            .iter()
-                            .flat_map(|field_value| match *field_value.value() {
-                                Value::Str(ref text) => Some(text.as_str()),
-                                _ => None,
-                            })
-                            .collect();
-                        if texts.is_empty() {
-                            0
-                        } else {
-                            let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
-                            self.multifield_postings
-                                .index_text(doc_id, field, &mut token_stream)
+                    let mut token_streams: Vec<BoxTokenStream> = vec![];
+                    let mut offsets = vec![];
+                    let mut total_offset = 0;
+
+                    for field_value in field_values {
+                        match field_value.value() {
+                            Value::PreTokStr(tok_str) => {
+                                offsets.push(total_offset);
+                                if let Some(last_token) = tok_str.tokens.last() {
+                                    total_offset += last_token.offset_to;
+                                }
+                                token_streams
+                                    .push(PreTokenizedStream::from(tok_str.clone()).into());
+                            }
+                            Value::Str(ref text) => {
+                                if let Some(ref mut tokenizer) =
+                                    self.tokenizers[field.field_id() as usize]
+                                {
+                                    offsets.push(total_offset);
+                                    total_offset += text.len();
+                                    token_streams.push(tokenizer.token_stream(text));
+                                }
+                            }
+                            _ => (),
                         }
-                    } else {
+                    }
+
+                    let num_tokens = if token_streams.is_empty() {
                         0
+                    } else {
+                        let mut token_stream = TokenStreamChain::new(offsets, token_streams);
+                        multifield_postings.index_text(
+                            doc_id,
+                            field,
+                            &mut token_stream,
+                            term_buffer,
+                        )
                     };
+
                     self.fieldnorms_writer.record(doc_id, field, num_tokens);
                 }
-                FieldType::U64(ref int_option) => {
-                    if int_option.is_indexed() {
-                        for field_value in field_values {
-                            let term = Term::from_field_u64(
-                                field_value.field(),
-                                field_value.value().u64_value(),
-                            );
-                            self.multifield_postings.subscribe(doc_id, &term);
-                        }
+                FieldType::U64(_) => {
+                    for field_value in field_values {
+                        term_buffer.set_field(field_value.field());
+                        let u64_val = field_value
+                            .value()
+                            .u64_value()
+                            .ok_or_else(make_schema_error)?;
+                        term_buffer.set_u64(u64_val);
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
-                FieldType::Date(ref int_option) => {
-                    if int_option.is_indexed() {
-                        for field_value in field_values {
-                            let term = Term::from_field_i64(
-                                field_value.field(),
-                                field_value.value().date_value().timestamp(),
-                            );
-                            self.multifield_postings.subscribe(doc_id, &term);
-                        }
+                FieldType::Date(_) => {
+                    for field_value in field_values {
+                        term_buffer.set_field(field_value.field());
+                        let date_val = field_value
+                            .value()
+                            .date_value()
+                            .ok_or_else(make_schema_error)?;
+                        term_buffer.set_i64(date_val.timestamp());
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
-                FieldType::I64(ref int_option) => {
-                    if int_option.is_indexed() {
-                        for field_value in field_values {
-                            let term = Term::from_field_i64(
-                                field_value.field(),
-                                field_value.value().i64_value(),
-                            );
-                            self.multifield_postings.subscribe(doc_id, &term);
-                        }
+                FieldType::I64(_) => {
+                    for field_value in field_values {
+                        term_buffer.set_field(field_value.field());
+                        let i64_val = field_value
+                            .value()
+                            .i64_value()
+                            .ok_or_else(make_schema_error)?;
+                        term_buffer.set_i64(i64_val);
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
-                FieldType::F64(ref int_option) => {
-                    if int_option.is_indexed() {
-                        for field_value in field_values {
-                            let term = Term::from_field_f64(
-                                field_value.field(),
-                                field_value.value().f64_value(),
-                            );
-                            self.multifield_postings.subscribe(doc_id, &term);
-                        }
+                FieldType::F64(_) => {
+                    for field_value in field_values {
+                        term_buffer.set_field(field_value.field());
+                        let f64_val = field_value
+                            .value()
+                            .f64_value()
+                            .ok_or_else(make_schema_error)?;
+                        term_buffer.set_f64(f64_val);
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
-                FieldType::Bytes => {
-                    // Do nothing. Bytes only supports fast fields.
+                FieldType::Bytes(_) => {
+                    for field_value in field_values {
+                        term_buffer.set_field(field_value.field());
+                        let bytes = field_value
+                            .value()
+                            .bytes_value()
+                            .ok_or_else(make_schema_error)?;
+                        term_buffer.set_bytes(bytes);
+                        self.multifield_postings.subscribe(doc_id, &term_buffer);
+                    }
                 }
             }
         }
         doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
+        doc.prepare_for_store();
         let doc_writer = self.segment_serializer.get_store_writer();
         doc_writer.store(&doc)?;
         self.max_doc += 1;
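The hunk above replaces the per-value `Term::from_field_*` allocations with a single reusable `term_buffer`, and turns values that do not match the schema into a `SchemaError` instead of indexing garbage or panicking. The standalone sketch below illustrates both patterns with hypothetical stand-in types (`SchemaError`, `TermBuffer`, `index_u64` are not tantivy's API): the error message is only built lazily via `ok_or_else`, and the same buffer is cleared and refilled for each value.

// Standalone sketch of the two patterns used above; all names are hypothetical.
#[derive(Debug)]
struct SchemaError(String);

#[derive(Default)]
struct TermBuffer(Vec<u8>);

impl TermBuffer {
    // Reuse the same allocation for every value instead of building a new term.
    fn set_u64(&mut self, field: u32, val: u64) {
        self.0.clear();
        self.0.extend_from_slice(&field.to_be_bytes());
        self.0.extend_from_slice(&val.to_be_bytes());
    }
}

fn index_u64(buffer: &mut TermBuffer, field: u32, value: Option<u64>) -> Result<(), SchemaError> {
    // `ok_or_else` only formats the error message when the value is actually missing,
    // mirroring the `make_schema_error` closure in the diff.
    let val = value.ok_or_else(|| SchemaError(format!("Expected a u64 for field {}", field)))?;
    buffer.set_u64(field, val);
    Ok(())
}

fn main() {
    let mut buffer = TermBuffer::default();
    assert!(index_u64(&mut buffer, 1, Some(42)).is_ok());
    assert!(index_u64(&mut buffer, 1, None).is_err());
}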
@@ -263,16 +305,23 @@ fn write(
     fast_field_writers: &FastFieldsWriter,
     fieldnorms_writer: &FieldNormsWriter,
     mut serializer: SegmentSerializer,
-) -> Result<()> {
-    let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
+) -> crate::Result<()> {
+    if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
+        fieldnorms_writer.serialize(fieldnorms_serializer)?;
+    }
+    let fieldnorm_data = serializer
+        .segment()
+        .open_read(SegmentComponent::FIELDNORMS)?;
+    let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
+    let term_ord_map =
+        multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
     fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
-    fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
     serializer.close()?;
     Ok(())
 }

 impl SerializableSegment for SegmentWriter {
-    fn write(&self, serializer: SegmentSerializer) -> Result<u32> {
+    fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32> {
         let max_doc = self.max_doc;
         write(
             &self.multifield_postings,
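In the revised `write`, the fieldnorm component is serialized first and then re-opened, so the postings serializer can read the per-document lengths back while it writes the posting lists. A rough sketch of that two-step ordering, using plain in-memory buffers and hypothetical helper names rather than tantivy's serializer types:

// Hypothetical stand-ins, not tantivy's serializers. The point is the ordering:
// serialize the fieldnorms, re-open that data, then hand it to the postings stage.
fn serialize_fieldnorms(num_tokens_per_doc: &[u8]) -> Vec<u8> {
    num_tokens_per_doc.to_vec()
}

fn serialize_postings(doc_ids: &[u32], fieldnorm_reader: &[u8]) -> Vec<(u32, u8)> {
    // The postings stage can now consult the already-written fieldnorm data.
    doc_ids
        .iter()
        .map(|&doc| (doc, fieldnorm_reader[doc as usize]))
        .collect()
}

fn main() {
    let fieldnorm_data = serialize_fieldnorms(&[3, 7, 1]); // step 1: write fieldnorms
    let postings = serialize_postings(&[0, 2], &fieldnorm_data); // step 2: postings read them back
    assert_eq!(postings, vec![(0, 3), (2, 1)]);
}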
@@ -1,18 +1,76 @@
 use crate::Opstamp;
 use std::ops::Range;
-use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::atomic::Ordering;
 use std::sync::Arc;

+#[cfg(not(target_arch = "arm"))]
+mod atomic_impl {
+
+    use crate::Opstamp;
+    use std::sync::atomic::{AtomicU64, Ordering};
+
+    #[derive(Default)]
+    pub struct AtomicU64Wrapper(AtomicU64);
+
+    impl AtomicU64Wrapper {
+        pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
+            AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
+        }
+
+        pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
+            self.0.fetch_add(val as u64, order) as u64
+        }
+
+        pub fn revert(&self, val: u64, order: Ordering) -> u64 {
+            self.0.store(val, order);
+            val
+        }
+    }
+}
+
+#[cfg(target_arch = "arm")]
+mod atomic_impl {
+
+    use crate::Opstamp;
+    /// Under other architecture, we rely on a mutex.
+    use std::sync::atomic::Ordering;
+    use std::sync::RwLock;
+
+    #[derive(Default)]
+    pub struct AtomicU64Wrapper(RwLock<u64>);
+
+    impl AtomicU64Wrapper {
+        pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
+            AtomicU64Wrapper(RwLock::new(first_opstamp))
+        }
+
+        pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
+            let mut lock = self.0.write().unwrap();
+            let previous_val = *lock;
+            *lock = previous_val + incr;
+            previous_val
+        }
+
+        pub fn revert(&self, val: u64, _order: Ordering) -> u64 {
+            let mut lock = self.0.write().unwrap();
+            *lock = val;
+            val
+        }
+    }
+}
+
+use self::atomic_impl::AtomicU64Wrapper;
+
 /// Stamper provides Opstamps, which is just an auto-increment id to label
 /// an operation.
 ///
 /// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
 #[derive(Clone, Default)]
-pub struct Stamper(Arc<AtomicU64>);
+pub struct Stamper(Arc<AtomicU64Wrapper>);

 impl Stamper {
     pub fn new(first_opstamp: Opstamp) -> Stamper {
-        Stamper(Arc::new(AtomicU64::new(first_opstamp)))
+        Stamper(Arc::new(AtomicU64Wrapper::new(first_opstamp)))
     }

     pub fn stamp(&self) -> Opstamp {
@@ -31,8 +89,7 @@ impl Stamper {

     /// Reverts the stamper to a given `Opstamp` value and returns it
     pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
-        self.0.store(to_opstamp, Ordering::SeqCst);
-        to_opstamp
+        self.0.revert(to_opstamp, Ordering::SeqCst)
     }
 }

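The new `atomic_impl` modules put a single `AtomicU64Wrapper` facade in front of `Stamper`: a real `AtomicU64` on most targets, and an `RwLock<u64>` fallback on `arm`, where 64-bit atomics may be unavailable. A minimal sketch of how such a shared counter behaves, using only plain `std` types and independent of tantivy:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Cloning shares the counter; it does not fork the stamp sequence.
#[derive(Clone, Default)]
struct Counter(Arc<AtomicU64>);

impl Counter {
    // fetch_add returns the previous value, so each caller gets a unique stamp.
    fn stamp(&self) -> u64 {
        self.0.fetch_add(1, Ordering::SeqCst)
    }
}

fn main() {
    let a = Counter::default();
    let b = a.clone();
    assert_eq!(a.stamp(), 0);
    assert_eq!(b.stamp(), 1); // the clone continues the same sequence
}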
673 src/lib.rs (Executable file → Normal file): file diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.