Compare commits

..

15 Commits

Author SHA1 Message Date
François Massot 14d53851a8 Fix clippy, clean comment. 2022-03-28 01:17:07 +02:00
François Massot 2d176e66b6 Format. 2022-03-26 22:29:43 +01:00
François Massot 838a332db0 Fix fmt. 2022-03-26 21:33:08 +01:00
François Massot defbd9139b Update fastfield codecs readme. 2022-03-26 21:33:08 +01:00
François Massot 0c87732459 Fix makefile. 2022-03-26 21:33:08 +01:00
François Massot 4d66a3f0a0 Put deprecated attributes on deprecated codecs. Clean. 2022-03-26 21:33:06 +01:00
François Massot 977f01a8a3 Deprecate linear and multilienar fast field coded, add piece wise and FOR. Update tests and clean. 2022-03-26 21:27:15 +01:00
François Massot c14bdd26d4 Clean. 2022-03-26 21:18:13 +01:00
François Massot 3272f80171 Fix clippy. 2022-03-26 21:17:32 +01:00
François Massot 23d5ab5656 Rename new codecs. 2022-03-26 21:17:32 +01:00
François Massot 245ed5fed1 Add float dataset for comparing fast field codec. 2022-03-26 21:17:32 +01:00
François Massot 33bed01168 Clean frame of ref codec. 2022-03-26 21:17:32 +01:00
François Massot 17a5f4f0ff Seed random datasets in fast field codecs comparison. 2022-03-26 21:17:30 +01:00
François Massot c969582308 Add frame of reference codecs. 2022-03-26 21:16:50 +01:00
François Massot 18d2ee5bb7 Add another multilinear interpolation and real world dataset. 2022-03-26 21:15:50 +01:00
203 changed files with 4380 additions and 9575 deletions

View File

@@ -12,14 +12,13 @@ jobs:
 steps:
 - uses: actions/checkout@v3
 - name: Install Rust
-run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
-- uses: Swatinem/rust-cache@v2
-- uses: taiki-e/install-action@cargo-llvm-cov
+run: rustup toolchain install nightly --component llvm-tools-preview
+- name: Install cargo-llvm-cov
+run: curl -LsSf https://github.com/taiki-e/cargo-llvm-cov/releases/latest/download/cargo-llvm-cov-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
 - name: Generate code coverage
-run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
+run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
 - name: Upload coverage to Codecov
-uses: codecov/codecov-action@v3
-continue-on-error: true
+uses: codecov/codecov-action@v2
 with:
 token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
 files: lcov.info

View File

@@ -9,20 +9,16 @@ env:
 NUM_FUNCTIONAL_TEST_ITERATIONS: 20000
 jobs:
-test:
+functional_test_unsorted:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- name: Install stable
-uses: actions-rs/toolchain@v1
-with:
-toolchain: stable
-profile: minimal
-override: true
 - name: Run indexing_unsorted
 run: cargo test indexing_unsorted -- --ignored
+functional_test_sorted:
+runs-on: ubuntu-latest
+steps:
+- uses: actions/checkout@v3
 - name: Run indexing_sorted
 run: cargo test indexing_sorted -- --ignored

View File

@@ -10,27 +10,33 @@ env:
 CARGO_TERM_COLOR: always
 jobs:
-check:
+test:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- name: Install nightly
+- name: Build
+run: cargo build --verbose --workspace
+- name: Install latest nightly to test also against unstable feature flag
 uses: actions-rs/toolchain@v1
 with:
 toolchain: nightly
-profile: minimal
+override: true
 components: rustfmt
-- name: Install stable
+- name: Install latest nightly to test also against unstable feature flag
 uses: actions-rs/toolchain@v1
 with:
 toolchain: stable
-profile: minimal
-components: clippy
+override: true
+components: rustfmt, clippy
-- uses: Swatinem/rust-cache@v2
+- name: Run tests
+run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,failpoints --verbose --workspace
+- name: Run tests quickwit feature
+run: cargo +stable test --features mmap,quickwit,failpoints --verbose --workspace
 - name: Check Formatting
 run: cargo +nightly fmt --all -- --check
@@ -41,34 +47,3 @@ jobs:
 token: ${{ secrets.GITHUB_TOKEN }}
 args: --tests
-test:
-runs-on: ubuntu-latest
-strategy:
-matrix:
-features: [
-{ label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
-{ label: "quickwit", flags: "mmap,quickwit,failpoints" }
-]
-name: test-${{ matrix.features.label}}
-steps:
-- uses: actions/checkout@v3
-- name: Install stable
-uses: actions-rs/toolchain@v1
-with:
-toolchain: stable
-profile: minimal
-override: true
-- uses: taiki-e/install-action@nextest
-- uses: Swatinem/rust-cache@v2
-- name: Run tests
-run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace
-- name: Run doctests
-run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace

View File

@@ -10,7 +10,6 @@ Tantivy's bread and butter is to address the problem of full-text search :
 Given a large set of textual documents, and a text query, return the K-most relevant documents in a very efficient way. To execute these queries rapidly, the tantivy needs to build an index beforehand. The relevance score implemented in the tantivy is not configurable. Tantivy uses the same score as the default similarity used in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).
 But tantivy's scope does not stop there. Numerous features are required to power rich-search applications. For instance, one may want to:
 - compute the count of documents matching a query in the different section of an e-commerce website,
 - display an average price per meter square for a real estate search engine,
 - take into account historical user data to rank documents in a specific way,
@@ -23,28 +22,27 @@ rapidly select all documents matching a given predicate (also known as a query)
 collect some information about them ([See collector](#collector-define-what-to-do-with-matched-documents)).
 Roughly speaking the design is following these guiding principles:
 - Search should be O(1) in memory.
 - Indexing should be O(1) in memory. (In practice it is just sublinear)
 - Search should be as fast as possible
 This comes at the cost of the dynamicity of the index: while it is possible to add, and delete documents from our corpus, the tantivy is designed to handle these updates in large batches.
-## [core/](src/core): Index, segments, searchers
+## [core/](src/core): Index, segments, searchers.
 Core contains all of the high-level code to make it possible to create an index, add documents, delete documents and commit.
 This is both the most high-level part of tantivy, the least performance-sensitive one, the seemingly most mundane code... And paradoxically the most complicated part.
-### Index and Segments
+### Index and Segments...
 A tantivy index is a collection of smaller independent immutable segments.
 Each segment contains its own independent set of data structures.
 A segment is identified by a segment id that is in fact a UUID.
 The file of a segment has the format
-```segment-id . ext```
+```segment-id . ext ```
 The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.
@@ -54,15 +52,17 @@ On commit, one segment per indexing thread is written to disk, and the `meta.jso
 For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).
 ### Deletes
 Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it was a primary id, and delete the associated term if they want to delete only one specific document.
-On commit, tantivy will find all of the segments with documents matching this existing term and remove from [alive bitset file](src/fastfield/alive_bitset.rs) that represents the bitset of the alive document ids.
+On commit, tantivy will find all of the segments with documents matching this existing term and create a [tombstone file](src/fastfield/delete.rs) that represents the bitset of the document that are deleted.
-Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ```segment_id . commit_opstamp . del```.
+Like all segment files, this file is immutable. Because it is possible to have more than one tombstone file at a given instant, the tombstone filename has the format ``` segment_id . commit_opstamp . del```.
 An opstamp is simply an incremental id that identifies any operation applied to the index. For instance, performing a commit or adding a document.
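As a hedged illustration of that flow (the `doc_id` field name, the writer heap size, and the exact `add_document` return handling are assumptions; signatures vary slightly across tantivy versions):

```rust
use tantivy::schema::{Schema, STRING};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // A user-managed "primary key" field: indexed as a single raw token.
    let doc_id = schema_builder.add_text_field("doc_id", STRING);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    let _ = writer.add_document(doc!(doc_id => "doc-1"));
    writer.commit()?;

    // "Deleting a document" really deletes every document containing this term;
    // the delete only becomes effective at the next commit.
    writer.delete_term(Term::from_field_text(doc_id, "doc-1"));
    writer.commit()?;
    Ok(())
}
```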
 ### DocId
 Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`.
@@ -74,7 +74,6 @@ The DocIds are simply allocated in the order documents are added to the index.
 In separate threads, tantivy's index writer search for opportunities to merge segments.
 The point of segment merge is to:
 - eventually get rid of tombstoned documents
 - reduce the otherwise ever-growing number of segments.
@@ -105,7 +104,6 @@ Tantivy's document follows a very strict schema, decided before building any ind
 The schema defines all of the fields that the indexes [`Document`](src/schema/document.rs) may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...) as well as how it should be indexed / represented in tantivy.
 Depending on the type of the field, you can decide to
 - put it in the docstore
 - store it as a fast field
 - index it
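A hedged sketch of how those choices are expressed when declaring a schema (field names are made up):

```rust
use tantivy::schema::{Schema, FAST, INDEXED, STORED, STRING, TEXT};

fn build_schema() -> Schema {
    let mut builder = Schema::builder();
    // Tokenized, indexed, and kept in the docstore.
    builder.add_text_field("title", TEXT | STORED);
    // Indexed as one raw token, usable as a user-managed primary key.
    builder.add_text_field("doc_id", STRING);
    // Column-oriented fast field, also indexed.
    builder.add_u64_field("price", FAST | INDEXED);
    builder.build()
}
```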
@@ -119,10 +117,9 @@ As of today, tantivy's schema imposes a 1:1 relationship between a field that is
 This is not something tantivy supports, and it is up to the user to duplicate field / concatenate fields before feeding them to tantivy.
-## General information about these data structures
+## General information about these data structures.
 All data structures in tantivy, have:
 - a writer
 - a serializer
 - a reader
@@ -135,7 +132,7 @@ This conversion is done by the serializer.
 Finally, the reader is in charge of offering an API to read on this on-disk read-only representation.
 In tantivy, readers are designed to require very little anonymous memory. The data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.
-## [store/](src/store): Here is my DocId, Gimme my document
+## [store/](src/store): Here is my DocId, Gimme my document!
 The docstore is a row-oriented storage that, for each document, stores a subset of the fields
 that are marked as stored in the schema. The docstore is compressed using a general-purpose algorithm
@@ -149,7 +146,6 @@ Once the top 10 documents have been identified, we fetch them from the store, an
 **Not useful for**
 Fetching a document from the store is typically a "slow" operation. It usually consists in
 - searching into a compact tree-like data structure to find the position of the right block.
 - decompressing a small block
 - returning the document from this block.
@@ -158,7 +154,8 @@ It is NOT meant to be called for every document matching a query.
 As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.
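A hedged sketch of that access pattern (collector choice, field, and query string are assumptions): the scoring phase never touches the docstore, only the final top hits do.

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{Document, Index};

fn print_top10(index: &Index, title: Field) -> tantivy::Result<()> {
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(index, vec![title]).parse_query("happy tax payer")?;

    // Scoring walks the inverted index; no docstore access here.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

    // Only the 10 winning documents are decompressed from the docstore.
    for (_score, doc_address) in top_docs {
        let stored: Document = searcher.doc(doc_address)?;
        println!("{:?}", stored);
    }
    Ok(())
}
```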
-## [fastfield/](src/fastfield): Here is my DocId, Gimme my value
+## [fastfield/](src/fastfield): Here is my DocId, Gimme my value!
 Fast fields are stored in a column-oriented storage that allows for random access.
 The only compression applied is bitpacking. The column comes with two meta data.
@@ -166,7 +163,7 @@ The minimum value in the column and the number of bits per doc.
 Fetching a value for a `DocId` is then as simple as computing
-```rust
+```
 min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
 ```
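A self-contained, hedged sketch of that computation (illustrative only; the real reader works on an mmapped column and the padding/edge cases live in `fastfield_codecs`):

```rust
/// Decode the bitpacked value of `doc_id` from a fast field column.
/// Assumes `num_bits <= 56` and that `data` is padded so that reading
/// 8 bytes at any value boundary stays in bounds.
fn fetch_value(data: &[u8], doc_id: u64, num_bits: u8, min_value: u64) -> u64 {
    if num_bits == 0 {
        return min_value; // every doc shares the same value
    }
    let mask = (1u64 << num_bits) - 1;
    let bit_addr = doc_id * num_bits as u64;
    let byte_addr = (bit_addr / 8) as usize;
    let bit_shift = (bit_addr % 8) as u32;
    let word = u64::from_le_bytes(data[byte_addr..byte_addr + 8].try_into().unwrap());
    min_value + ((word >> bit_shift) & mask)
}
```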
@@ -193,7 +190,7 @@ For advanced search engine, it is possible to store all of the features required
 Finally facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).
-# The inverted search index
+# The inverted search index.
 The inverted index is the core part of full-text search.
 When presented a new document with the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting these strings into tokens, it might also do different kinds of operations like dropping the punctuation, converting the character to lowercase, apply stemming, etc. Tantivy makes it possible to configure the operations to be applied in the schema (tokenizer/ is the place where these operations are implemented).
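A hedged sketch of that pipeline with the built-in tokenizer types (the exact constructor/builder API differs a little between tantivy versions):

```rust
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};

fn main() {
    // Split on non-alphanumeric characters, then lowercase every token.
    let analyzer = TextAnalyzer::from(SimpleTokenizer).filter(LowerCaser);
    let mut stream = analyzer.token_stream("Hello, happy tax payer!");
    while stream.advance() {
        let token = stream.token();
        println!("{:?} ({}..{})", token.text, token.offset_from, token.offset_to);
    }
    // Expected tokens: "hello", "happy", "tax", "payer"
}
```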
@@ -218,18 +215,19 @@ The inverted index actually consists of two data structures chained together.
 Where [TermInfo](src/postings/term_info.rs) is an object containing some meta data about a term.
-## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)
+## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)!
 Tantivy's term dictionary is mainly in charge of supplying the function
 [Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)
 It is itself broken into two parts.
 - [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
 - [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.
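A hedged sketch of that two-step lookup; the method names (`term_ord`, `term_info_from_ord`) are recalled from the 0.17-era `TermDictionary` and should be treated as assumptions:

```rust
use tantivy::postings::TermInfo;
use tantivy::termdict::TermDictionary;

fn lookup(dict: &TermDictionary, term_bytes: &[u8]) -> std::io::Result<Option<TermInfo>> {
    // Step 1: the fst maps the term bytes to a dense TermOrdinal.
    let term_ord = match dict.term_ord(term_bytes)? {
        Some(ord) => ord,
        None => return Ok(None),
    };
    // Step 2: the term info store maps that ordinal to the TermInfo metadata
    // (doc frequency, postings/positions byte ranges, ...).
    Ok(Some(dict.term_info_from_ord(term_ord)))
}
```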
-## [postings/](src/postings): Iterate over documents... very fast
+## [postings/](src/postings): Iterate over documents... very fast!
 A posting list makes it possible to store a sorted list of doc ids and for each doc store
 a term frequency as well.
@@ -251,7 +249,7 @@ For instance, when the phrase query "the art of war" does not match "the war of
 To make it possible, it is possible to specify in the schema that a field should store positions in addition to being indexed.
 The token positions of all of the terms are then stored in a separate file with the extension `.pos`.
-The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate through the docset,
+The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate throught the docset,
 we advance the position reader by the number of term frequencies of the current document.
 ## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?
@@ -259,6 +257,7 @@ we advance the position reader by the number of term frequencies of the current
 The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires to know the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm.
 The fieldnorm is therefore compressed. Values up to 40 are encoded unchanged.
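To make the one-byte encoding concrete, here is an illustrative sketch; only the "exact up to 40" part comes from the text above, the ~12% bucket growth is an assumption (the real table lives in `src/fieldnorm/code.rs`).

```rust
/// Illustrative 1-byte fieldnorm code: exact up to 40 tokens, then
/// geometrically growing buckets (~12% per code point, an assumption).
fn decode_fieldnorm(code: u8) -> u32 {
    if code <= 40 {
        code as u32
    } else {
        (40.0 * 1.12f64.powi(code as i32 - 40)).round() as u32
    }
}

/// Smallest code whose decoded length covers `num_tokens`.
fn encode_fieldnorm(num_tokens: u32) -> u8 {
    (0u8..=255)
        .find(|&code| decode_fieldnorm(code) >= num_tokens)
        .unwrap_or(255)
}
```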
 ## [tokenizer/](src/tokenizer): How should we process text?
 Text processing is key to a good search experience.
@@ -269,6 +268,7 @@ Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./
 Tantivy's comes with few tokenizers, but external crates are offering advanced tokenizers, such as [Lindera](https://crates.io/crates/lindera) for Japanese.
 ## [query/](src/query): Define and compose queries
 The [Query](src/query/query.rs) trait defines what a query is.

View File

@@ -1,14 +1,5 @@
-Tantivy 0.19
+Unreleased
 ================================
-- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
-The `DateTime` type has been updated to hold timestamps with microseconds precision.
-`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing).
-- Remove Searcher pool and make `Searcher` cloneable.
-Tantivy 0.18
-================================
 - For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
 - The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
 - The type alias `tantivy::DateTime` has been removed.
@@ -17,14 +8,9 @@ Tantivy 0.18
 - Converting a `time::OffsetDateTime` to `Value::Date` implicitly converts the value into UTC.
 If this is not desired do the time zone conversion yourself and use `time::PrimitiveDateTime`
 directly instead.
-- Add [histogram](https://github.com/quickwit-oss/tantivy/pull/1306) aggregation (@PSeitz)
-- Add support for fastfield on text fields (@PSeitz)
-- Add terms aggregation (@PSeitz)
-- Add support for zstd compression (@kryesh)
 Tantivy 0.17
 ================================
 - LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
 - Adds a searcher Warmer API (@shikhar @fulmicoton)
 - Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
@@ -33,45 +19,39 @@ Tantivy 0.17
 - Schema now offers not indexing fieldnorms (@lpouget) [#922](https://github.com/quickwit-oss/tantivy/issues/922)
 - Reduce the number of fsync calls [#1225](https://github.com/quickwit-oss/tantivy/issues/1225)
 - Fix opening bytes index with dynamic codec (@PSeitz) [#1278](https://github.com/quickwit-oss/tantivy/issues/1278)
-- Added an aggregation collector for range, average and stats compatible with Elasticsearch. (@PSeitz)
+- Added an aggregation collector compatible with Elasticsearch (@PSeitz)
 - Added a JSON schema type @fulmicoton [#1251](https://github.com/quickwit-oss/tantivy/issues/1251)
 - Added support for slop in phrase queries @halvorboe [#1068](https://github.com/quickwit-oss/tantivy/issues/1068)
 Tantivy 0.16.2
 ================================
-- Bugfix in FuzzyTermQuery. (tranposition_cost_one was not doing anything)
+- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)
 Tantivy 0.16.1
 ========================
 - Major Bugfix on multivalued fastfield. #1151
 - Demux operation (@PSeitz)
 Tantivy 0.16.0
 =========================
 - Bugfix in the filesum check. (@evanxg852000) #1127
 - Bugfix in positions when the index is sorted by a field. (@appaquet) #1125
 Tantivy 0.15.3
 =========================
 - Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
 Tantivy 0.15.2
 ========================
 - Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088
 Tantivy 0.15.1
 =========================
 - Major bugfix. DocStore panics when first block is deleted. (@appaquet) #1077
 Tantivy 0.15.0
 =========================
 - API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
 This change is breaking but migration is trivial.
 - Added an Histogram collector. (@fulmicoton) #994
@@ -93,9 +73,9 @@ Tantivy 0.15.0
 - Updated TermMerger implementation to rely on the union feature of the FST (@scampi) #469
 - Add boolean marking whether position is required in the query_terms API call (@fulmicoton). #1070
 Tantivy 0.14.0
 =========================
 - Remove dependency to atomicwrites #833 .Implemented by @fulmicoton upon suggestion and research from @asafigan).
 - Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
 - API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
@@ -114,19 +94,16 @@ This version breaks compatibility and requires users to reindex everything.
 Tantivy 0.13.2
 ===================
 Bugfix. Acquiring a facet reader on a segment that does not contain any
 doc with this facet returns `None`. (#896)
 Tantivy 0.13.1
 ===================
 Made `Query` and `Collector` `Send + Sync`.
 Updated misc dependency versions.
 Tantivy 0.13.0
 ======================
 Tantivy 0.13 introduce a change in the index format that will require
 you to reindex your index (BlockWAND information are added in the skiplist).
 The index size increase is minor as this information is only added for
@@ -141,7 +118,6 @@ so that we can discuss possible solutions.
 A freshly created DocSet point directly to their first doc. A sentinel value called TERMINATED marks the end of a DocSet.
 `.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
 As a result, iterating through DocSet now looks as follows
 ```rust
 let mut doc = docset.doc();
 while doc != TERMINATED {
@@ -149,9 +125,7 @@ while doc != TERMINATED {
 doc = docset.advance();
 }
 ```
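For completeness, a hedged sketch of the companion `seek` idiom introduced by the same API change (helper name and usage are illustrative, not from the changelog):

```rust
use tantivy::{DocId, DocSet, TERMINATED};

/// Advance `docset` to the first doc >= `target` and tell whether `target` itself matches.
fn contains_doc<D: DocSet>(docset: &mut D, target: DocId) -> bool {
    let doc = docset.seek(target);
    doc != TERMINATED && doc == target
}
```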
 The change made it possible to greatly simplify a lot of the docset's code.
 - Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
 - Added an offset option to the Top(.*)Collectors. (@robyoung)
 - Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
@@ -159,7 +133,6 @@ to the PISA team for answering all my questions!)
 Tantivy 0.12.0
 ======================
 - Removing static dispatch in tokenizers for simplicity. (#762)
 - Added backward iteration for `TermDictionary` stream. (@halvorboe)
 - Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
@@ -170,32 +143,30 @@ Tantivy 0.12.0
 ## How to update?
 Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
-minor changes. Check <https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs>
+minor changes. Check https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs
 to check for some code sample.
 Tantivy 0.11.3
 =======================
 - Fixed DateTime as a fast field (#735)
 Tantivy 0.11.2
 =======================
 - The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
 - Exposing a constructor for `WatchHandle` (#731)
 Tantivy 0.11.1
 =====================
 - Bug fix #729
 Tantivy 0.11.0
 =====================
 - Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
 - Various bugfixes in the query parser.
 - Better handling of hyphens in query parser. (#609)
 - Better handling of whitespaces.
 - Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
@@ -226,6 +197,7 @@ Tantivy 0.10.1
 Avoid watching the mmap directory until someone effectively creates a reader that uses
 this functionality.
 Tantivy 0.10.0
 =====================
@@ -241,7 +213,6 @@ Tantivy 0.10.0
 Minor
 ---------
 - Switched to Rust 2018 (@uvd)
 - Small simplification of the code.
 Calling .freq() or .doc() when .advance() has never been called
@@ -249,7 +220,8 @@ on segment postings should panic from now on.
 - Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
 - Fast fields are now preloaded when the `SegmentReader` is created.
 - `IndexMeta` is now public. (@hntd187)
-- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
+- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
+Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
 only require a read lock. (@fulmicoton)
 - Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
 - Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
@@ -265,17 +237,16 @@ Your program should be usable as is.
 Fast fields used to be accessed directly from the `SegmentReader`.
 The API changed, you are now required to acquire your fast field reader via the
 `segment_reader.fast_fields()`, and use one of the typed method:
 - `.u64()`, `.i64()` if your field is single-valued ;
 - `.u64s()`, `.i64s()` if your field is multi-valued ;
 - `.bytes()` if your field is bytes fast field.
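A hedged sketch of that accessor pattern (field handling and the concrete reader type are assumptions; the exact signatures have shifted between tantivy versions):

```rust
use tantivy::fastfield::FastFieldReader; // trait providing .get()
use tantivy::schema::Field;
use tantivy::{DocId, SegmentReader};

/// Read a single-valued u64 fast field for one document of a segment.
fn read_u64_fast_field(
    segment_reader: &SegmentReader,
    field: Field,
    doc: DocId,
) -> tantivy::Result<u64> {
    let fast_field_reader = segment_reader.fast_fields().u64(field)?;
    Ok(fast_field_reader.get(doc))
}
```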
 Tantivy 0.9.0
 =====================
 *0.9.0 index format is not compatible with the
 previous index format.*
 - MAJOR BUGFIX :
 Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
 - Removed most unsafe (@fulmicoton)
@@ -319,40 +290,37 @@ To update from tantivy 0.8, you will need to go through the following steps.
 ```
 Tantivy 0.8.2
 =====================
 Fixing build for x86_64 platforms. (#496)
 No need to update from 0.8.1 if tantivy
 is building on your platform.
 Tantivy 0.8.1
 =====================
 Hotfix of #476.
 Merge was reflecting deletes before commit was passed.
 Thanks @barrotsteindev for reporting the bug.
 Tantivy 0.8.0
 =====================
 *No change in the index format*
 - API Breaking change in the collector API. (@jwolfe, @fulmicoton)
 - Multithreaded search (@jwolfe, @fulmicoton)
 Tantivy 0.7.1
 =====================
 *No change in the index format*
 - Bugfix: NGramTokenizer panics on non ascii chars
 - Added a space usage API
 Tantivy 0.7
 =====================
 - Skip data for doc ids and positions (@fulmicoton),
 greatly improving performance
 - Tantivy error now rely on the failure crate (@drusellers)
@@ -362,15 +330,15 @@ Tantivy 0.7
 Tantivy 0.6.1
 =========================
 - Bugfix #324. GC removing was removing file that were still in useful
 - Added support for parsing AllQuery and RangeQuery via QueryParser
 - AllQuery: `*`
 - RangeQuery:
 - Inclusive `field:[startIncl to endIncl]`
 - Exclusive `field:{startExcl to endExcl}`
 - Mixed `field:[startIncl to endExcl}` and vice versa
 - Unbounded `field:[start to *]`, `field:[* to end]`
 Tantivy 0.6
 ==========================
@@ -383,53 +351,58 @@ to this release!
 - Approximate field norms encoded over 1 byte. (@fulmicoton)
 - Compiles on stable rust (@fulmicoton)
 - Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
 - Completely uncompressed
 - Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
 - Add NGram token support (@drusellers)
 - Add Stopword Filter support (@drusellers)
 - Add a FuzzyTermQuery (@drusellers)
 - Add a RegexQuery (@drusellers)
 - Various performance improvements (@fulmicoton)_
 Tantivy 0.5.2
 ===========================
 - bugfix #274
 - bugfix #280
 - bugfix #289
 Tantivy 0.5.1
 ==========================
 - bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
 Tantivy 0.5
 ==========================
 - Faceting
 - RangeQuery
 - Configurable tokenization pipeline
 - Bugfix in PhraseQuery
 - Various query optimisation
 - Allowing very large indexes
 - 64 bits file address
 - Smarter encoding of the `TermInfo` objects
 Tantivy 0.4.3
 ==========================
 - Bugfix race condition when deleting files. (#198)
 Tantivy 0.4.2
 ==========================
 - Prevent usage of AVX2 instructions (#201)
 Tantivy 0.4.1
 ==========================
 - Bugfix for non-indexed fields. (#199)
 Tantivy 0.4.0
 ==========================
@@ -444,31 +417,37 @@ Tantivy 0.4.0
 - Searching for a non-indexed field returns an explicit Error
 - Phrase query for non-tokenized field are not tokenized by the query parser.
 - Faster/Better indexing (@fulmicoton)
 - using murmurhash2
 - faster merging
 - more memory efficient fast field writer (@lnicola )
 - better handling of collisions
 - lesser memory usage
 - Added API, most notably to iterate over ranges of terms (@fulmicoton)
 - Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
 - Made the doc! macro public (@fulmicoton)
 - Added an alternative implementation of the streaming dictionary (@fulmicoton)
 Tantivy 0.3.1
 ==========================
 - Expose a method to trigger files garbage collection
 Tantivy 0.3
 ==========================
 Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
 for their contribution to this release.
 Thanks also to everyone in tantivy gitter chat
 for their advise and company :)
-<https://gitter.im/tantivy-search/tantivy>
+https://gitter.im/tantivy-search/tantivy
 Warning:
@@ -477,16 +456,19 @@ code and index format.
 You should not expect backward compatibility before
 tantivy 1.0.
 New Features
 ------------
 - Delete. You can now delete documents from an index.
 - Support for windows (Thanks to @lnicola)
 Various Bugfixes & small improvements
 ----------------------------------------
-- Added CI for Windows (<https://ci.appveyor.com/project/fulmicoton/tantivy>)
+- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
 Thanks to @KodrAus ! (#108)
 - Various dependy version update (Thanks to @Ameobea) #76
 - Fixed several race conditions in `Index.wait_merge_threads`
@@ -498,3 +480,7 @@ Thanks to @KodrAus ! (#108)
 - Building binary targets for tantivy-cli (Thanks to @KodrAus)
 - Misc invisible bug fixes, and code cleanup.
 - Use

View File

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.18.0"
+version = "0.17.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -10,73 +10,71 @@ homepage = "https://github.com/quickwit-oss/tantivy"
 repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
-edition = "2021"
+edition = "2018"
 [dependencies]
-oneshot = "0.1.3"
+oneshot = "0.1"
-base64 = "0.13.0"
+base64 = "0.13"
 byteorder = "1.4.3"
-crc32fast = "1.3.2"
+crc32fast = "1.2.1"
-once_cell = "1.10.0"
+once_cell = "1.7.2"
-regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
+regex ={ version = "1.5.4", default-features = false, features = ["std"] }
-tantivy-fst = "0.3.0"
+tantivy-fst = "0.3"
-memmap2 = { version = "0.5.3", optional = true }
+memmap2 = {version = "0.5", optional=true}
-lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
+lz4_flex = { version = "0.9", default-features = false, features = ["checked-decode"], optional = true }
-brotli = { version = "3.3.4", optional = true }
+brotli = { version = "3.3", optional = true }
-zstd = { version = "0.11", optional = true }
 snap = { version = "1.0.5", optional = true }
-tempfile = { version = "3.3.0", optional = true }
+tempfile = { version = "3.2", optional = true }
-log = "0.4.16"
+log = "0.4.14"
-serde = { version = "1.0.136", features = ["derive"] }
+serde = { version = "1.0.126", features = ["derive"] }
-serde_json = "1.0.79"
+serde_json = "1.0.64"
-num_cpus = "1.13.1"
+num_cpus = "1.13"
 fs2={ version = "0.4.3", optional = true }
-levenshtein_automata = "0.2.1"
+levenshtein_automata = "0.2"
-uuid = { version = "1.0.0", features = ["v4", "serde"] }
+uuid = { version = "0.8.2", features = ["v4", "serde"] }
-crossbeam-channel = "0.5.4"
+crossbeam = "0.8.1"
-tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
+tantivy-query-grammar = { version="0.15.0", path="./query-grammar" }
-tantivy-bitpacker = { version="0.2", path="./bitpacker" }
+tantivy-bitpacker = { version="0.1", path="./bitpacker" }
-common = { version = "0.3", path = "./common/", package = "tantivy-common" }
+common = { version = "0.2", path = "./common/", package = "tantivy-common" }
-fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
+fastfield_codecs = { version="0.1", path="./fastfield_codecs", default-features = false }
-ownedbytes = { version="0.3", path="./ownedbytes" }
+ownedbytes = { version="0.2", path="./ownedbytes" }
-stable_deref_trait = "1.2.0"
+stable_deref_trait = "1.2"
-rust-stemmers = "1.2.0"
+rust-stemmers = "1.2"
-downcast-rs = "1.2.0"
+downcast-rs = "1.2"
 bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
-census = "0.4.0"
+census = "0.4"
 fnv = "1.0.7"
-thiserror = "1.0.30"
+thiserror = "1.0.24"
 htmlescape = "0.3.1"
-fail = "0.5.0"
+fail = "0.5"
-murmurhash32 = "0.2.0"
+murmurhash32 = "0.2"
-time = { version = "0.3.10", features = ["serde-well-known"] }
+time = { version = "0.3.7", features = ["serde-well-known"] }
-smallvec = "1.8.0"
+smallvec = "1.6.1"
-rayon = "1.5.2"
+rayon = "1.5"
-lru = "0.7.5"
+lru = "0.7.0"
-fastdivide = "0.4.0"
+fastdivide = "0.4"
-itertools = "0.10.3"
+itertools = "0.10.0"
-measure_time = "0.8.2"
+measure_time = "0.8.0"
-pretty_assertions = "1.2.1"
+pretty_assertions = "1.1.0"
-serde_cbor = { version = "0.11.2", optional = true }
+serde_cbor = {version="0.11", optional=true}
-async-trait = "0.1.53"
+async-trait = "0.1"
-arc-swap = "1.5.0"
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"
 [dev-dependencies]
-rand = "0.8.5"
+rand = "0.8.3"
 maplit = "1.0.2"
-matches = "0.1.9"
+matches = "0.1.8"
-proptest = "1.0.0"
+proptest = "1.0"
 criterion = "0.3.5"
-test-log = "0.2.10"
+test-log = "0.2.8"
 env_logger = "0.9.0"
-pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
+pprof = {version= "0.7", features=["flamegraph", "criterion"]}
-futures = "0.3.21"
+futures = "0.3.15"
 [dev-dependencies.fail]
-version = "0.5.0"
+version = "0.5"
 features = ["failpoints"]
 [profile.release]
@@ -95,7 +93,6 @@ mmap = ["fs2", "tempfile", "memmap2"]
 brotli-compression = ["brotli"]
 lz4-compression = ["lz4_flex"]
 snappy-compression = ["snap"]
-zstd-compression = ["zstd"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.

View File

@@ -5,6 +5,7 @@
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy) [![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png) ![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
**Tantivy** is a **full-text search engine library** written in Rust. **Tantivy** is a **full-text search engine library** written in Rust.
@@ -15,7 +16,7 @@ to build such a search engine.
Tantivy is, in fact, strongly inspired by Lucene's design. Tantivy is, in fact, strongly inspired by Lucene's design.
If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy. If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
# Benchmark # Benchmark
@@ -56,6 +57,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/). Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
# Getting started # Getting started
Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows. Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
@@ -123,22 +125,17 @@ By default, `rustc` compiles everything in the `examples/` directory in debug mo
rust-gdb target/debug/examples/$EXAMPLE_NAME rust-gdb target/debug/examples/$EXAMPLE_NAME
$ gdb run $ gdb run
``` ```
# Companies Using Tantivy
# Companies Using Tantivy
<p align="left"> <p align="left">
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" /> &nbsp; <img align="center" src="doc/assets/images/Nuclia.png" alt="Nuclia" height="25" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" /> <img align="center" src="doc/assets/images/humanfirst.png" alt="Humanfirst.ai" height="30" width="auto" />&nbsp;
<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" /> <img align="center" src="doc/assets/images/element.io.svg" alt="Element.io" height="25" width="auto" />
<img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />&nbsp; &nbsp;
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
</p> </p>
# FAQ # FAQ
### Can I use Tantivy in other languages? ### Can I use Tantivy in other languages?
- Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py) - Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
- Ruby → [tantiny](https://github.com/baygeldin/tantiny) - Ruby → [tantiny](https://github.com/baygeldin/tantiny)
@@ -152,17 +149,4 @@ You can also find other bindings on [GitHub](https://github.com/search?q=tantivy
- and [more](https://github.com/search?q=tantivy)! - and [more](https://github.com/search?q=tantivy)!
### On average, how much faster is Tantivy compared to Lucene? ### On average, how much faster is Tantivy compared to Lucene?
- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
### Does tantivy support incremental indexing?
- Yes.
### How can I edit documents?
- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.
### When will my documents be searchable during indexing?
- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`.
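A minimal sketch tying the last two answers together (delete-then-reindex, then commit and reload; the `id` field is illustrative):

```rust
use tantivy::schema::{Schema, STORED, STRING};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let id = schema_builder.add_text_field("id", STRING | STORED);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(id => "doc-1"))?;
    writer.commit()?;

    let reader = index.reader()?;

    // "Editing" doc-1 means deleting it by term and adding the new version.
    writer.delete_term(Term::from_field_text(id, "doc-1"));
    writer.add_document(doc!(id => "doc-1"))?;
    writer.commit()?; // the change only becomes durable and searchable here...

    reader.reload()?; // ...and existing readers must reload to see it
    let searcher = reader.searcher(); // newly acquired searchers reflect the change
    assert_eq!(searcher.num_docs(), 1);
    Ok(())
}
```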

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "tantivy-bitpacker" name = "tantivy-bitpacker"
version = "0.2.0" version = "0.1.1"
edition = "2021" edition = "2018"
authors = ["Paul Masurel <paul.masurel@gmail.com>"] authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = [] categories = []

View File

@@ -82,16 +82,14 @@ impl BitUnpacker {
} }
} }
pub fn bit_width(&self) -> u8 {
self.num_bits as u8
}
#[inline] #[inline]
pub fn get(&self, idx: u64, data: &[u8]) -> u64 { pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
if self.num_bits == 0 { if self.num_bits == 0 {
return 0u64; return 0u64;
} }
let addr_in_bits = idx * self.num_bits; let num_bits = self.num_bits;
let mask = self.mask;
let addr_in_bits = idx * num_bits;
let addr = addr_in_bits >> 3; let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7; let bit_shift = addr_in_bits & 7;
debug_assert!( debug_assert!(
@@ -103,7 +101,7 @@ impl BitUnpacker {
.unwrap(); .unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes); let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64; let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
val_shifted & self.mask val_shifted & mask
} }
} }

View File

@@ -58,10 +58,6 @@ fn metadata_test() {
assert_eq!(meta.num_bits(), 6); assert_eq!(meta.num_bits(), 6);
} }
fn mem_usage<T>(items: &Vec<T>) -> usize {
items.capacity() * std::mem::size_of::<T>()
}
impl BlockedBitpacker { impl BlockedBitpacker {
pub fn new() -> Self { pub fn new() -> Self {
let mut compressed_blocks = vec![]; let mut compressed_blocks = vec![];
@@ -77,8 +73,10 @@ impl BlockedBitpacker {
pub fn mem_usage(&self) -> usize { pub fn mem_usage(&self) -> usize {
std::mem::size_of::<BlockedBitpacker>() std::mem::size_of::<BlockedBitpacker>()
+ self.compressed_blocks.capacity() + self.compressed_blocks.capacity()
+ mem_usage(&self.offset_and_bits) + self.offset_and_bits.capacity()
+ mem_usage(&self.buffer) * std::mem::size_of_val(&self.offset_and_bits.get(0).cloned().unwrap_or_default())
+ self.buffer.capacity()
* std::mem::size_of_val(&self.buffer.get(0).cloned().unwrap_or_default())
} }
#[inline] #[inline]

View File

@@ -1,16 +1,16 @@
[package] [package]
name = "tantivy-common" name = "tantivy-common"
version = "0.3.0" version = "0.2.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"] authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT" license = "MIT"
edition = "2021" edition = "2018"
description = "common traits and utility functions used by multiple tantivy subcrates" description = "common traits and utility functions used by multiple tantivy subcrates"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
byteorder = "1.4.3" byteorder = "1.4.3"
ownedbytes = { version="0.3", path="../ownedbytes" } ownedbytes = { version="0.2", path="../ownedbytes" }
[dev-dependencies] [dev-dependencies]
proptest = "1.0.0" proptest = "1.0.0"

View File

@@ -104,6 +104,8 @@ pub fn u64_to_f64(val: u64) -> f64 {
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {
use std::f64;
use proptest::prelude::*; use proptest::prelude::*;
use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize}; use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
@@ -133,11 +135,11 @@ pub mod test {
#[test] #[test]
fn test_i64_converter() { fn test_i64_converter() {
assert_eq!(i64_to_u64(i64::MIN), u64::MIN); assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
assert_eq!(i64_to_u64(i64::MAX), u64::MAX); assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
test_i64_converter_helper(0i64); test_i64_converter_helper(0i64);
test_i64_converter_helper(i64::MIN); test_i64_converter_helper(i64::min_value());
test_i64_converter_helper(i64::MAX); test_i64_converter_helper(i64::max_value());
for i in -1000i64..1000i64 { for i in -1000i64..1000i64 {
test_i64_converter_helper(i); test_i64_converter_helper(i);
} }

View File

@@ -19,7 +19,7 @@ pub trait DeserializeFrom<T: BinarySerializable> {
/// Implement deserialize from &[u8] for all types which implement BinarySerializable. /// Implement deserialize from &[u8] for all types which implement BinarySerializable.
/// ///
/// TryFrom would actually be preferable, but not possible because of the orphan /// TryFrom would actually be preferrable, but not possible because of the orphan
/// rules (not completely sure if this could be resolved) /// rules (not completely sure if this could be resolved)
impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] { impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] {
fn deserialize(&mut self) -> io::Result<T> { fn deserialize(&mut self) -> io::Result<T> {
@@ -229,7 +229,7 @@ pub mod test {
fixed_size_test::<u32>(); fixed_size_test::<u32>();
assert_eq!(4, serialize_test(3u32)); assert_eq!(4, serialize_test(3u32));
assert_eq!(4, serialize_test(5u32)); assert_eq!(4, serialize_test(5u32));
assert_eq!(4, serialize_test(u32::MAX)); assert_eq!(4, serialize_test(u32::max_value()));
} }
#[test] #[test]
@@ -247,11 +247,6 @@ pub mod test {
fixed_size_test::<u64>(); fixed_size_test::<u64>();
} }
#[test]
fn test_serialize_bool() {
fixed_size_test::<bool>();
}
#[test] #[test]
fn test_serialize_string() { fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1); assert_eq!(serialize_test(String::from("")), 1);
@@ -277,6 +272,6 @@ pub mod test {
assert_eq!(serialize_test(VInt(1234u64)), 2); assert_eq!(serialize_test(VInt(1234u64)), 2);
assert_eq!(serialize_test(VInt(16_383u64)), 2); assert_eq!(serialize_test(VInt(16_383u64)), 2);
assert_eq!(serialize_test(VInt(16_384u64)), 3); assert_eq!(serialize_test(VInt(16_384u64)), 3);
assert_eq!(serialize_test(VInt(u64::MAX)), 10); assert_eq!(serialize_test(VInt(u64::max_value())), 10);
} }
} }

View File

@@ -199,7 +199,7 @@ mod tests {
aux_test_vint(0); aux_test_vint(0);
aux_test_vint(1); aux_test_vint(1);
aux_test_vint(5); aux_test_vint(5);
aux_test_vint(u64::MAX); aux_test_vint(u64::max_value());
for i in 1..9 { for i in 1..9 {
let power_of_128 = 1u64 << (7 * i); let power_of_128 = 1u64 << (7 * i);
aux_test_vint(power_of_128 - 1u64); aux_test_vint(power_of_128 - 1u64);
@@ -228,6 +228,6 @@ mod tests {
aux_test_serialize_vint_u32(power_of_128); aux_test_serialize_vint_u32(power_of_128);
aux_test_serialize_vint_u32(power_of_128 + 1u32); aux_test_serialize_vint_u32(power_of_128 + 1u32);
} }
aux_test_serialize_vint_u32(u32::MAX); aux_test_serialize_vint_u32(u32::max_value());
} }
} }

View File

@@ -62,7 +62,7 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
pub struct AntiCallToken(()); pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer /// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write + Send + Sync { pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref. /// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()> fn terminate(mut self) -> io::Result<()>
where Self: Sized { where Self: Sized {

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,5 +1,7 @@
# Summary # Summary
[Avant Propos](./avant-propos.md) [Avant Propos](./avant-propos.md)
- [Segments](./basis.md) - [Segments](./basis.md)

View File

@@ -3,7 +3,7 @@
> Tantivy is a **search** engine **library** for Rust. > Tantivy is a **search** engine **library** for Rust.
If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for rust. tantivy is heavily inspired by Lucene's design and If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for rust. tantivy is heavily inspired by Lucene's design and
they both have the same scope and targeted use cases. they both have the same scope and targetted use cases.
If you are not familiar with Lucene, let's break down our little tagline. If you are not familiar with Lucene, let's break down our little tagline.
@@ -31,4 +31,4 @@ relevancy, collapsing, highlighting, spatial search.
index from a different format. index from a different format.
Tantivy exposes a lot of low level API to do all of these things. Tantivy exposes a lot of low level API to do all of these things.

View File

@@ -11,7 +11,7 @@ directory shipped with tantivy is the `MmapDirectory`.
While this design has some downsides, this greatly simplifies the source code of While this design has some downsides, this greatly simplifies the source code of
tantivy. Caching is also entirely delegated to the OS. tantivy. Caching is also entirely delegated to the OS.
`tantivy` works entirely (or almost) by directly reading the datastructures as they are laid on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds. `tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
This is an interesting property for a command line search engine, or for some multi-tenant log search engine: spawning a new process for each new query can be a perfectly sensible solution in some use cases. This is an interesting property for a command line search engine, or for some multi-tenant log search engine: spawning a new process for each new query can be a perfectly sensible solution in some use cases.
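To make that concrete, a minimal sketch of opening an existing on-disk index and serving a first query (the directory path and the `body` field are hypothetical):

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // Opening the index only mmaps its files; nothing is eagerly loaded.
    let index = Index::open_in_dir("/path/to/existing/index")?;
    let body = index.schema().get_field("body").expect("field must exist");

    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![body]).parse_query("hello")?;
    let count = searcher.search(&query, &Count)?;
    println!("{} documents match", count);
    Ok(())
}
```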
@@ -22,6 +22,7 @@ Of course this is crucial to reduce IO, and ensure that as much of our index can
Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also
critical for performance if your data is read from an `SSD` or is even already in your pagecache. critical for performance if your data is read from an `SSD` or is even already in your pagecache.
## Segments, and the log method ## Segments, and the log method
That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic. That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic.
@@ -52,7 +53,11 @@ to get tantivy to fit your use case:
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`. *Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
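A minimal sketch of the first half of that recipe, disabling background merges on an `IndexWriter` (this assumes the `NoMergePolicy` exposed under `tantivy::merge_policy`; the daily-segment bookkeeping itself is left out):

```rust
use tantivy::merge_policy::NoMergePolicy;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let index_writer = index.writer(50_000_000)?;
    // With NoMergePolicy, segments are never merged in the background, so a
    // "one segment per day" layout stays intact until you drop it yourself.
    index_writer.set_merge_policy(Box::new(NoMergePolicy));
    Ok(())
}
```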
## Merging
# Merging
As you index more and more data, your index will accumulate more and more segments. As you index more and more data, your index will accumulate more and more segments.
Having a lot of small segments is not really optimal. There is a bit of redundancy in having Having a lot of small segments is not really optimal. There is a bit of redundancy in having
@@ -61,7 +66,11 @@ all these term dictionary. Also when searching, we will need to do term lookups
That's where merging or compacting comes into place. Tantivy will continuously consider merge That's where merging or compacting comes into place. Tantivy will continuously consider merge
opportunities and start merging segments in the background. opportunities and start merging segments in the background.
## Indexing throughput, number of indexing threads
# Indexing throughput, number of indexing threads
[^1]: This may eventually change. [^1]: This may eventually change.

View File

@@ -1,3 +1,3 @@
# Examples # Examples
- [Basic search](/examples/basic_search.html) - [Basic search](/examples/basic_search.html)

View File

@@ -1,11 +1,11 @@
- [Index Sorting](#index-sorting) - [Index Sorting](#index-sorting)
- [Why Sorting](#why-sorting) + [Why Sorting](#why-sorting)
- [Compression](#compression) * [Compression](#compression)
- [Top-N Optimization](#top-n-optimization) * [Top-N Optimization](#top-n-optimization)
- [Pruning](#pruning) * [Pruning](#pruning)
- [Other](#other) * [Other](#other)
- [Usage](#usage) + [Usage](#usage)
# Index Sorting # Index Sorting
@@ -15,34 +15,32 @@ Tantivy allows you to sort the index according to a property.
Presorting an index has several advantages: Presorting an index has several advantages:
### Compression ###### Compression
When data is sorted it is easier to compress the data. E.g. the numbers sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5]. When data is sorted it is easier to compress the data. E.g. the numbers sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
If we apply delta encoding, the unsorted list becomes [5, -3, 1, -2, 3], while the sorted one becomes [1, 1, 1, 1, 1]. If we apply delta encoding, the unsorted list becomes [5, -3, 1, -2, 3], while the sorted one becomes [1, 1, 1, 1, 1].
The compression ratio mainly improves for the fast field of the sorted property; everything else is likely unaffected. The compression ratio mainly improves for the fast field of the sorted property; everything else is likely unaffected.
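A tiny self-contained sketch of that observation, reproducing the deltas quoted above:

```rust
// Delta-encode the example sequence before and after sorting to show why
// sorted data compresses better.
fn deltas(values: &[i64]) -> Vec<i64> {
    values
        .iter()
        .scan(0i64, |prev, &v| {
            let delta = v - *prev;
            *prev = v;
            Some(delta)
        })
        .collect()
}

fn main() {
    assert_eq!(deltas(&[5, 2, 3, 1, 4]), vec![5, -3, 1, -2, 3]);
    assert_eq!(deltas(&[1, 2, 3, 4, 5]), vec![1, 1, 1, 1, 1]);
}
```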
###### Top-N Optimization
### Top-N Optimization When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
E.g. if the data is sorted by timestamp and we want the top n newest docs containing a term, we can simply leverage the order of the docids. E.g. if the data is sorted by timestamp and we want the top n newest docs containing a term, we can simply leverage the order of the docids.
Note: Tantivy 0.16 does not do this optimization yet. Note: Tantivy 0.16 does not do this optimization yet.
### Pruning ###### Pruning
Let's say we want all documents and want to apply the filter `>= 2010-08-11`. When the data is sorted, we could make a lookup in the fast field to find the docid range and use this as the filter. Let's say we want all documents and want to apply the filter `>= 2010-08-11`. When the data is sorted, we could make a lookup in the fast field to find the docid range and use this as the filter.
Note: Tantivy 0.16 does not do this optimization yet. Note: Tantivy 0.16 does not do this optimization yet.
### Other? ###### Other?
In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?) In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?)
## Usage ## Usage
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of tantvy 0.16 only fast fields are allowed to be used.
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of Tantivy 0.16 only fast fields are allowed to be used. ```
```rust
let settings = IndexSettings { let settings = IndexSettings {
sort_by_field: Some(IndexSortByField { sort_by_field: Some(IndexSortByField {
field: "intval".to_string(), field: "intval".to_string(),
@@ -60,3 +58,4 @@ let index = index_builder.create_in_ram().unwrap();
Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073). Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073).
In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets). In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets).
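Putting the pieces of the Usage section together, a minimal end-to-end sketch (this assumes the `IndexSettings`, `IndexSortByField` and `Order` types re-exported at the crate root in this era of tantivy; the `intval` field mirrors the snippet above):

```rust
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, Index, IndexSettings, IndexSortByField, Order};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let intval = schema_builder.add_u64_field("intval", FAST);
    let schema = schema_builder.build();

    // Ask tantivy to serialize every segment sorted by the "intval" fast field.
    let settings = IndexSettings {
        sort_by_field: Some(IndexSortByField {
            field: "intval".to_string(),
            order: Order::Asc,
        }),
        ..Default::default()
    };
    let index = Index::builder()
        .schema(schema)
        .settings(settings)
        .create_in_ram()?;

    // Documents can be added in any order; docids are remapped to follow the
    // sort when the segment is serialized (and again on merge).
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(intval => 3u64))?;
    writer.add_document(doc!(intval => 1u64))?;
    writer.commit()?;
    Ok(())
}
```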

View File

@@ -21,17 +21,16 @@ For instance, if user is a json field, the following document:
``` ```
emits the following tokens: emits the following tokens:
- ("name", Text, "Paul")
- ("name", Text, "Masurel")
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("created_at", Date, 15420648505)
- ("name", Text, "Paul")
- ("name", Text, "Masurel")
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("created_at", Date, 15420648505)
## Bytes-encoding and lexicographical sort # Bytes-encoding and lexicographical sort.
Like any other terms, these triplets are encoded into a binary format as follows. Like any other terms, these triplets are encoded into a binary format as follows.
- `json_path`: the json path is a sequence of "segments". In the example above, `address.city` - `json_path`: the json path is a sequence of "segments". In the example above, `address.city`
is just a debug representation of the json path `["address", "city"]`. is just a debug representation of the json path `["address", "city"]`.
Its representation is done by separating segments by a unicode char `\x01`, and ending the path by `\x00`. Its representation is done by separating segments by a unicode char `\x01`, and ending the path by `\x00`.
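For illustration, a tiny sketch of the byte layout the paragraph describes (this mimics the separator scheme only, not tantivy's actual `Term` serialization code):

```rust
// Illustrative only: build the byte layout described above for the json path
// ["address", "city"]: segments separated by \x01, path terminated by \x00.
fn encode_json_path(segments: &[&str]) -> Vec<u8> {
    let mut bytes = Vec::new();
    for (i, segment) in segments.iter().enumerate() {
        if i > 0 {
            bytes.push(0x01);
        }
        bytes.extend_from_slice(segment.as_bytes());
    }
    bytes.push(0x00);
    bytes
}

fn main() {
    assert_eq!(
        encode_json_path(&["address", "city"]),
        b"address\x01city\x00".to_vec()
    );
}
```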
@@ -42,16 +41,16 @@ This representation is designed to align the natural sort of Terms with the lexi
of their binary representation (Tantivy's dictionary (whether fst or sstable) is sorted and does prefix encoding). of their binary representation (Tantivy's dictionary (whether fst or sstable) is sorted and does prefix encoding).
In the example above, the terms will be sorted as In the example above, the terms will be sorted as
- ("address.city", Text, "Tokyo")
- ("address.city", Text, "Tokyo") - ("address.country", Text, "Japan")
- ("address.country", Text, "Japan") - ("name", Text, "Masurel")
- ("name", Text, "Masurel") - ("name", Text, "Paul")
- ("name", Text, "Paul") - ("created_at", Date, 15420648505)
- ("created_at", Date, 15420648505)
As seen in "pitfalls", we may end up having to search for a value for the same path in several different fields. Putting the field code after the path maximizes compression opportunities, and also increases the chances for the two terms to end up in the same term dictionary block. As seen in "pitfalls", we may end up having to search for a value for the same path in several different fields. Putting the field code after the path maximizes compression opportunities, and also increases the chances for the two terms to end up in the same term dictionary block.
## Pitfalls, limitation and corner cases
# Pitfalls, limitation and corner cases.
Json gives very little information about the type of the literals it stores. Json gives very little information about the type of the literals it stores.
All numeric types end up mapped as a "Number" and there are no types for dates. All numeric types end up mapped as a "Number" and there are no types for dates.
@@ -71,25 +70,23 @@ For instance, we do not even know if the type is a number or string based.
So the query So the query
```rust ```
my_path.my_segment:233 my_path.my_segment:233
``` ```
Will be interpreted as Will be interpreted as
`(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)`
```rust
(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)
```
Likewise, we need to emit two tokens if the query contains an rfc3339 date. Likewise, we need to emit two tokens if the query contains an rfc3339 date.
Indeed the date could have been actually a single token inside the text of a document at ingestion time. Generally speaking, we will always at least emit a string token in query parsing, and sometimes more. Indeed the date could have been actually a single token inside the text of a document at ingestion time. Generally speaking, we will always at least emit a string token in query parsing, and sometimes more.
If one more json field is defined, things get even more complicated. If one more json field is defined, things get even more complicated.
## Default json field ## Default json field
If the schema contains a text field called "text" and a json field that is set as a default field: If the schema contains a text field called "text" and a json field that is set as a default field:
`text:hello` could be reasonably interpreted as targeting the text field or as targeting the json field called `json_dynamic` with the json_path "text". `text:hello` could be reasonably interpreted as targetting the text field or as targetting the json field called `json_dynamic` with the json_path "text".
If there is such an ambiguity, we decide to only search in the "text" field: `text:hello`. If there is such an ambiguity, we decide to only search in the "text" field: `text:hello`.
@@ -99,11 +96,11 @@ This is a product decision.
The user can still target the JSON field by specifying its name explicitly: The user can still target the JSON field by specifying its name explicitly:
`json_dynamic.text:hello`. `json_dynamic.text:hello`.
## Range queries are not supported ## Range queries are not supported.
Json field do not support range queries. Json field do not support range queries.
## Arrays do not work like nested object ## Arrays do not work like nested object.
If json object contains an array, a search query might return more documents If json object contains an array, a search query might return more documents
than what might be expected. than what might be expected.
@@ -123,8 +120,9 @@ Let's take an example.
Despite the array structure, a document in tantivy is a bag of terms. Despite the array structure, a document in tantivy is a bag of terms.
The query: The query:
```rust ```
cart.product_type:sneakers AND cart.attributes.color:red cart.product_type:sneakers AND cart.attributes.color:red
``` ```
Actually matches the document above. Actually matches the document above.

View File

@@ -110,7 +110,6 @@ fn main() -> tantivy::Result<()> {
(9f64..14f64).into(), (9f64..14f64).into(),
(14f64..20f64).into(), (14f64..20f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: sub_agg_req_1.clone(), sub_aggregation: sub_agg_req_1.clone(),
}), }),
@@ -118,12 +117,12 @@ fn main() -> tantivy::Result<()> {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap(); let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
let res: Value = serde_json::to_value(&agg_res)?; let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
println!("{}", serde_json::to_string_pretty(&res)?); println!("{}", serde_json::to_string_pretty(&res)?);
Ok(()) Ok(())

View File

@@ -7,11 +7,10 @@
// Of course, you can have a look at the tantivy's built-in collectors // Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples. // such as the `CountCollector` for more examples.
use fastfield_codecs::Column;
// --- // ---
// Importing tantivy... // Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector}; use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::DynamicFastFieldReader; use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT}; use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader}; use tantivy::{doc, Index, Score, SegmentReader};
@@ -104,7 +103,7 @@ impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>; type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: Score) { fn collect(&mut self, doc: u32, _score: Score) {
let value = self.fast_field_reader.get_val(doc as u64) as f64; let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1; self.stats.count += 1;
self.stats.sum += value; self.stats.sum += value;
self.stats.squared_sum += value * value; self.stats.squared_sum += value * value;

View File

@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// for your unit tests... Or this example. // for your unit tests... Or this example.
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
// here we are registering our custom tokenizer // here we are registering our custome tokenizer
// this will store tokens of 3 characters each // this will store tokens of 3 characters each
index index
.tokenizers() .tokenizers()

View File

@@ -1,69 +0,0 @@
// # DateTime field example
//
// This example shows how the DateTime field can be used
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Cardinality, DateOptions, Schema, Value, INDEXED, STORED, STRING};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// # Defining the schema
let mut schema_builder = Schema::builder();
let opts = DateOptions::from(INDEXED)
.set_stored()
.set_fast(Cardinality::SingleValue)
.set_precision(tantivy::DatePrecision::Seconds);
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
let event_type = schema_builder.add_text_field("event", STRING | STORED);
let schema = schema_builder.build();
// # Indexing documents
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer(50_000_000)?;
let doc = schema.parse_document(
r#"{
"occurred_at": "2022-06-22T12:53:50.53Z",
"event": "pull-request"
}"#,
)?;
index_writer.add_document(doc)?;
let doc = schema.parse_document(
r#"{
"occurred_at": "2022-06-22T13:00:00.22Z",
"event": "comment"
}"#,
)?;
index_writer.add_document(doc)?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
// # Default fields: event_type
let query_parser = QueryParser::for_index(&index, vec![event_type]);
{
let query = query_parser.parse_query("event:comment")?;
let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
assert_eq!(count_docs.len(), 1);
}
{
let query = query_parser
.parse_query(r#"occurred_at:[2022-06-22T12:58:00Z TO 2022-06-23T00:00:00Z}"#)?;
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
assert_eq!(count_docs.len(), 1);
for (_score, doc_address) in count_docs {
let retrieved_doc = searcher.doc(doc_address)?;
assert!(matches!(
retrieved_doc.get_first(occurred_at),
Some(Value::Date(_))
));
assert_eq!(
schema.to_json(&retrieved_doc),
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
);
}
}
Ok(())
}

View File

@@ -1,8 +1,7 @@
// # Json field example // # Json field example
// //
// This example shows how the json field can be used // This example shows how the json field can be used
// to make tantivy partially schemaless by setting it as // to make tantivy partially schemaless.
// default query parser field.
use tantivy::collector::{Count, TopDocs}; use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
@@ -11,6 +10,10 @@ use tantivy::Index;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// # Defining the schema // # Defining the schema
//
// We need two fields:
// - a timestamp
// - a json object field
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_date_field("timestamp", FAST | STORED); schema_builder.add_date_field("timestamp", FAST | STORED);
let event_type = schema_builder.add_text_field("event_type", STRING | STORED); let event_type = schema_builder.add_text_field("event_type", STRING | STORED);
@@ -40,8 +43,7 @@ fn main() -> tantivy::Result<()> {
"attributes": { "attributes": {
"target": "submit-button", "target": "submit-button",
"cart": {"product_id": 133}, "cart": {"product_id": 133},
"description": "das keyboard", "description": "das keyboard"
"event_type": "holiday-sale"
} }
}"#, }"#,
)?; )?;
@@ -51,9 +53,6 @@ fn main() -> tantivy::Result<()> {
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
// # Default fields: event_type and attributes
// By setting attributes as a default field it allows omitting attributes itself, e.g. "target",
// instead of "attributes.target"
let query_parser = QueryParser::for_index(&index, vec![event_type, attributes]); let query_parser = QueryParser::for_index(&index, vec![event_type, attributes]);
{ {
let query = query_parser.parse_query("target:submit-button")?; let query = query_parser.parse_query("target:submit-button")?;
@@ -71,34 +70,10 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count_docs, 1); assert_eq!(count_docs, 1);
} }
{ {
let query = query_parser.parse_query("click AND cart.product_id:133")?; let query = query_parser
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?; .parse_query("event_type:click AND cart.product_id:133")
assert_eq!(hits.len(), 1); .unwrap();
} let hits = searcher.search(&*query, &TopDocs::with_limit(2)).unwrap();
{
// The sub-fields in the json field marked as default field still need to be explicitly
// addressed
let query = query_parser.parse_query("click AND 133")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 0);
}
{
// Default json fields are ignored if they collide with the schema
let query = query_parser.parse_query("event_type:holiday-sale")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 0);
}
// # Query via full attribute path
{
// This only searches in our schema's `event_type` field
let query = query_parser.parse_query("event_type:click")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 2);
}
{
// Default json fields can still be accessed by full path
let query = query_parser.parse_query("attributes.event_type:holiday-sale")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 1); assert_eq!(hits.len(), 1);
} }
Ok(()) Ok(())

View File

@@ -2,8 +2,8 @@ use std::cmp::Reverse;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock, Weak}; use std::sync::{Arc, RwLock, Weak};
use fastfield_codecs::Column;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, TEXT}; use tantivy::schema::{Field, Schema, FAST, TEXT};
use tantivy::{ use tantivy::{
@@ -52,7 +52,7 @@ impl Warmer for DynamicPriceColumn {
let product_id_reader = segment.fast_fields().u64(self.field)?; let product_id_reader = segment.fast_fields().u64(self.field)?;
let product_ids: Vec<ProductId> = segment let product_ids: Vec<ProductId> = segment
.doc_ids_alive() .doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc as u64)) .map(|doc| product_id_reader.get(doc))
.collect(); .collect();
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter(); let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
let mut price_vals: Vec<Price> = Vec::new(); let mut price_vals: Vec<Price> = Vec::new();
@@ -145,7 +145,11 @@ fn main() -> tantivy::Result<()> {
let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade( let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
&(price_dynamic_column.clone() as Arc<dyn Warmer>), &(price_dynamic_column.clone() as Arc<dyn Warmer>),
)]; )];
let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?; let reader: IndexReader = index
.reader_builder()
.warmers(warmers)
.num_searchers(1)
.try_into()?;
reader.reload()?; reader.reload()?;
let query_parser = QueryParser::for_index(&index, vec![text]); let query_parser = QueryParser::for_index(&index, vec![text]);

fastfield_codecs/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
datasets/

View File

@@ -1,26 +1,22 @@
[package] [package]
name = "fastfield_codecs" name = "fastfield_codecs"
version = "0.2.0" version = "0.1.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"] authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT" license = "MIT"
edition = "2021" edition = "2018"
description = "Fast field codecs used by tantivy" description = "Fast field codecs used by tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
common = { version = "0.3", path = "../common/", package = "tantivy-common" } common = { version = "0.2", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" } tantivy-bitpacker = { version="0.1.1", path = "../bitpacker/" }
ownedbytes = { version = "0.3.0", path = "../ownedbytes" } prettytable-rs = {version="0.8.0", optional= true}
prettytable-rs = {version="0.9.0", optional= true}
rand = {version="0.8.3", optional= true} rand = {version="0.8.3", optional= true}
[dev-dependencies] [dev-dependencies]
more-asserts = "0.3.0" more-asserts = "0.2.1"
proptest = "1.0.0"
rand = "0.8.3" rand = "0.8.3"
[features] [features]
unstable = [] # useful for benches and experimental codecs.
bin = ["prettytable-rs", "rand"] bin = ["prettytable-rs", "rand"]
default = ["bin"] default = ["bin"]

View File

@@ -0,0 +1,6 @@
DATASETS ?= hdfs_logs_timestamps http_logs_timestamps amazon_reviews_product_ids nooc_temperatures
download:
@echo "--- Downloading datasets ---"
mkdir -p datasets
@for dataset in $(DATASETS); do curl -o - https://quickwit-datasets-public.s3.amazonaws.com/benchmarks/fastfields/$$dataset.txt.gz | gunzip > datasets/$$dataset.txt; done

View File

@@ -13,6 +13,10 @@ A codec needs to implement 2 traits:
- A reader implementing `FastFieldCodecReader` to read the codec. - A reader implementing `FastFieldCodecReader` to read the codec.
- A serializer implementing `FastFieldCodecSerializer` for compression estimation and codec name + id. - A serializer implementing `FastFieldCodecSerializer` for compression estimation and codec name + id.
### Download real world datasets for codecs comparison
Before comparing codecs, you need to execute `make download` to download real world datasets hosted on AWS S3.
To run with the unstable codecs, execute `cargo run --features unstable`.
### Tests ### Tests
Once the traits are implemented test and benchmark integration is pretty easy (see `test_with_codec_data_sets` and `bench.rs`). Once the traits are implemented test and benchmark integration is pretty easy (see `test_with_codec_data_sets` and `bench.rs`).
@@ -23,46 +27,101 @@ cargo run --features bin
``` ```
### TODO ### TODO
- Add real world data sets in comparison
- Add codec to cover sparse data sets - Add codec to cover sparse data sets
### Codec Comparison ### Codec Comparison
``` ```
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| | Compression Ratio | Compression Estimation | | | Compression ratio | Compression ratio estimation | Compression time (micro) | Reading time (micro) |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Autoincrement | | | | Autoincrement | | | | |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| LinearInterpol | 0.000039572664 | 0.000004396963 | | PiecewiseLinear | 0.0051544965 | 0.17251475 | 960 | 211 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| MultiLinearInterpol | 0.1477348 | 0.17275847 | | FOR | 0.118189104 | 0.14172314 | 708 | 212 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.28126493 | 0.28125 | | Bitpacked | 0.28126493 | 0.28125 | 474 | 112 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Monotonically increasing concave | | | | Monotonically increasing concave | | | | |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| LinearInterpol | 0.25003937 | 0.26562938 | | PiecewiseLinear | 0.005955 | 0.18813984 | 885 | 211 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| MultiLinearInterpol | 0.190665 | 0.1883836 | | FOR | 0.16113 | 0.15734828 | 704 | 212 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.31251436 | 0.3125 | | Bitpacked | 0.31251436 | 0.3125 | 478 | 113 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Monotonically increasing convex | | | | Monotonically increasing convex | | | | |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| LinearInterpol | 0.25003937 | 0.28125438 | | PiecewiseLinear | 0.00613 | 0.20376484 | 889 | 211 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| MultiLinearInterpol | 0.18676 | 0.2040086 | | FOR | 0.157175 | 0.17297328 | 706 | 212 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.31251436 | 0.3125 | | Bitpacked | 0.31251436 | 0.3125 | 471 | 113 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Almost monotonically increasing | | | | Almost monotonically increasing | | | | |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| LinearInterpol | 0.14066513 | 0.1562544 | | PiecewiseLinear | 0.14549863 | 0.17251475 | 923 | 210 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| MultiLinearInterpol | 0.16335973 | 0.17275847 | | FOR | 0.14943957 | 0.15734814 | 703 | 211 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.28126493 | 0.28125 | | Bitpacked | 0.28126493 | 0.28125 | 462 | 112 |
+----------------------------------+-------------------+------------------------+ +----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Random | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | 0.14533783 | 0.14126475 | 924 | 211 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 0.13381402 | 0.15734814 | 695 | 211 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.12501445 | 0.125 | 422 | 112 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| HDFS logs timestamps | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | 0.39826187 | 0.4068908 | 5545 | 1086 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 0.39214826 | 0.40734857 | 5082 | 1073 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.39062786 | 0.390625 | 2864 | 567 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| HDFS logs timestamps SORTED | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | 0.032736875 | 0.094390824 | 4942 | 1067 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 0.02667125 | 0.079223566 | 3626 | 994 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.39062786 | 0.390625 | 2493 | 566 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| HTTP logs timestamps SORTED | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | 0.047942877 | 0.20376582 | 5121 | 1065 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 0.06637425 | 0.18859856 | 3929 | 1093 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.26562786 | 0.265625 | 2221 | 526 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Amazon review product ids | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | 0.41900787 | 0.4225158 | 5239 | 1089 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 0.41504425 | 0.43859857 | 4158 | 1052 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.40625286 | 0.40625 | 2603 | 513 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Amazon review product ids SORTED | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | 0.18364687 | 0.25064084 | 5036 | 990 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 0.21239226 | 0.21984856 | 4087 | 1072 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 0.40625286 | 0.40625 | 2702 | 525 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Temperatures | | | | |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| PiecewiseLinear | | Codec Disabled | 0 | 0 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| FOR | 1.0088086 | 1.001098 | 1306 | 237 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
| Bitpacked | 1.000012 | 1 | 950 | 108 |
+----------------------------------+-------------------+------------------------------+--------------------------+----------------------+
``` ```

View File

@@ -4,9 +4,10 @@ extern crate test;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use fastfield_codecs::bitpacked::BitpackedCodec; use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec; use fastfield_codecs::piecewise_linear::{
use fastfield_codecs::linear::LinearCodec; PiecewiseLinearFastFieldReader, PiecewiseLinearFastFieldSerializer,
};
use fastfield_codecs::*; use fastfield_codecs::*;
fn get_data() -> Vec<u64> { fn get_data() -> Vec<u64> {
@@ -25,59 +26,60 @@ mod tests {
fn value_iter() -> impl Iterator<Item = u64> { fn value_iter() -> impl Iterator<Item = u64> {
0..20_000 0..20_000
} }
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) { fn bench_get<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
b: &mut Bencher,
data: &[u64],
) {
let mut bytes = vec![]; let mut bytes = vec![];
Codec::serialize(&mut bytes, &data).unwrap(); S::serialize(
let reader = Codec::open_from_bytes(OwnedBytes::new(bytes)).unwrap(); &mut bytes,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let reader = R::open_from_bytes(&bytes).unwrap();
b.iter(|| { b.iter(|| {
let mut sum = 0u64;
for pos in value_iter() { for pos in value_iter() {
let val = reader.get_val(pos as u64); reader.get_u64(pos as u64, &bytes);
debug_assert_eq!(data[pos as usize], val);
sum = sum.wrapping_add(val);
} }
sum
}); });
} }
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) { fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
let mut bytes = Vec::new(); let mut bytes = vec![];
b.iter(|| { b.iter(|| {
bytes.clear(); S::serialize(
Codec::serialize(&mut bytes, &data).unwrap(); &mut bytes,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
}); });
} }
use ownedbytes::OwnedBytes;
use test::Bencher; use test::Bencher;
#[bench] #[bench]
fn bench_fastfield_bitpack_create(b: &mut Bencher) { fn bench_fastfield_bitpack_create(b: &mut Bencher) {
let data: Vec<_> = get_data(); let data: Vec<_> = get_data();
bench_create::<BitpackedCodec>(b, &data); bench_create::<BitpackedFastFieldSerializer>(b, &data);
} }
#[bench] #[bench]
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) { fn bench_fastfield_piecewise_linear_create(b: &mut Bencher) {
let data: Vec<_> = get_data(); let data: Vec<_> = get_data();
bench_create::<LinearCodec>(b, &data); bench_create::<PiecewiseLinearFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<BlockwiseLinearCodec>(b, &data);
} }
#[bench] #[bench]
fn bench_fastfield_bitpack_get(b: &mut Bencher) { fn bench_fastfield_bitpack_get(b: &mut Bencher) {
let data: Vec<_> = get_data(); let data: Vec<_> = get_data();
bench_get::<BitpackedCodec>(b, &data); bench_get::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(b, &data);
} }
#[bench] #[bench]
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) { fn bench_fastfield_piecewise_linear_get(b: &mut Bencher) {
let data: Vec<_> = get_data(); let data: Vec<_> = get_data();
bench_get::<LinearCodec>(b, &data); bench_get::<PiecewiseLinearFastFieldSerializer, PiecewiseLinearFastFieldReader>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<BlockwiseLinearCodec>(b, &data);
} }
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats { pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0); let min_value = data.iter().cloned().min().unwrap_or(0);

View File

@@ -1,26 +1,37 @@
use std::io::{self, Write}; use std::io::{self, Write};
use common::BinarySerializable; use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker}; use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{Column, FastFieldCodec, FastFieldCodecType}; use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
/// Depending on the field type, a different /// Depending on the field type, a different
/// fast field is required. /// fast field is required.
#[derive(Clone)] #[derive(Clone)]
pub struct BitpackedReader { pub struct BitpackedFastFieldReader {
data: OwnedBytes,
bit_unpacker: BitUnpacker, bit_unpacker: BitUnpacker,
min_value_u64: u64, pub min_value_u64: u64,
max_value_u64: u64, pub max_value_u64: u64,
num_vals: u64,
} }
impl Column for BitpackedReader { impl<'data> FastFieldCodecReader for BitpackedFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
let min_value = u64::deserialize(&mut footer)?;
let amplitude = u64::deserialize(&mut footer)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedFastFieldReader {
min_value_u64: min_value,
max_value_u64: max_value,
bit_unpacker,
})
}
#[inline] #[inline]
fn get_val(&self, doc: u64) -> u64 { fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
self.min_value_u64 + self.bit_unpacker.get(doc, &self.data) self.min_value_u64 + self.bit_unpacker.get(doc, data)
} }
#[inline] #[inline]
fn min_value(&self) -> u64 { fn min_value(&self) -> u64 {
@@ -30,21 +41,16 @@ impl Column for BitpackedReader {
fn max_value(&self) -> u64 { fn max_value(&self) -> u64 {
self.max_value_u64 self.max_value_u64
} }
#[inline]
fn num_vals(&self) -> u64 {
self.num_vals
}
} }
pub struct BitpackedSerializerLegacy<'a, W: 'a + Write> { pub struct BitpackedFastFieldSerializerLegacy<'a, W: 'a + Write> {
bit_packer: BitPacker, bit_packer: BitPacker,
write: &'a mut W, write: &'a mut W,
min_value: u64, min_value: u64,
num_vals: u64,
amplitude: u64, amplitude: u64,
num_bits: u8, num_bits: u8,
} }
impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> { impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
/// Creates a new fast field serializer. /// Creates a new fast field serializer.
/// ///
/// The serializer in fact encode the values by bitpacking /// The serializer in fact encode the values by bitpacking
@@ -57,16 +63,15 @@ impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
write: &'a mut W, write: &'a mut W,
min_value: u64, min_value: u64,
max_value: u64, max_value: u64,
) -> io::Result<BitpackedSerializerLegacy<'a, W>> { ) -> io::Result<BitpackedFastFieldSerializerLegacy<'a, W>> {
assert!(min_value <= max_value); assert!(min_value <= max_value);
let amplitude = max_value - min_value; let amplitude = max_value - min_value;
let num_bits = compute_num_bits(amplitude); let num_bits = compute_num_bits(amplitude);
let bit_packer = BitPacker::new(); let bit_packer = BitPacker::new();
Ok(BitpackedSerializerLegacy { Ok(BitpackedFastFieldSerializerLegacy {
bit_packer, bit_packer,
write, write,
min_value, min_value,
num_vals: 0,
amplitude, amplitude,
num_bits, num_bits,
}) })
@@ -77,45 +82,21 @@ impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
let val_to_write: u64 = val - self.min_value; let val_to_write: u64 = val - self.min_value;
self.bit_packer self.bit_packer
.write(val_to_write, self.num_bits, &mut self.write)?; .write(val_to_write, self.num_bits, &mut self.write)?;
self.num_vals += 1;
Ok(()) Ok(())
} }
pub fn close_field(mut self) -> io::Result<()> { pub fn close_field(mut self) -> io::Result<()> {
self.bit_packer.close(&mut self.write)?; self.bit_packer.close(&mut self.write)?;
self.min_value.serialize(&mut self.write)?; self.min_value.serialize(&mut self.write)?;
self.amplitude.serialize(&mut self.write)?; self.amplitude.serialize(&mut self.write)?;
self.num_vals.serialize(&mut self.write)?;
Ok(()) Ok(())
} }
} }
pub struct BitpackedCodec; pub struct BitpackedFastFieldSerializer {}
impl FastFieldCodec for BitpackedCodec {
/// The CODEC_TYPE is an enum value used for serialization.
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
type Reader = BitpackedReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - 24;
let (data, mut footer) = bytes.split(footer_offset);
let min_value = u64::deserialize(&mut footer)?;
let amplitude = u64::deserialize(&mut footer)?;
let num_vals = u64::deserialize(&mut footer)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedReader {
data,
bit_unpacker,
min_value_u64: min_value,
max_value_u64: max_value,
num_vals,
})
}
impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
const NAME: &'static str = "Bitpacked";
const ID: u8 = 1;
/// Serializes data with the BitpackedFastFieldSerializer. /// Serializes data with the BitpackedFastFieldSerializer.
/// ///
/// The serializer in fact encode the values by bitpacking /// The serializer in fact encode the values by bitpacking
@@ -124,41 +105,54 @@ impl FastFieldCodec for BitpackedCodec {
/// It requires a `min_value` and a `max_value` to compute /// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode /// the minimum number of bits required to encode
/// values. /// values.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> { fn serialize(
let mut serializer = BitpackedSerializerLegacy::open( write: &mut impl Write,
write, _fastfield_accessor: &impl FastFieldDataAccess,
fastfield_accessor.min_value(), stats: FastFieldStats,
fastfield_accessor.max_value(), data_iter: impl Iterator<Item = u64>,
)?; _data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
let mut serializer =
BitpackedFastFieldSerializerLegacy::open(write, stats.min_value, stats.max_value)?;
for val in fastfield_accessor.iter() { for val in data_iter {
serializer.add_val(val)?; serializer.add_val(val)?;
} }
serializer.close_field()?; serializer.close_field()?;
Ok(()) Ok(())
} }
fn is_applicable(
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> { _fastfield_accessor: &impl FastFieldDataAccess,
let amplitude = fastfield_accessor.max_value() - fastfield_accessor.min_value(); _stats: FastFieldStats,
) -> bool {
true
}
fn estimate_compression_ratio(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> f32 {
let amplitude = stats.max_value - stats.min_value;
let num_bits = compute_num_bits(amplitude); let num_bits = compute_num_bits(amplitude);
let num_bits_uncompressed = 64; let num_bits_uncompressed = 64;
Some(num_bits as f32 / num_bits_uncompressed as f32) num_bits as f32 / num_bits_uncompressed as f32
} }
} }
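For intuition, the estimate above reduces to simple arithmetic; a self-contained sketch, where bits_required is a stand-in for compute_num_bits rather than the crate function:

// Illustrative only: every value is stored as `val - min_value` using just
// enough bits for the amplitude, so the estimated ratio is num_bits / 64.
fn bits_required(amplitude: u64) -> u8 {
    if amplitude == 0 { 0 } else { (64 - amplitude.leading_zeros()) as u8 }
}

fn main() {
    let (min_value, max_value) = (1_000u64, 1_255u64);
    let num_bits = bits_required(max_value - min_value); // amplitude 255 -> 8 bits
    assert_eq!(num_bits, 8);
    assert_eq!(num_bits as f32 / 64.0, 0.125); // vs. uncompressed 64-bit values
}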
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::tests::get_codec_test_datasets; use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) { fn create_and_validate(data: &[u64], name: &str) {
crate::tests::create_and_validate::<BitpackedCodec>(data, name); crate::tests::create_and_validate::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(
data, name,
);
} }
#[test] #[test]
fn test_with_codec_data_sets() { fn test_with_codec_data_sets() {
let data_sets = get_codec_test_datasets(); let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets { for (mut data, name) in data_sets {
create_and_validate(&data, name); create_and_validate(&data, name);
data.reverse(); data.reverse();

View File

@@ -1,49 +0,0 @@
pub trait Column<T = u64> {
/// Return the value associated to the given idx.
///
/// This accessor should return as fast as possible.
///
/// # Panics
///
/// May panic if `idx` is greater than the column length.
fn get_val(&self, idx: u64) -> T;
/// Fills an output buffer with the fast field values
/// associated with the `DocId` going from
/// `start` to `start + output.len()`.
///
/// Regardless of the type of `Item`, this method works by:
/// - transmuting the output array
/// - extracting the `Item`s as if they were `u64`
/// - possibly converting the `u64` value to the right type.
///
/// # Panics
///
/// May panic if `start + output.len()` is greater than
/// the segment's `maxdoc`.
fn get_range(&self, start: u64, output: &mut [T]) {
for (out, idx) in output.iter_mut().zip(start..) {
*out = self.get_val(idx);
}
}
/// Returns the minimum value for this fast field.
///
/// The min value does not take into account possible
/// deleted documents, and should be considered as a lower bound
/// of the actual minimum value.
fn min_value(&self) -> T;
/// Returns the maximum value for this fast field.
///
/// The max value does not take into account possible
/// deleted documents, and should be considered as an upper bound
/// of the actual maximum value.
fn max_value(&self) -> T;
fn num_vals(&self) -> u64;
/// Returns an iterator over the data.
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
}
}
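As a quick illustration of the trait above: only the four required accessors need to be written, and get_range and iter then come from the default bodies. A hypothetical impl over a slice could look like this.

// Hypothetical Column impl backed by a slice; the defaults supply
// get_range and iter on top of these four accessors.
struct SliceColumn<'a>(&'a [u64]);

impl<'a> Column for SliceColumn<'a> {
    fn get_val(&self, idx: u64) -> u64 {
        self.0[idx as usize]
    }
    fn min_value(&self) -> u64 {
        self.0.iter().copied().min().unwrap_or(0)
    }
    fn max_value(&self) -> u64 {
        self.0.iter().copied().max().unwrap_or(0)
    }
    fn num_vals(&self) -> u64 {
        self.0.len() as u64
    }
}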

View File

@@ -0,0 +1,272 @@
use std::io::{self, Read, Write};
use common::{BinarySerializable, DeserializeFrom};
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
const BLOCK_SIZE: u64 = 128;
#[derive(Clone)]
pub struct FORFastFieldReader {
num_vals: u64,
min_value: u64,
max_value: u64,
block_readers: Vec<BlockReader>,
}
#[derive(Clone, Debug, Default)]
struct BlockMetadata {
min: u64,
num_bits: u8,
}
#[derive(Clone, Debug, Default)]
struct BlockReader {
metadata: BlockMetadata,
start_offset: u64,
bit_unpacker: BitUnpacker,
}
impl BlockReader {
fn new(metadata: BlockMetadata, start_offset: u64) -> Self {
Self {
bit_unpacker: BitUnpacker::new(metadata.num_bits),
metadata,
start_offset,
}
}
#[inline]
fn get_u64(&self, block_pos: u64, data: &[u8]) -> u64 {
let diff = self
.bit_unpacker
.get(block_pos, &data[self.start_offset as usize..]);
self.metadata.min + diff
}
}
impl BinarySerializable for BlockMetadata {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.min.serialize(write)?;
self.num_bits.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let min = u64::deserialize(reader)?;
let num_bits = u8::deserialize(reader)?;
Ok(Self { min, num_bits })
}
}
#[derive(Clone, Debug)]
pub struct FORFooter {
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
block_metadatas: Vec<BlockMetadata>,
}
impl BinarySerializable for FORFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
let mut out = vec![];
self.num_vals.serialize(&mut out)?;
self.min_value.serialize(&mut out)?;
self.max_value.serialize(&mut out)?;
self.block_metadatas.serialize(&mut out)?;
write.write_all(&out)?;
(out.len() as u32).serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let footer = Self {
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
block_metadatas: Vec::<BlockMetadata>::deserialize(reader)?,
};
Ok(footer)
}
}
impl FastFieldCodecReader for FORFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let (_, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
let footer = FORFooter::deserialize(&mut footer)?;
let mut block_readers = Vec::with_capacity(footer.block_metadatas.len());
let mut current_data_offset = 0;
for block_metadata in footer.block_metadatas {
let num_bits = block_metadata.num_bits;
block_readers.push(BlockReader::new(block_metadata, current_data_offset));
current_data_offset += num_bits as u64 * BLOCK_SIZE / 8;
}
Ok(Self {
num_vals: footer.num_vals,
min_value: footer.min_value,
max_value: footer.max_value,
block_readers,
})
}
#[inline]
fn get_u64(&self, idx: u64, data: &[u8]) -> u64 {
let block_idx = (idx / BLOCK_SIZE) as usize;
let block_pos = idx - (block_idx as u64) * BLOCK_SIZE;
let block_reader = &self.block_readers[block_idx];
block_reader.get_u64(block_pos, data)
}
#[inline]
fn min_value(&self) -> u64 {
self.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.max_value
}
}
/// Frame-of-reference serializer: encodes values in blocks of BLOCK_SIZE elements, bitpacking each value relative to its block's minimum.
pub struct FORFastFieldSerializer {}
impl FastFieldCodecSerializer for FORFastFieldSerializer {
const NAME: &'static str = "FOR";
const ID: u8 = 5;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
_: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
let data = data_iter.collect::<Vec<_>>();
let mut bit_packer = BitPacker::new();
let mut block_metadatas = Vec::new();
for data_pos in (0..data.len() as u64).step_by(BLOCK_SIZE as usize) {
let block_num_vals = BLOCK_SIZE.min(data.len() as u64 - data_pos) as usize;
let block_values = &data[data_pos as usize..data_pos as usize + block_num_vals];
let mut min = block_values[0];
let mut max = block_values[0];
for &current_value in block_values[1..].iter() {
min = min.min(current_value);
max = max.max(current_value);
}
let num_bits = compute_num_bits(max - min);
for current_value in block_values.iter() {
bit_packer.write(current_value - min, num_bits, write)?;
}
bit_packer.flush(write)?;
block_metadatas.push(BlockMetadata { min, num_bits });
}
bit_packer.close(write)?;
let footer = FORFooter {
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
block_metadatas,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
stats.num_vals > BLOCK_SIZE
}
/// Estimate the compression ratio by computing the ratio of the first block.
fn estimate_compression_ratio(
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> f32 {
let last_elem_in_first_chunk = BLOCK_SIZE.min(stats.num_vals);
let max_distance = (0..last_elem_in_first_chunk)
.into_iter()
.map(|pos| {
let actual_value = fastfield_accessor.get_val(pos as u64);
actual_value - stats.min_value
})
.max()
.unwrap();
// Estimate one block and multiply by a magic number 3 to select this codec
// when we are almost sure that this is relevant.
let relative_max_value = max_distance as f32 * 3.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
// per-block metadata (min + num_bits)
+ 9 * (stats.num_vals / BLOCK_SIZE);
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
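To make the block layout concrete, here is a self-contained sketch of one frame-of-reference block; encode_block and bits_required are illustrative names, not crate functions.

// Each block stores its own minimum and bit width; only the deltas are bitpacked.
fn bits_required(amplitude: u64) -> u8 {
    if amplitude == 0 { 0 } else { (64 - amplitude.leading_zeros()) as u8 }
}

// Returns (block minimum, bit width, deltas to bitpack).
fn encode_block(block: &[u64]) -> (u64, u8, Vec<u64>) {
    let min = *block.iter().min().unwrap();
    let max = *block.iter().max().unwrap();
    (min, bits_required(max - min), block.iter().map(|v| v - min).collect())
}

fn main() {
    let block = [1_000u64, 1_003, 1_017, 1_002];
    let (min, num_bits, deltas) = encode_block(&block);
    assert_eq!((min, num_bits), (1_000, 5)); // amplitude 17 fits in 5 bits
    assert_eq!(deltas, vec![0, 3, 17, 2]);
    // Reading reverses this: locate the block via idx / BLOCK_SIZE, then
    // value = block_min + bitunpacked delta at idx % BLOCK_SIZE.
}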
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<FORFastFieldSerializer, FORFastFieldReader>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
println!("{}", actual_compression);
assert!(actual_compression < 0.2);
assert!(actual_compression > 0.006);
assert!(estimate < 0.20);
assert!(estimate > 0.10);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn border_cases_1() {
let data = (0..1024).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn border_case_2() {
let data = (0..1025).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn rand() {
for _ in 0..10 {
let mut data = (5_000..20_000)
.map(|_| rand::random::<u32>() as u64)
.collect::<Vec<_>>();
let (estimate, actual_compression) = create_and_validate(&data, "random");
dbg!(estimate);
dbg!(actual_compression);
data.reverse();
create_and_validate(&data, "random");
}
}
}

View File

@@ -5,81 +5,69 @@ extern crate more_asserts;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
use common::BinarySerializable;
use ownedbytes::OwnedBytes;
pub mod bitpacked; pub mod bitpacked;
pub mod blockwise_linear; #[cfg(feature = "unstable")]
pub mod linear; pub mod frame_of_reference;
pub mod linearinterpol;
pub mod multilinearinterpol;
pub mod piecewise_linear;
mod column; pub trait FastFieldCodecReader: Sized {
/// Reads the metadata and returns the CodecReader.
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
pub use self::column::Column; /// Read the u64 value at index `idx`.
/// `idx` can be either a `DocId` or an index used for
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] /// `multivalued` fast field.
#[repr(u8)] fn get_u64(&self, idx: u64, data: &[u8]) -> u64;
pub enum FastFieldCodecType { fn min_value(&self) -> u64;
Bitpacked = 1, fn max_value(&self) -> u64;
Linear = 2,
BlockwiseLinear = 3,
Gcd = 4,
}
impl BinarySerializable for FastFieldCodecType {
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
self.to_code().serialize(wrt)
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let code = u8::deserialize(reader)?;
let codec_type: Self = Self::from_code(code)
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{}`.", code)))?;
Ok(codec_type)
}
}
impl FastFieldCodecType {
pub fn to_code(self) -> u8 {
self as u8
}
pub fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::Bitpacked),
2 => Some(Self::Linear),
3 => Some(Self::BlockwiseLinear),
4 => Some(Self::Gcd),
_ => None,
}
}
} }
/// The FastFieldSerializerEstimate trait is required on all variants /// The FastFieldSerializerEstimate trait is required on all variants
/// of fast field compressions, to decide which one to choose. /// of fast field compressions, to decide which one to choose.
pub trait FastFieldCodec { pub trait FastFieldCodecSerializer {
/// A codec needs to provide a unique name and id, which is /// A codec needs to provide a unique name and id, which is
/// used for debugging and de/serialization. /// used for debugging and de/serialization.
const CODEC_TYPE: FastFieldCodecType; const NAME: &'static str;
const ID: u8;
type Reader: Column<u64>; /// Check if the Codec is able to compress the data
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> bool;
/// Reads the metadata and returns the CodecReader
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader>;
/// Serializes the data using the serializer into write.
///
/// The fastfield_accessor iterator should be preferred over using fastfield_accessor for
/// performance reasons.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column<u64>) -> io::Result<()>;
/// Returns an estimate of the compression ratio. /// Returns an estimate of the compression ratio.
/// If the codec is not applicable, returns `None`.
///
/// The baseline is uncompressed 64bit data. /// The baseline is uncompressed 64bit data.
/// ///
/// It could make sense to also return a value representing /// It could make sense to also return a value representing
/// computational complexity. /// computational complexity.
fn estimate(fastfield_accessor: &impl Column) -> Option<f32>; fn estimate_compression_ratio(
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> f32;
/// Serializes the data using the serializer into write.
/// There are multiple iterators, in case the codec needs to read the data multiple times.
/// The iterators should be preferred over using fastfield_accessor for performance reasons.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()>;
}
/// FastFieldDataAccess is the trait to access fast field data during serialization and estimation.
pub trait FastFieldDataAccess {
/// Return the value associated to the given position.
///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
/// reasons.
///
/// # Panics
///
/// May panic if `position` is greater than the index.
fn get_val(&self, position: u64) -> u64;
} }
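For context, the estimates are meant to be compared across codecs, with f32::MAX effectively marking a codec that is not applicable (as the benchmark and tests below do). A hedged sketch of such a selection step, where pick_codec and the (ratio, id) pairs are purely hypothetical:

// Hypothetical selection helper: each candidate reports
// (estimated compression ratio, codec ID); the smallest ratio wins,
// and f32::MAX marks a codec that declared itself not applicable.
fn pick_codec(estimates: &[(f32, u8)]) -> Option<u8> {
    estimates
        .iter()
        .filter(|(ratio, _)| *ratio < f32::MAX)
        .min_by(|a, b| a.0.partial_cmp(&b.0).unwrap())
        .map(|&(_, id)| id)
}

fn main() {
    // (ratio, ID) pairs; the IDs here are arbitrary example values.
    let estimates = [(0.125_f32, 1), (0.05, 3), (f32::MAX, 5)];
    assert_eq!(pick_codec(&estimates), Some(3));
}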
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@@ -90,102 +78,60 @@ pub struct FastFieldStats {
pub num_vals: u64, pub num_vals: u64,
} }
struct VecColum<'a>(&'a [u64]); impl<'a> FastFieldDataAccess for &'a [u64] {
impl<'a> Column for VecColum<'a> {
fn get_val(&self, position: u64) -> u64 { fn get_val(&self, position: u64) -> u64 {
self.0[position as usize] self[position as usize]
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(self.0.iter().cloned())
}
fn min_value(&self) -> u64 {
self.0.iter().min().cloned().unwrap_or(0)
}
fn max_value(&self) -> u64 {
self.0.iter().max().cloned().unwrap_or(0)
}
fn num_vals(&self) -> u64 {
self.0.len() as u64
} }
} }
impl<'a> From<&'a [u64]> for VecColum<'a> { impl FastFieldDataAccess for Vec<u64> {
fn from(data: &'a [u64]) -> Self { fn get_val(&self, position: u64) -> u64 {
Self(data) self[position as usize]
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use proptest::prelude::*; use crate::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
use proptest::strategy::Strategy; use crate::piecewise_linear::{
use proptest::{prop_oneof, proptest}; PiecewiseLinearFastFieldReader, PiecewiseLinearFastFieldSerializer,
};
use crate::bitpacked::BitpackedCodec; pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::linear::LinearCodec;
pub fn create_and_validate<Codec: FastFieldCodec>(
data: &[u64], data: &[u64],
name: &str, name: &str,
) -> Option<(f32, f32)> { ) -> (f32, f32) {
let estimation = Codec::estimate(&VecColum::from(data))?; if !S::is_applicable(&data, crate::tests::stats_from_vec(data)) {
return (f32::MAX, 0.0);
let mut out: Vec<u8> = Vec::new(); }
Codec::serialize(&mut out, &VecColum::from(data)).unwrap(); let estimation = S::estimate_compression_ratio(&data, crate::tests::stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
crate::tests::stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let reader = R::open_from_bytes(&out).unwrap();
for (doc, orig_val) in data.iter().enumerate() {
let val = reader.get_u64(doc as u64, &out);
if val != *orig_val {
panic!(
"val {:?} does not match orig_val {:?}, in data set {}, data {:?}",
val, orig_val, name, data
);
}
}
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0); let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
(estimation, actual_compression)
let reader = Codec::open_from_bytes(OwnedBytes::new(out)).unwrap();
assert_eq!(reader.num_vals(), data.len() as u64);
for (doc, orig_val) in data.iter().copied().enumerate() {
let val = reader.get_val(doc as u64);
assert_eq!(
val, orig_val,
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
`{data:?}`",
);
}
Some((estimation, actual_compression))
} }
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
proptest! {
#![proptest_config(ProptestConfig::with_cases(100))]
#[test]
fn test_proptest_small(data in proptest::collection::vec(num_strategy(), 1..10)) {
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]
#[test]
fn test_proptest_large(data in proptest::collection::vec(num_strategy(), 1..6000)) {
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
}
}
fn num_strategy() -> impl Strategy<Value = u64> {
prop_oneof![
1 => prop::num::u64::ANY.prop_map(|num| u64::MAX - (num % 10) ),
1 => prop::num::u64::ANY.prop_map(|num| num % 10 ),
20 => prop::num::u64::ANY,
]
}
pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![]; let mut data_and_names = vec![];
let data = (10..=10_000_u64).collect::<Vec<_>>(); let data = (10..=20_u64).collect::<Vec<_>>();
data_and_names.push((data, "simple monotonically increasing")); data_and_names.push((data, "simple monotonically increasing"));
data_and_names.push(( data_and_names.push((
@@ -195,93 +141,94 @@ mod tests {
data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small")); data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
data_and_names.push((vec![10], "single value")); data_and_names.push((vec![10], "single value"));
data_and_names.push((
vec![1572656989877777, 1170935903116329, 720575940379279, 0],
"overflow error",
));
data_and_names data_and_names
} }
fn test_codec<C: FastFieldCodec>() { fn test_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>() {
let codec_name = format!("{:?}", C::CODEC_TYPE); let codec_name = S::NAME;
for (data, dataset_name) in get_codec_test_datasets() { for (data, data_set_name) in get_codec_test_data_sets() {
let estimate_actual_opt: Option<(f32, f32)> = let (estimate, actual) =
crate::tests::create_and_validate::<C>(&data, dataset_name); crate::tests::create_and_validate::<S, R>(&data, data_set_name);
let result = if let Some((estimate, actual)) = estimate_actual_opt { let result = if estimate == f32::MAX {
format!("Estimate `{estimate}` Actual `{actual}`")
} else {
"Disabled".to_string() "Disabled".to_string()
} else {
format!("Estimate {:?} Actual {:?} ", estimate, actual)
}; };
println!("Codec {codec_name}, DataSet {dataset_name}, {result}"); println!(
"Codec {}, DataSet {}, {}",
codec_name, data_set_name, result
);
} }
} }
#[test] #[test]
fn test_codec_bitpacking() { fn test_codec_bitpacking() {
test_codec::<BitpackedCodec>(); test_codec::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>();
} }
#[test] #[test]
fn test_codec_interpolation() { fn test_codec_piecewise_linear() {
test_codec::<LinearCodec>(); test_codec::<PiecewiseLinearFastFieldSerializer, PiecewiseLinearFastFieldReader>();
}
#[test]
fn test_codec_multi_interpolation() {
test_codec::<BlockwiseLinearCodec>();
} }
use super::*; use super::*;
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}
#[test] #[test]
fn estimation_good_interpolation_case() { fn estimation_good_interpolation_case() {
let data = (10..=20000_u64).collect::<Vec<_>>(); let data = (10..=20000_u64).collect::<Vec<_>>();
let data: VecColum = data.as_slice().into();
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap(); let piecewise_interpol_estimation =
assert_le!(linear_interpol_estimation, 0.01); PiecewiseLinearFastFieldSerializer::estimate_compression_ratio(
&data,
stats_from_vec(&data),
);
assert_le!(piecewise_interpol_estimation, 0.2);
let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap(); let bitpacked_estimation =
assert_le!(multi_linear_interpol_estimation, 0.2); BitpackedFastFieldSerializer::estimate_compression_ratio(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation); assert_le!(piecewise_interpol_estimation, bitpacked_estimation);
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
assert_le!(linear_interpol_estimation, bitpacked_estimation);
} }
#[test] #[test]
fn estimation_test_bad_interpolation_case() { fn estimation_test_bad_interpolation_case() {
let data: &[u64] = &[200, 10, 10, 10, 10, 1000, 20]; let data = vec![200, 10, 10, 10, 10, 1000, 20];
let data: VecColum = data.into(); let piecewise_interpol_estimation =
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap(); PiecewiseLinearFastFieldSerializer::estimate_compression_ratio(
assert_le!(linear_interpol_estimation, 0.32); &data,
stats_from_vec(&data),
);
assert_le!(piecewise_interpol_estimation, 0.32);
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap(); let bitpacked_estimation =
assert_le!(bitpacked_estimation, linear_interpol_estimation); BitpackedFastFieldSerializer::estimate_compression_ratio(&data, stats_from_vec(&data));
assert_le!(bitpacked_estimation, piecewise_interpol_estimation);
} }
#[test] #[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() { fn estimation_test_interpolation_case_monotonically_increasing() {
let mut data: Vec<u64> = (200..=20000_u64).collect(); let mut data = (200..=20000_u64).collect::<Vec<_>>();
data.push(1_000_000); data.push(1_000_000);
let data: VecColum = data.as_slice().into();
// in this case the linear interpolation can't in fact be worse than bitpacking, // in this case the linear interpolation can't in fact be worse than bitpacking,
// but the estimator adds some threshold, which leads to estimated worse behavior // but the estimator adds some threshold, which leads to estimated worse behavior
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap(); let piecewise_interpol_estimation =
assert_le!(linear_interpol_estimation, 0.35); PiecewiseLinearFastFieldSerializer::estimate_compression_ratio(
&data,
stats_from_vec(&data),
);
assert_le!(piecewise_interpol_estimation, 0.2);
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap(); let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate_compression_ratio(&data, stats_from_vec(&data));
println!("{}", bitpacked_estimation);
assert_le!(bitpacked_estimation, 0.32); assert_le!(bitpacked_estimation, 0.32);
assert_le!(bitpacked_estimation, linear_interpol_estimation); assert_le!(piecewise_interpol_estimation, bitpacked_estimation);
}
#[test]
fn test_fast_field_codec_type_to_code() {
let mut count_codec = 0;
for code in 0..=255 {
if let Some(codec_type) = FastFieldCodecType::from_code(code) {
assert_eq!(codec_type.to_code(), code);
count_codec += 1;
}
}
assert_eq!(count_codec, 4);
} }
} }

View File

@@ -2,23 +2,21 @@ use std::io::{self, Read, Write};
use std::ops::Sub; use std::ops::Sub;
use common::{BinarySerializable, FixedSize}; use common::{BinarySerializable, FixedSize};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker}; use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{Column, FastFieldCodec, FastFieldCodecType}; use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
/// Depending on the field type, a different /// Depending on the field type, a different
/// fast field is required. /// fast field is required.
#[derive(Clone)] #[derive(Clone)]
pub struct LinearReader { pub struct LinearInterpolFastFieldReader {
data: OwnedBytes,
bit_unpacker: BitUnpacker, bit_unpacker: BitUnpacker,
pub footer: LinearFooter, pub footer: LinearInterpolFooter,
pub slope: f32, pub slope: f32,
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct LinearFooter { pub struct LinearInterpolFooter {
pub relative_max_value: u64, pub relative_max_value: u64,
pub offset: u64, pub offset: u64,
pub first_val: u64, pub first_val: u64,
@@ -28,7 +26,7 @@ pub struct LinearFooter {
pub max_value: u64, pub max_value: u64,
} }
impl BinarySerializable for LinearFooter { impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?; self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?; self.offset.serialize(write)?;
@@ -40,8 +38,8 @@ impl BinarySerializable for LinearFooter {
Ok(()) Ok(())
} }
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearFooter> { fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearFooter { Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?, relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?, offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?, first_val: u64::deserialize(reader)?,
@@ -53,15 +51,29 @@ impl BinarySerializable for LinearFooter {
} }
} }
impl FixedSize for LinearFooter { impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56; const SIZE_IN_BYTES: usize = 56;
} }
impl Column for LinearReader { impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline] #[inline]
fn get_val(&self, doc: u64) -> u64 { fn get_u64(&self, idx: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope); let calculated_value = get_calculated_value(self.footer.first_val, idx, self.slope);
(calculated_value + self.bit_unpacker.get(doc, &self.data)) - self.footer.offset (calculated_value + self.bit_unpacker.get(idx, data)) - self.footer.offset
} }
#[inline] #[inline]
@@ -72,87 +84,52 @@ impl Column for LinearReader {
fn max_value(&self) -> u64 { fn max_value(&self) -> u64 {
self.footer.max_value self.footer.max_value
} }
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
} }
/// Fastfield serializer, which tries to guess values by linear interpolation /// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked. /// and stores the difference bitpacked.
pub struct LinearCodec; #[deprecated(
note = "Linear interpolation works best only on very rare cases and piecewise linear codec \
already works great on them."
)]
pub struct LinearInterpolFastFieldSerializer {}
#[inline] #[inline]
pub(crate) fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 { fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 { if num_vals <= 1 {
return 0.0; return 0.0;
} }
// We calculate the slope with f64 high precision and use the result in lower precision f32 // We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX // This is done in order to handle estimations for very large values like i64::MAX
let diff = diff(last_val, first_val); ((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
(diff / (num_vals - 1) as f64) as f32
}
/// Delay the cast, to improve precision for very large u64 values.
///
/// Since i64 is mapped monotonically to u64 space, 0i64 ends up near i64::MAX after the mapping.
/// So very large values are not uncommon.
///
/// ```rust
/// let val1 = i64::MAX;
/// let val2 = i64::MAX - 100;
/// assert_eq!(val1 - val2, 100);
/// assert_eq!(val1 as f64 - val2 as f64, 0.0);
/// ```
fn diff(val1: u64, val2: u64) -> f64 {
if val1 >= val2 {
(val1 - val2) as f64
} else {
(val2 - val1) as f64 * -1.0
}
} }
#[inline] #[inline]
pub fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 { fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
if slope < 0.0 { first_val + (pos as f32 * slope) as u64
first_val.saturating_sub((pos as f32 * -slope) as u64)
} else {
first_val.saturating_add((pos as f32 * slope) as u64)
}
} }
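To see what actually gets stored, here is a small self-contained sketch of the residuals; predicted is an illustrative helper, not the crate's function.

// Each value is predicted as first_val + pos * slope; only the residual,
// shifted by an offset so it is never negative, gets bitpacked.
fn predicted(first_val: u64, pos: u64, slope: f32) -> i64 {
    first_val as i64 + (pos as f32 * slope) as i64
}

fn main() {
    let data = [10_u64, 21, 29, 41, 50];
    let slope = (data[4] - data[0]) as f32 / (data.len() - 1) as f32; // 10.0
    let residuals: Vec<i64> = data
        .iter()
        .enumerate()
        .map(|(pos, &val)| val as i64 - predicted(data[0], pos as u64, slope))
        .collect();
    assert_eq!(residuals, vec![0, 1, -1, 1, 0]);
    // The serializer adds an offset (here 1) so every residual is >= 0,
    // then bitpacks them with compute_num_bits(relative_max_value) bits.
}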
impl FastFieldCodec for LinearCodec { #[allow(deprecated)]
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear; impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
type Reader = LinearReader; const ID: u8 = 2;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - LinearFooter::SIZE_IN_BYTES;
let (data, mut footer) = bytes.split(footer_offset);
let footer = LinearFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearReader {
data,
bit_unpacker,
footer,
slope,
})
}
/// Creates a new fast field serializer. /// Creates a new fast field serializer.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> { fn serialize(
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value()); write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0); let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1); let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals()); let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive // calculate offset to ensure all values are positive
let mut offset = 0; let mut offset = 0;
let mut rel_positive_max = 0; let mut rel_positive_max = 0;
for (pos, actual_value) in fastfield_accessor.iter().enumerate() { for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope); let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value { if calculated_value > actual_value {
// negative value we need to apply an offset // negative value we need to apply an offset
@@ -170,55 +147,62 @@ impl FastFieldCodec for LinearCodec {
let num_bits = compute_num_bits(relative_max_value); let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new(); let mut bit_packer = BitPacker::new();
for (pos, val) in fastfield_accessor.iter().enumerate() { for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope); let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value; let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?; bit_packer.write(diff, num_bits, write)?;
} }
bit_packer.close(write)?; bit_packer.close(write)?;
let footer = LinearFooter { let footer = LinearInterpolFooter {
relative_max_value, relative_max_value,
offset, offset,
first_val, first_val,
last_val, last_val,
num_vals: fastfield_accessor.num_vals(), num_vals: stats.num_vals,
min_value: fastfield_accessor.min_value(), min_value: stats.min_value,
max_value: fastfield_accessor.max_value(), max_value: stats.max_value,
}; };
footer.serialize(write)?; footer.serialize(write)?;
Ok(()) Ok(())
} }
fn is_applicable(
/// estimation for linear interpolation is hard because you don't know _fastfield_accessor: &impl FastFieldDataAccess,
/// where the local maxima for the deviation of the calculated value are and stats: FastFieldStats,
/// the offset to shift all values to >=0 is also unknown. ) -> bool {
#[allow(clippy::question_mark)] if stats.num_vals < 3 {
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> { return false; // disable compressor for this case
if fastfield_accessor.num_vals() < 3 {
return None; // disable compressor for this case
} }
// On serialisation the offset is added to the actual value. // On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues. // We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add this to the max value. // For this we take the maximum theoretical offset and add this to the max value.
// If this doesn't overflow the algorithm should be fine // If this doesn't overflow the algorithm should be fine
let theorethical_maximum_offset = let theorethical_maximum_offset = stats.max_value - stats.min_value;
fastfield_accessor.max_value() - fastfield_accessor.min_value(); if stats
if fastfield_accessor .max_value
.max_value()
.checked_add(theorethical_maximum_offset) .checked_add(theorethical_maximum_offset)
.is_none() .is_none()
{ {
return None; return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima for the deviation of the calculated value are and
/// the offset to shift all values to >=0 is also unknown.
fn estimate_compression_ratio(
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> f32 {
if stats.num_vals < 3 {
return f32::MAX;
} }
let first_val = fastfield_accessor.get_val(0); let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1); let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals()); let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10% .. 95%, 100% // let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = fastfield_accessor.num_vals() as f32 / 100.0; let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20) let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize) .map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@@ -240,11 +224,10 @@ impl FastFieldCodec for LinearCodec {
// //
let relative_max_value = (max_distance as f32 * 1.5) * 2.0; let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
* fastfield_accessor.num_vals() + LinearInterpolFooter::SIZE_IN_BYTES as u64;
+ LinearFooter::SIZE_IN_BYTES as u64; let num_bits_uncompressed = 64 * stats.num_vals;
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals(); num_bits as f32 / num_bits_uncompressed as f32
Some(num_bits as f32 / num_bits_uncompressed as f32)
} }
} }
@@ -257,50 +240,32 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
} }
} }
#[allow(deprecated)]
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use rand::RngCore;
use super::*; use super::*;
use crate::tests::get_codec_test_datasets; use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> { fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<LinearCodec>(data, name) crate::tests::create_and_validate::<
} LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
#[test] >(data, name)
fn get_calculated_value_test() {
// pos slope
assert_eq!(get_calculated_value(100, 10, 5.0), 150);
// neg slope
assert_eq!(get_calculated_value(100, 10, -5.0), 50);
// pos slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, 5.0),
i64::MAX as u64 + 50
);
// neg slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, -5.0),
i64::MAX as u64 - 50
);
} }
#[test] #[test]
fn test_compression() { fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>(); let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) = let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large").unwrap(); create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01); assert!(actual_compression < 0.01);
assert!(estimate < 0.01); assert!(estimate < 0.01);
} }
#[test] #[test]
fn test_with_codec_datasets() { fn test_with_codec_data_sets() {
let data_sets = get_codec_test_datasets(); let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets { for (mut data, name) in data_sets {
create_and_validate(&data, name); create_and_validate(&data, name);
data.reverse(); data.reverse();
@@ -317,13 +282,6 @@ mod tests {
create_and_validate(&data, "large amplitude"); create_and_validate(&data, "large amplitude");
} }
#[test]
fn overflow_error_test() {
let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
create_and_validate(&data, "overflow test");
}
#[test] #[test]
fn linear_interpol_fast_concave_data() { fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50]; let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
@@ -343,10 +301,12 @@ mod tests {
#[test] #[test]
fn linear_interpol_fast_field_rand() { fn linear_interpol_fast_field_rand() {
let mut rng = rand::thread_rng(); for _ in 0..10 {
for _ in 0..50 { let mut data = (5_000..20_000)
let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>(); .map(|_| rand::random::<u32>() as u64)
.collect::<Vec<_>>();
create_and_validate(&data, "random"); create_and_validate(&data, "random");
data.reverse(); data.reverse();
create_and_validate(&data, "random"); create_and_validate(&data, "random");
} }

View File

@@ -1,70 +1,77 @@
#[macro_use] #[macro_use]
extern crate prettytable; extern crate prettytable;
use fastfield_codecs::bitpacked::BitpackedCodec; use std::fs::File;
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec; use std::io;
use fastfield_codecs::linear::LinearCodec; use std::io::BufRead;
use fastfield_codecs::{Column, FastFieldCodec, FastFieldCodecType, FastFieldStats}; use std::time::{Duration, Instant};
use common::f64_to_u64;
use fastfield_codecs::bitpacked::BitpackedFastFieldReader;
#[cfg(feature = "unstable")]
use fastfield_codecs::frame_of_reference::{FORFastFieldReader, FORFastFieldSerializer};
use fastfield_codecs::piecewise_linear::{
PiecewiseLinearFastFieldReader, PiecewiseLinearFastFieldSerializer,
};
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldStats};
use prettytable::{Cell, Row, Table}; use prettytable::{Cell, Row, Table};
use rand::prelude::StdRng;
struct Data<'a>(&'a [u64]); use rand::Rng;
impl<'a> Column for Data<'a> {
fn get_val(&self, position: u64) -> u64 {
self.0[position as usize]
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(self.0.iter().cloned())
}
fn min_value(&self) -> u64 {
*self.0.iter().min().unwrap_or(&0)
}
fn max_value(&self) -> u64 {
*self.0.iter().max().unwrap_or(&0)
}
fn num_vals(&self) -> u64 {
self.0.len() as u64
}
}
fn main() { fn main() {
let mut table = Table::new(); let mut table = Table::new();
// Add a row per time // Add a row per time
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]); table.add_row(row![
"",
"Compression ratio",
"Compression ratio estimation",
"Compression time (micro)",
"Reading time (micro)"
]);
for (data, data_set_name) in get_codec_test_data_sets() { for (data, data_set_name) in get_codec_test_data_sets() {
let results: Vec<(f32, f32, FastFieldCodecType)> = [ let mut results = vec![];
serialize_with_codec::<LinearCodec>(&data), let res = serialize_with_codec::<
serialize_with_codec::<BlockwiseLinearCodec>(&data), PiecewiseLinearFastFieldSerializer,
serialize_with_codec::<BlockwiseLinearCodec>(&data), PiecewiseLinearFastFieldReader,
serialize_with_codec::<BitpackedCodec>(&data), >(&data);
] results.push(res);
.into_iter() #[cfg(feature = "unstable")]
.flatten() {
.collect(); let res = serialize_with_codec::<FORFastFieldSerializer, FORFastFieldReader>(&data);
results.push(res);
}
let res = serialize_with_codec::<
fastfield_codecs::bitpacked::BitpackedFastFieldSerializer,
BitpackedFastFieldReader,
>(&data);
results.push(res);
let best_compression_ratio_codec = results let best_compression_ratio_codec = results
.iter() .iter()
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap()) .min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
.cloned() .cloned()
.unwrap(); .unwrap();
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")])); table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (est, comp, codec_type) in results { for (is_applicable, est, comp, name, compression_duration, read_duration) in results {
let est_cell = est.to_string(); let (est_cell, ratio_cell) = if !is_applicable {
let ratio_cell = comp.to_string(); ("Codec Disabled".to_string(), "".to_string())
} else {
(est.to_string(), comp.to_string())
};
let style = if comp == best_compression_ratio_codec.1 { let style = if comp == best_compression_ratio_codec.1 {
"Fb" "Fb"
} else { } else {
"" ""
}; };
table.add_row(Row::new(vec![ table.add_row(Row::new(vec![
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"), Cell::new(name).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style), Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""), Cell::new(&est_cell).style_spec(""),
Cell::new(&compression_duration.as_micros().to_string()),
Cell::new(&read_duration.as_micros().to_string()),
])); ]));
} }
} }
@@ -86,7 +93,6 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
current_cumulative current_cumulative
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing concave")); data_and_names.push((data, "Monotonically increasing concave"));
let mut current_cumulative = 0; let mut current_cumulative = 0;
@@ -99,23 +105,104 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing convex")); data_and_names.push((data, "Monotonically increasing convex"));
let mut rng: StdRng = rand::SeedableRng::seed_from_u64(1);
let data = (1000..=200_000_u64) let data = (1000..=200_000_u64)
.map(|num| num + rand::random::<u8>() as u64) .map(|num| num + rng.gen::<u8>() as u64)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
data_and_names.push((data, "Almost monotonically increasing")); data_and_names.push((data, "Almost monotonically increasing"));
let data = (1000..=200_000_u64)
.map(|_| rng.gen::<u8>() as u64)
.collect::<Vec<_>>();
data_and_names.push((data, "Random"));
let mut data = load_dataset("datasets/hdfs_logs_timestamps.txt");
data_and_names.push((data.clone(), "HDFS logs timestamps"));
data.sort_unstable();
data_and_names.push((data, "HDFS logs timestamps SORTED"));
let data = load_dataset("datasets/http_logs_timestamps.txt");
data_and_names.push((data, "HTTP logs timestamps SORTED"));
let mut data = load_dataset("datasets/amazon_reviews_product_ids.txt");
data_and_names.push((data.clone(), "Amazon review product ids"));
data.sort_unstable();
data_and_names.push((data, "Amazon review product ids SORTED"));
let data = load_float_dataset("datasets/nooc_temperatures.txt");
data_and_names.push((data, "Temperatures"));
data_and_names data_and_names
} }
pub fn serialize_with_codec<C: FastFieldCodec>( pub fn load_dataset(file_path: &str) -> Vec<u64> {
println!("Load dataset from `{}`", file_path);
let file = File::open(file_path).expect("Error when opening file.");
let lines = io::BufReader::new(file).lines();
let mut data = Vec::new();
for line in lines {
let l = line.unwrap();
data.push(l.parse::<u64>().unwrap());
}
data
}
pub fn load_float_dataset(file_path: &str) -> Vec<u64> {
println!("Load float dataset from `{}`", file_path);
let file = File::open(file_path).expect("Error when opening file.");
let lines = io::BufReader::new(file).lines();
let mut data = Vec::new();
for line in lines {
let line_string = line.unwrap();
let value = line_string.parse::<f64>().unwrap();
data.push(f64_to_u64(value));
}
data
}
pub fn serialize_with_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
data: &[u64], data: &[u64],
) -> Option<(f32, f32, FastFieldCodecType)> { ) -> (bool, f32, f32, &'static str, Duration, Duration) {
let data = Data(data); let is_applicable = S::is_applicable(&data, stats_from_vec(data));
let estimation = C::estimate(&data)?; if !is_applicable {
let mut out = Vec::new(); return (
C::serialize(&mut out, &data).unwrap(); false,
let actual_compression = out.len() as f32 / (data.num_vals() * 8) as f32; 0.0,
Some((estimation, actual_compression, C::CODEC_TYPE)) 0.0,
S::NAME,
Duration::from_secs(0),
Duration::from_secs(0),
);
}
let start_time_compression = Instant::now();
let estimation = S::estimate_compression_ratio(&data, stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let elasped_time_compression = start_time_compression.elapsed();
let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
let reader = R::open_from_bytes(&out).unwrap();
let start_time_read = Instant::now();
for doc in 0..data.len() {
reader.get_u64(doc as u64, &out);
}
let elapsed_time_read = start_time_read.elapsed();
(
true,
estimation,
actual_compression,
S::NAME,
elasped_time_compression,
elapsed_time_read,
)
} }
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats { pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {

View File

@@ -1,4 +1,4 @@
//! The BlockwiseLinear codec uses linear interpolation to guess values and stores the //! MultiLinearInterpol compressor uses linear interpolation to guess values and stores the
//! offset, but in blocks of 512. //! offset, but in blocks of 512.
//! //!
//! With a CHUNK_SIZE of 512 and 29 bytes of metadata per block, we get an overhead for metadata of 232 / //! With a CHUNK_SIZE of 512 and 29 bytes of metadata per block, we get an overhead for metadata of 232 /
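For scale, the arithmetic implied by those numbers: 29 bytes of per-block metadata is 232 bits, and 232 bits spread over a 512-value chunk is roughly 0.45 extra bits per value.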
@@ -14,25 +14,22 @@ use std::io::{self, Read, Write};
use std::ops::Sub; use std::ops::Sub;
use common::{BinarySerializable, CountingWriter, DeserializeFrom}; use common::{BinarySerializable, CountingWriter, DeserializeFrom};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker}; use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::linear::{get_calculated_value, get_slope}; use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
use crate::{Column, FastFieldCodec, FastFieldCodecType};
const CHUNK_SIZE: u64 = 512; const CHUNK_SIZE: u64 = 512;
/// Depending on the field type, a different /// Depending on the field type, a different
/// fast field is required. /// fast field is required.
#[derive(Clone)] #[derive(Clone)]
pub struct BlockwiseLinearReader { pub struct MultiLinearInterpolFastFieldReader {
data: OwnedBytes, pub footer: MultiLinearInterpolFooter,
pub footer: BlockwiseLinearFooter,
} }
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, Default)]
struct Function { struct Function {
// The offset in the data is required because we have different bit_widths per block // The offset in the data is required because we have different bit_widths per block
data_start_offset: u64, data_start_offset: u64,
// start_pos in the block will be CHUNK_SIZE * BLOCK_NUM // start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
start_pos: u64, start_pos: u64,
@@ -102,14 +99,14 @@ impl BinarySerializable for Function {
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct BlockwiseLinearFooter { pub struct MultiLinearInterpolFooter {
pub num_vals: u64, pub num_vals: u64,
pub min_value: u64, pub min_value: u64,
pub max_value: u64, pub max_value: u64,
interpolations: Vec<Function>, interpolations: Vec<Function>,
} }
impl BinarySerializable for BlockwiseLinearFooter { impl BinarySerializable for MultiLinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
let mut out = vec![]; let mut out = vec![];
self.num_vals.serialize(&mut out)?; self.num_vals.serialize(&mut out)?;
@@ -121,8 +118,8 @@ impl BinarySerializable for BlockwiseLinearFooter {
Ok(()) Ok(())
} }
fn deserialize<R: Read>(reader: &mut R) -> io::Result<BlockwiseLinearFooter> { fn deserialize<R: Read>(reader: &mut R) -> io::Result<MultiLinearInterpolFooter> {
let mut footer = BlockwiseLinearFooter { let mut footer = MultiLinearInterpolFooter {
num_vals: u64::deserialize(reader)?, num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?, min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?, max_value: u64::deserialize(reader)?,
@@ -146,20 +143,29 @@ fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Functio
&interpolations[get_interpolation_position(doc)] &interpolations[get_interpolation_position(doc)]
} }
impl Column for BlockwiseLinearReader { impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let (_data, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
let footer = MultiLinearInterpolFooter::deserialize(&mut footer)?;
Ok(MultiLinearInterpolFastFieldReader { footer })
}
#[inline] #[inline]
fn get_val(&self, idx: u64) -> u64 { fn get_u64(&self, idx: u64, data: &[u8]) -> u64 {
let interpolation = get_interpolation_function(idx, &self.footer.interpolations); let interpolation = get_interpolation_function(idx, &self.footer.interpolations);
let in_block_idx = idx - interpolation.start_pos; let block_idx = idx - interpolation.start_pos;
let calculated_value = get_calculated_value( let calculated_value = get_calculated_value(
interpolation.value_start_pos, interpolation.value_start_pos,
in_block_idx, block_idx,
interpolation.slope, interpolation.slope,
); );
let diff = interpolation.bit_unpacker.get( let diff = interpolation
in_block_idx, .bit_unpacker
&self.data[interpolation.data_start_offset as usize..], .get(block_idx, &data[interpolation.data_start_offset as usize..]);
);
(calculated_value + diff) - interpolation.positive_val_offset (calculated_value + diff) - interpolation.positive_val_offset
} }
@@ -171,38 +177,44 @@ impl Column for BlockwiseLinearReader {
fn max_value(&self) -> u64 { fn max_value(&self) -> u64 {
self.footer.max_value self.footer.max_value
} }
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
} }
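A hedged sketch of the per-block lookup used by the reader above (block_of is a made-up helper): the index first selects which interpolation function applies, then the position within that block.

// Illustrative block lookup: which interpolation function owns `idx`,
// and where inside that block the value sits.
const CHUNK_SIZE: u64 = 512;

fn block_of(idx: u64) -> (usize, u64) {
    ((idx / CHUNK_SIZE) as usize, idx % CHUNK_SIZE)
}

fn main() {
    assert_eq!(block_of(0), (0, 0));
    assert_eq!(block_of(511), (0, 511));
    assert_eq!(block_of(512), (1, 0));
    assert_eq!(block_of(1_300), (2, 276));
    // The reader then evaluates that block's own line (value_start_pos + slope)
    // and adds the bitpacked correction, as in get_u64 above.
}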
/// Same as LinearSerializer, but working on chunks of CHUNK_SIZE elements. #[inline]
pub struct BlockwiseLinearCodec; fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
impl FastFieldCodec for BlockwiseLinearCodec { #[inline]
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear; fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
(first_val as i64 + (pos as f32 * slope) as i64) as u64
}
type Reader = BlockwiseLinearReader; /// Same as LinearInterpolFastFieldSerializer, but working on chunks of CHUNK_SIZE elements.
#[deprecated(
/// Opens a fast field given a file. note = "MultiLinearInterpol is replaced by PiecewiseLinear codec which fixes the slope and is \
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> { a little bit more optimized."
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?; )]
let footer_offset = bytes.len() - 4 - footer_len as usize; pub struct MultiLinearInterpolFastFieldSerializer {}
let (data, mut footer) = bytes.split(footer_offset);
let footer = BlockwiseLinearFooter::deserialize(&mut footer)?;
Ok(BlockwiseLinearReader { data, footer })
}
#[allow(deprecated)]
impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
const NAME: &'static str = "MultiLinearInterpol";
const ID: u8 = 3;
/// Creates a new fast field serializer. /// Creates a new fast field serializer.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> { fn serialize(
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value()); write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0); let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1); let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let mut first_function = Function { let mut first_function = Function {
end_pos: fastfield_accessor.num_vals(), end_pos: stats.num_vals,
value_start_pos: first_val, value_start_pos: first_val,
value_end_pos: last_val, value_end_pos: last_val,
..Default::default() ..Default::default()
@@ -213,7 +225,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
// Since we potentially apply multiple passes over the data, the data is cached. // Since we potentially apply multiple passes over the data, the data is cached.
// Multiple iterations can be expensive (merge with index sorting can add a lot of overhead per // Multiple iterations can be expensive (merge with index sorting can add a lot of overhead per
// iteration) // iteration)
let data = fastfield_accessor.iter().collect::<Vec<_>>(); let data = data_iter.collect::<Vec<_>>();
//// let's split this into chunks of CHUNK_SIZE //// let's split this into chunks of CHUNK_SIZE
for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) { for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
@@ -276,47 +288,52 @@ impl FastFieldCodec for BlockwiseLinearCodec {
} }
bit_packer.close(write)?; bit_packer.close(write)?;
let footer = BlockwiseLinearFooter { let footer = MultiLinearInterpolFooter {
num_vals: fastfield_accessor.num_vals(), num_vals: stats.num_vals,
min_value: fastfield_accessor.min_value(), min_value: stats.min_value,
max_value: fastfield_accessor.max_value(), max_value: stats.max_value,
interpolations, interpolations,
}; };
footer.serialize(write)?; footer.serialize(write)?;
Ok(()) Ok(())
} }
/// estimation for linear interpolation is hard because, you don't know fn is_applicable(
/// where the local maxima are for the deviation of the calculated value and _fastfield_accessor: &impl FastFieldDataAccess,
/// the offset is also unknown. stats: FastFieldStats,
#[allow(clippy::question_mark)] ) -> bool {
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> { if stats.num_vals < 5_000 {
if fastfield_accessor.num_vals() < 10 * CHUNK_SIZE { return false;
return None;
} }
// On serialization the offset is added to the actual value. // On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues. // We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theroretical offset and add this to the max value. // For this we take the maximum theroretical offset and add this to the max value.
// If this doesn't overflow the algorithm should be fine // If this doesn't overflow the algortihm should be fine
let theorethical_maximum_offset = let theorethical_maximum_offset = stats.max_value - stats.min_value;
fastfield_accessor.max_value() - fastfield_accessor.min_value(); if stats
if fastfield_accessor .max_value
.max_value()
.checked_add(theorethical_maximum_offset) .checked_add(theorethical_maximum_offset)
.is_none() .is_none()
{ {
return None; return false;
} }
true
}
/// Estimation for linear interpolation is hard because, you don't know
/// where the local maxima are for the deviation of the calculated value and
/// the offset is also unknown.
fn estimate_compression_ratio(
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> f32 {
let first_val_in_first_block = fastfield_accessor.get_val(0); let first_val_in_first_block = fastfield_accessor.get_val(0);
let last_elem_in_first_chunk = CHUNK_SIZE.min(fastfield_accessor.num_vals()); let last_elem_in_first_chunk = CHUNK_SIZE.min(stats.num_vals);
let last_val_in_first_block = let last_val_in_first_block =
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1); fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
let slope = get_slope( let slope = get_slope(
first_val_in_first_block, first_val_in_first_block,
last_val_in_first_block, last_val_in_first_block,
fastfield_accessor.num_vals(), stats.num_vals,
); );
// let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only // let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
@@ -343,11 +360,11 @@ impl FastFieldCodec for BlockwiseLinearCodec {
// //
let relative_max_value = (max_distance as f32 * 1.5) * 2.0; let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * fastfield_accessor.num_vals() as u64 let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
// function metadata per block // function metadata per block
+ 29 * (fastfield_accessor.num_vals() / CHUNK_SIZE); + 29 * (stats.num_vals / CHUNK_SIZE);
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals(); let num_bits_uncompressed = 64 * stats.num_vals;
Some(num_bits as f32 / num_bits_uncompressed as f32) num_bits as f32 / num_bits_uncompressed as f32
} }
} }
@@ -360,37 +377,23 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
} }
#[cfg(test)] #[cfg(test)]
#[allow(deprecated)]
mod tests { mod tests {
use super::*; use super::*;
use crate::tests::get_codec_test_datasets; use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> { fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<BlockwiseLinearCodec>(data, name) crate::tests::create_and_validate::<
} MultiLinearInterpolFastFieldSerializer,
MultiLinearInterpolFastFieldReader,
const HIGHEST_BIT: u64 = 1 << 63; >(data, name)
pub fn i64_to_u64(val: i64) -> u64 {
(val as u64) ^ HIGHEST_BIT
}
#[test]
fn test_compression_i64() {
let data = (i64::MAX - 600_000..=i64::MAX - 550_000)
.map(i64_to_u64)
.collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large i64").unwrap();
assert!(actual_compression < 0.2);
assert!(estimate < 0.20);
assert!(estimate > 0.15);
assert!(actual_compression > 0.01);
} }
#[test] #[test]
fn test_compression() { fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>(); let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) = let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large").unwrap(); create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.2); assert!(actual_compression < 0.2);
assert!(estimate < 0.20); assert!(estimate < 0.20);
assert!(estimate > 0.15); assert!(estimate > 0.15);
@@ -399,7 +402,7 @@ mod tests {
#[test] #[test]
fn test_with_codec_data_sets() { fn test_with_codec_data_sets() {
let data_sets = get_codec_test_datasets(); let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets { for (mut data, name) in data_sets {
create_and_validate(&data, name); create_and_validate(&data, name);
data.reverse(); data.reverse();

View File

@@ -0,0 +1,365 @@
//! PiecewiseLinear codec uses piecewise linear functions for every block of 512 values to predict
//! fast field values. The difference with the real fast field values is then stored.
//! For every block, the linear function can be expressed as
//! `computed_value = slope * block_position + first_value + positive_offset`
//! where:
//! - `block_position` is the position inside of the block from 0 to 511
//! - `first_value` is the first value on the block
//! - `positive_offset` is computed such that we ensure the diff `real_value - computed_value` is
//! always positive.
//!
//! 21 bytes are needed to store the block metadata, which adds an overhead of 21 * 8 / 512 ≈ 0.33 bits
//! per element.
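
As a sanity check on the overhead figure above, here is a minimal, hedged sketch (illustrative only, not part of the codec) that reproduces the arithmetic from the `BlockMetadata` layout below: a u64 first value, a u64 positive offset, an f32 slope and a u8 bit width, i.e. 8 + 8 + 4 + 1 = 21 bytes per block of 512 values.

fn metadata_overhead_bits_per_value(block_size: u64) -> f64 {
    // first_value (8 bytes) + positive_offset (8) + slope (4) + num_bits (1) = 21 bytes
    let metadata_bytes: u64 = 8 + 8 + 4 + 1;
    (metadata_bytes * 8) as f64 / block_size as f64
}

// metadata_overhead_bits_per_value(512) == 168.0 / 512.0 ≈ 0.33 bits per value
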
use std::io::{self, Read, Write};
use std::ops::Sub;
use common::{BinarySerializable, DeserializeFrom};
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
const BLOCK_SIZE: u64 = 512;
#[derive(Clone)]
pub struct PiecewiseLinearFastFieldReader {
min_value: u64,
max_value: u64,
block_readers: Vec<BlockReader>,
}
/// Block that stores metadata to predict value with a linear
/// function `predicted_value = slope * position + first_value + positive_offset`
/// where `positive_offset` is computed such that predicted values
/// are always positive.
#[derive(Clone, Debug, Default)]
struct BlockMetadata {
first_value: u64,
positive_offset: u64,
slope: f32,
num_bits: u8,
}
#[derive(Clone, Debug, Default)]
struct BlockReader {
metadata: BlockMetadata,
start_offset: u64,
bit_unpacker: BitUnpacker,
}
impl BlockReader {
fn new(metadata: BlockMetadata, start_offset: u64) -> Self {
Self {
bit_unpacker: BitUnpacker::new(metadata.num_bits),
metadata,
start_offset,
}
}
#[inline]
fn get_u64(&self, block_pos: u64, data: &[u8]) -> u64 {
let diff = self
.bit_unpacker
.get(block_pos, &data[self.start_offset as usize..]);
let predicted_value =
predict_value(self.metadata.first_value, block_pos, self.metadata.slope);
(predicted_value + diff) - self.metadata.positive_offset
}
}
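
The decode in `get_u64` above mirrors the encode in the serializer further down: on write, `diff = (value + positive_offset) - predicted`, which is non-negative by construction of `positive_offset`; on read, the subtraction is reversed. A minimal, hedged sketch of this round trip (illustrative helper names, not the codec's API):

fn encode_diff(value: u64, predicted: u64, positive_offset: u64) -> u64 {
    // Non-negative as long as positive_offset >= predicted - value.
    (value + positive_offset) - predicted
}

fn decode_value(diff: u64, predicted: u64, positive_offset: u64) -> u64 {
    // Inverse of encode_diff: recovers the original value exactly.
    (predicted + diff) - positive_offset
}

// decode_value(encode_diff(v, p, off), p, off) == v whenever the additions do not overflow.
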
impl BinarySerializable for BlockMetadata {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.first_value.serialize(write)?;
self.positive_offset.serialize(write)?;
self.slope.serialize(write)?;
self.num_bits.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let first_value = u64::deserialize(reader)?;
let positive_offset = u64::deserialize(reader)?;
let slope = f32::deserialize(reader)?;
let num_bits = u8::deserialize(reader)?;
Ok(Self {
first_value,
positive_offset,
slope,
num_bits,
})
}
}
#[derive(Clone, Debug)]
pub struct PiecewiseLinearFooter {
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
block_metadatas: Vec<BlockMetadata>,
}
impl BinarySerializable for PiecewiseLinearFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
let mut out = vec![];
self.num_vals.serialize(&mut out)?;
self.min_value.serialize(&mut out)?;
self.max_value.serialize(&mut out)?;
self.block_metadatas.serialize(&mut out)?;
write.write_all(&out)?;
(out.len() as u32).serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let footer = Self {
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
block_metadatas: Vec::<BlockMetadata>::deserialize(reader)?,
};
Ok(footer)
}
}
impl FastFieldCodecReader for PiecewiseLinearFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let (_, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
let footer = PiecewiseLinearFooter::deserialize(&mut footer)?;
let mut block_readers = Vec::with_capacity(footer.block_metadatas.len());
let mut current_data_offset = 0;
for block_metadata in footer.block_metadatas.into_iter() {
let num_bits = block_metadata.num_bits;
block_readers.push(BlockReader::new(block_metadata, current_data_offset));
current_data_offset += num_bits as u64 * BLOCK_SIZE / 8;
}
Ok(Self {
min_value: footer.min_value,
max_value: footer.max_value,
block_readers,
})
}
#[inline]
fn get_u64(&self, idx: u64, data: &[u8]) -> u64 {
let block_idx = (idx / BLOCK_SIZE) as usize;
let block_pos = idx - (block_idx as u64) * BLOCK_SIZE;
let block_reader = &self.block_readers[block_idx];
block_reader.get_u64(block_pos, data)
}
#[inline]
fn min_value(&self) -> u64 {
self.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.max_value
}
}
#[inline]
fn predict_value(first_val: u64, pos: u64, slope: f32) -> u64 {
(first_val as i64 + (pos as f32 * slope) as i64) as u64
}
pub struct PiecewiseLinearFastFieldSerializer;
impl FastFieldCodecSerializer for PiecewiseLinearFastFieldSerializer {
const NAME: &'static str = "PiecewiseLinear";
const ID: u8 = 4;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
_: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
let mut data = data_iter.collect::<Vec<_>>();
let mut bit_packer = BitPacker::new();
let mut block_metadatas = Vec::new();
for data_pos in (0..data.len() as u64).step_by(BLOCK_SIZE as usize) {
let block_num_vals = BLOCK_SIZE.min(data.len() as u64 - data_pos) as usize;
let block_values = &mut data[data_pos as usize..data_pos as usize + block_num_vals];
let slope = if block_num_vals == 1 {
0f32
} else {
((block_values[block_values.len() - 1] as f64 - block_values[0] as f64)
/ (block_num_vals - 1) as f64) as f32
};
let first_value = block_values[0];
let mut positive_offset = 0;
let mut max_delta = 0;
for (pos, &current_value) in block_values[1..].iter().enumerate() {
let computed_value = predict_value(first_value, pos as u64 + 1, slope);
if computed_value > current_value {
positive_offset = positive_offset.max(computed_value - current_value);
} else {
max_delta = max_delta.max(current_value - computed_value);
}
}
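// The widest diff stored below is `max_delta + positive_offset`: every value gets
// `positive_offset` added before the prediction is subtracted, and values above the
// prediction add at most `max_delta` on top of that.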
let num_bits = compute_num_bits(max_delta + positive_offset);
for (pos, current_value) in block_values.iter().enumerate() {
let computed_value = predict_value(first_value, pos as u64, slope);
let diff = (current_value + positive_offset) - computed_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.flush(write)?;
block_metadatas.push(BlockMetadata {
first_value,
positive_offset,
slope,
num_bits,
});
}
bit_packer.close(write)?;
let footer = PiecewiseLinearFooter {
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
block_metadatas,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 10 * BLOCK_SIZE {
return false;
}
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation between calculated and actual values are,
/// and the offset is also unknown.
fn estimate_compression_ratio(
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> f32 {
let first_val_in_first_block = fastfield_accessor.get_val(0);
let last_elem_in_first_chunk = BLOCK_SIZE.min(stats.num_vals);
let last_val_in_first_block =
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
let slope = ((last_val_in_first_block as f64 - first_val_in_first_block as f64)
/ (stats.num_vals - 1) as f64) as f32;
// let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
let sample_positions = (0..20)
.map(|pos| (last_elem_in_first_chunk as f32 / 100.0 * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|&pos| {
let calculated_value = predict_value(first_val_in_first_block, pos as u64, slope);
let actual_value = fastfield_accessor.get_val(pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap();
// Estimate one block and extrapolate the cost to all blocks.
// The theory is that we don't have the actual max_distance, but we are within a 50%
// threshold of it.
// It is multiplied by 2 because the line is expected to deviate as much above as below,
// so the offset would equal max_distance.
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
// function metadata per block
+ 21 * (stats.num_vals / BLOCK_SIZE);
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
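
A hedged worked example of the estimate computed by `estimate_compression_ratio` above, with made-up numbers (the real values depend on the data): if sampling the first block yields `max_distance = 100`, then `relative_max_value = 100 * 1.5 * 2 = 300`, which needs 9 bits per value, and for 1_000_000 values the formula gives roughly a 0.14 compression ratio versus uncompressed 64-bit storage.

fn example_estimate() -> f32 {
    let num_vals: u64 = 1_000_000;
    // Assume sampling the first block gave max_distance = 100.
    let max_distance: u64 = 100;
    let relative_max_value = (max_distance as f32 * 1.5) * 2.0; // 300
    // Equivalent to compute_num_bits(300) == 9.
    let bits_per_value = 64 - (relative_max_value as u64).leading_zeros() as u64;
    let num_bits = bits_per_value * num_vals + 21 * (num_vals / 512);
    num_bits as f32 / (64 * num_vals) as f32 // ≈ 0.14
}
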
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
PiecewiseLinearFastFieldSerializer,
PiecewiseLinearFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.2);
assert!(estimate < 0.20);
assert!(estimate > 0.15);
assert!(actual_compression > 0.001);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn border_cases_1() {
let data = (0..1024).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn border_case_2() {
let data = (0..1025).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn rand() {
for _ in 0..10 {
let mut data = (5_000..20_000)
.map(|_| rand::random::<u32>() as u64)
.collect::<Vec<_>>();
let (estimate, actual_compression) = create_and_validate(&data, "random");
dbg!(estimate);
dbg!(actual_compression);
data.reverse();
create_and_validate(&data, "random");
}
}
}

View File

@@ -1,8 +1,8 @@
[package] [package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"] authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes" name = "ownedbytes"
version = "0.3.0" version = "0.2.0"
edition = "2021" edition = "2018"
description = "Expose data as static slice" description = "Expose data as static slice"
license = "MIT" license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -6,7 +6,7 @@ use std::{fmt, io, mem};
use stable_deref_trait::StableDeref; use stable_deref_trait::StableDeref;
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes /// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a slice. /// this data as a static slice.
/// ///
/// The backing object is required to be `StableDeref`. /// The backing object is required to be `StableDeref`.
#[derive(Clone)] #[derive(Clone)]
@@ -21,7 +21,7 @@ impl OwnedBytes {
OwnedBytes::new(&[][..]) OwnedBytes::new(&[][..])
} }
/// Creates an `OwnedBytes` instance given a `StableDeref` object. /// Creates an `OwnedBytes` intance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>( pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T, data_holder: T,
) -> OwnedBytes { ) -> OwnedBytes {

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tantivy-query-grammar" name = "tantivy-query-grammar"
version = "0.18.0" version = "0.15.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"] authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
@@ -9,9 +9,9 @@ homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy" repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md" readme = "README.md"
keywords = ["search", "information", "retrieval"] keywords = ["search", "information", "retrieval"]
edition = "2021" edition = "2018"
[dependencies] [dependencies]
combine = {version="4", default-features=false, features=[] } combine = {version="4", default-features=false, features=[] }
once_cell = "1.7.2" once_cell = "1.7.2"
regex ={ version = "1.5.4", default-features = false, features = ["std", "unicode"] } regex ={ version = "1.5.4", default-features = false, features = ["std"] }

View File

@@ -1,5 +1,3 @@
#![allow(clippy::derive_partial_eq_without_eq)]
mod occur; mod occur;
mod query_grammar; mod query_grammar;
mod user_input_ast; mod user_input_ast;

View File

@@ -2,11 +2,11 @@ use std::fmt;
use std::fmt::Write; use std::fmt::Write;
/// Defines whether a term in a query must be present, /// Defines whether a term in a query must be present,
/// should be present or must not be present. /// should be present or must be not present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)] #[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur { pub enum Occur {
/// For a given document to be considered for scoring, /// For a given document to be considered for scoring,
/// at least one of the terms with the Should or the Must /// at least one of the document with the Should or the Must
/// Occur constraint must be within the document. /// Occur constraint must be within the document.
Should, Should,
/// Document without the term are excluded from the search. /// Document without the term are excluded from the search.

View File

@@ -16,9 +16,9 @@ use crate::Occur;
// Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to // Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
// special characters. // special characters.
const SPECIAL_CHARS: &[char] = &[ const SPECIAL_CHARS: &[char] = &[
'+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ', '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
]; ];
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|!|\\|\*|\s)"#; const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*| )"#;
/// Parses a field_name /// Parses a field_name
/// A field name must have at least one character and be followed by a colon. /// A field name must have at least one character and be followed by a colon.
@@ -34,8 +34,7 @@ fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
take_while(|c| !SPECIAL_CHARS.contains(&c)), take_while(|c| !SPECIAL_CHARS.contains(&c)),
), ),
'\\', '\\',
satisfy(|_| true), /* if the next character is not a special char, the \ will be treated satisfy(|c| SPECIAL_CHARS.contains(&c)),
* as the \ character. */
)) ))
.skip(char(':')) .skip(char(':'))
.map(|s| ESCAPED_SPECIAL_CHARS_RE.replace_all(&s, "$1").to_string()) .map(|s| ESCAPED_SPECIAL_CHARS_RE.replace_all(&s, "$1").to_string())
@@ -67,7 +66,7 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
/// 2021-04-13T19:46:26.266051969+00:00 /// 2021-04-13T19:46:26.266051969+00:00
/// ///
/// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99 /// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
/// We delegate rejecting such invalid dates to the logical AST computation code /// We delegate rejecting such invalid dates to the logical AST compuation code
/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse /// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
/// it (instead of merely extracting the datetime value as string as done here). /// it (instead of merely extracting the datetime value as string as done here).
fn date_time<'a>() -> impl Parser<&'a str, Output = String> { fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
@@ -120,36 +119,22 @@ fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
fn term_val<'a>() -> impl Parser<&'a str, Output = String> { fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"')); let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
negative_number().or(phrase.or(word())) phrase.or(word())
} }
fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> { fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
(field_name(), term_val(), slop_val()).map(|(field_name, phrase, slop)| UserInputLiteral { let term_val_with_field = negative_number().or(term_val());
(field_name(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
field_name: Some(field_name), field_name: Some(field_name),
phrase, phrase,
slop,
})
}
fn slop_val<'a>() -> impl Parser<&'a str, Output = u32> {
let slop =
(char('~'), many1(digit())).and_then(|(_, slop): (_, String)| match slop.parse::<u32>() {
Ok(d) => Ok(d),
_ => Err(StringStreamError::UnexpectedParse),
});
optional(slop).map(|slop| match slop {
Some(d) => d,
_ => 0,
}) })
} }
fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> { fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let term_default_field = (term_val(), slop_val()).map(|(phrase, slop)| UserInputLiteral { let term_default_field = term_val().map(|phrase| UserInputLiteral {
field_name: None, field_name: None,
phrase, phrase,
slop,
}); });
attempt(term_query()) attempt(term_query())
.or(term_default_field) .or(term_default_field)
.map(UserInputLeaf::from) .map(UserInputLeaf::from)
@@ -299,7 +284,7 @@ fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> { fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt { (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
Some(boost) if (boost - 1.0).abs() > f64::EPSILON => { Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
UserInputAst::Boost(Box::new(leaf), boost) UserInputAst::Boost(Box::new(leaf), boost)
} }
_ => leaf, _ => leaf,
@@ -531,18 +516,14 @@ mod test {
} }
#[test] #[test]
fn test_field_name() { fn test_field_name() -> TestParseResult {
assert_eq!( assert_eq!(
super::field_name().parse(".my.field.name:a"), super::field_name().parse(".my.field.name:a"),
Ok((".my.field.name".to_string(), "a")) Ok((".my.field.name".to_string(), "a"))
); );
assert_eq!( assert_eq!(
super::field_name().parse(r#"にんじん:a"#), super::field_name().parse("my\\ field\\ name:a"),
Ok(("にんじん".to_string(), "a")) Ok(("my field name".to_string(), "a"))
);
assert_eq!(
super::field_name().parse(r#"my\field:a"#),
Ok((r#"my\field"#.to_string(), "a"))
); );
assert!(super::field_name().parse("my field:a").is_err()); assert!(super::field_name().parse("my field:a").is_err());
assert_eq!( assert_eq!(
@@ -553,32 +534,14 @@ mod test {
super::field_name().parse("my_field_name:a"), super::field_name().parse("my_field_name:a"),
Ok(("my_field_name".to_string(), "a")) Ok(("my_field_name".to_string(), "a"))
); );
assert_eq!(
super::field_name().parse("myfield.b:hello").unwrap(),
("myfield.b".to_string(), "hello")
);
assert_eq!(
super::field_name().parse(r#"myfield\.b:hello"#).unwrap(),
(r#"myfield\.b"#.to_string(), "hello")
);
assert!(super::field_name().parse("my_field_name").is_err()); assert!(super::field_name().parse("my_field_name").is_err());
assert!(super::field_name().parse(":a").is_err()); assert!(super::field_name().parse(":a").is_err());
assert!(super::field_name().parse("-my_field:a").is_err()); assert!(super::field_name().parse("-my_field:a").is_err());
assert_eq!( assert_eq!(
super::field_name().parse("_my_field:a"), super::field_name().parse("_my_field:a")?,
Ok(("_my_field".to_string(), "a")) ("_my_field".to_string(), "a")
); );
assert_eq!( Ok(())
super::field_name().parse("~my~field:a"),
Ok(("~my~field".to_string(), "a"))
);
for special_char in SPECIAL_CHARS.iter() {
let query = &format!("\\{special_char}my\\{special_char}field:a");
assert_eq!(
super::field_name().parse(query),
Ok((format!("{special_char}my{special_char}field"), "a"))
);
}
} }
#[test] #[test]
@@ -731,22 +694,4 @@ mod test {
); );
test_is_parse_err("abc + "); test_is_parse_err("abc + ");
} }
#[test]
fn test_slop() {
assert!(parse_to_ast().parse("\"a b\"~").is_err());
assert!(parse_to_ast().parse("foo:\"a b\"~").is_err());
assert!(parse_to_ast().parse("\"a b\"~a").is_err());
assert!(parse_to_ast().parse("\"a b\"~100000000000000000").is_err());
test_parse_query_to_ast_helper("\"a b\"^2~4", "(*(\"a b\")^2 *\"~4\")");
test_parse_query_to_ast_helper("\"~Document\"", "\"~Document\"");
test_parse_query_to_ast_helper("~Document", "\"~Document\"");
test_parse_query_to_ast_helper("a~2", "\"a~2\"");
test_parse_query_to_ast_helper("\"a b\"~0", "\"a b\"");
test_parse_query_to_ast_helper("\"a b\"~1", "\"a b\"~1");
test_parse_query_to_ast_helper("\"a b\"~3", "\"a b\"~3");
test_parse_query_to_ast_helper("foo:\"a b\"~300", "\"foo\":\"a b\"~300");
test_parse_query_to_ast_helper("\"a b\"~300^2", "(\"a b\"~300)^2");
}
} }

View File

@@ -40,19 +40,14 @@ impl Debug for UserInputLeaf {
pub struct UserInputLiteral { pub struct UserInputLiteral {
pub field_name: Option<String>, pub field_name: Option<String>,
pub phrase: String, pub phrase: String,
pub slop: u32,
} }
impl fmt::Debug for UserInputLiteral { impl fmt::Debug for UserInputLiteral {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
if let Some(ref field) = self.field_name { match self.field_name {
write!(formatter, "\"{}\":", field)?; Some(ref field_name) => write!(formatter, "\"{}\":\"{}\"", field_name, self.phrase),
None => write!(formatter, "\"{}\"", self.phrase),
} }
write!(formatter, "\"{}\"", self.phrase)?;
if self.slop > 0 {
write!(formatter, "~{}", self.slop)?;
}
Ok(())
} }
} }

View File

@@ -12,7 +12,7 @@ Tantivy's aggregations have been designed to mimic the
The code is organized in submodules: The code is organized in submodules:
## bucket ## bucket
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggregations. Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggegations.
## metric ## metric
Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations. Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations.

View File

@@ -20,7 +20,6 @@
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{ //! bucket_agg: BucketAggregationType::Range(RangeAggregation{
//! field: "score".to_string(), //! field: "score".to_string(),
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()], //! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//! keyed: false,
//! }), //! }),
//! sub_aggregation: Default::default(), //! sub_aggregation: Default::default(),
//! }), //! }),
@@ -49,8 +48,8 @@ use std::collections::{HashMap, HashSet};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::bucket::HistogramAggregation;
pub use super::bucket::RangeAggregation; pub use super::bucket::RangeAggregation;
use super::bucket::{HistogramAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation}; use super::metric::{AverageAggregation, StatsAggregation};
use super::VecWithNames; use super::VecWithNames;
@@ -101,33 +100,12 @@ pub(crate) struct BucketAggregationInternal {
} }
impl BucketAggregationInternal { impl BucketAggregationInternal {
pub(crate) fn as_range(&self) -> Option<&RangeAggregation> { pub(crate) fn as_histogram(&self) -> &HistogramAggregation {
match &self.bucket_agg { match &self.bucket_agg {
BucketAggregationType::Range(range) => Some(range), BucketAggregationType::Range(_) => panic!("unexpected aggregation"),
_ => None, BucketAggregationType::Histogram(histogram) => histogram,
} }
} }
pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
match &self.bucket_agg {
BucketAggregationType::Histogram(histogram) => Some(histogram),
_ => None,
}
}
pub(crate) fn as_term(&self) -> Option<&TermsAggregation> {
match &self.bucket_agg {
BucketAggregationType::Terms(terms) => Some(terms),
_ => None,
}
}
}
/// Extract all fields, where the term directory is used in the tree.
pub fn get_term_dict_field_names(aggs: &Aggregations) -> HashSet<String> {
let mut term_dict_field_names = Default::default();
for el in aggs.values() {
el.get_term_dict_field_names(&mut term_dict_field_names)
}
term_dict_field_names
} }
/// Extract all fast field names used in the tree. /// Extract all fast field names used in the tree.
@@ -152,12 +130,6 @@ pub enum Aggregation {
} }
impl Aggregation { impl Aggregation {
fn get_term_dict_field_names(&self, term_field_names: &mut HashSet<String>) {
if let Aggregation::Bucket(bucket) = self {
bucket.get_term_dict_field_names(term_field_names)
}
}
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) { fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
match self { match self {
Aggregation::Bucket(bucket) => bucket.get_fast_field_names(fast_field_names), Aggregation::Bucket(bucket) => bucket.get_fast_field_names(fast_field_names),
@@ -190,12 +162,6 @@ pub struct BucketAggregation {
} }
impl BucketAggregation { impl BucketAggregation {
fn get_term_dict_field_names(&self, term_dict_field_names: &mut HashSet<String>) {
if let BucketAggregationType::Terms(terms) = &self.bucket_agg {
term_dict_field_names.insert(terms.field.to_string());
}
term_dict_field_names.extend(get_term_dict_field_names(&self.sub_aggregation));
}
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) { fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
self.bucket_agg.get_fast_field_names(fast_field_names); self.bucket_agg.get_fast_field_names(fast_field_names);
fast_field_names.extend(get_fast_field_names(&self.sub_aggregation)); fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
@@ -211,15 +177,11 @@ pub enum BucketAggregationType {
/// Put data into buckets of user-defined ranges. /// Put data into buckets of user-defined ranges.
#[serde(rename = "histogram")] #[serde(rename = "histogram")]
Histogram(HistogramAggregation), Histogram(HistogramAggregation),
/// Put data into buckets of terms.
#[serde(rename = "terms")]
Terms(TermsAggregation),
} }
impl BucketAggregationType { impl BucketAggregationType {
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) { fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
match self { match self {
BucketAggregationType::Terms(terms) => fast_field_names.insert(terms.field.to_string()),
BucketAggregationType::Range(range) => fast_field_names.insert(range.field.to_string()), BucketAggregationType::Range(range) => fast_field_names.insert(range.field.to_string()),
BucketAggregationType::Histogram(histogram) => { BucketAggregationType::Histogram(histogram) => {
fast_field_names.insert(histogram.field.to_string()) fast_field_names.insert(histogram.field.to_string())
@@ -271,7 +233,6 @@ mod tests {
(7f64..20f64).into(), (7f64..20f64).into(),
(20f64..f64::MAX).into(), (20f64..f64::MAX).into(),
], ],
keyed: true,
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -298,8 +259,7 @@ mod tests {
{ {
"from": 20.0 "from": 20.0
} }
], ]
"keyed": true
} }
} }
}"#; }"#;
@@ -321,7 +281,6 @@ mod tests {
(7f64..20f64).into(), (7f64..20f64).into(),
(20f64..f64::MAX).into(), (20f64..f64::MAX).into(),
], ],
..Default::default()
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -347,7 +306,6 @@ mod tests {
(7f64..20f64).into(), (7f64..20f64).into(),
(20f64..f64::MAX).into(), (20f64..f64::MAX).into(),
], ],
..Default::default()
}), }),
sub_aggregation: agg_req2, sub_aggregation: agg_req2,
}), }),

View File

@@ -1,19 +1,12 @@
//! This will enhance the request tree with access to the fastfield and metadata. //! This will enhance the request tree with access to the fastfield and metadata.
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;
use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation}; use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation}; use super::bucket::{HistogramAggregation, RangeAggregation};
use super::metric::{AverageAggregation, StatsAggregation}; use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames; use super::VecWithNames;
use crate::fastfield::{ use crate::fastfield::{type_and_cardinality, DynamicFastFieldReader, FastType};
type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,
};
use crate::schema::{Cardinality, Type}; use crate::schema::{Cardinality, Type};
use crate::{InvertedIndexReader, SegmentReader, TantivyError}; use crate::{SegmentReader, TantivyError};
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub(crate) struct AggregationsWithAccessor { pub(crate) struct AggregationsWithAccessor {
@@ -34,36 +27,14 @@ impl AggregationsWithAccessor {
} }
} }
#[derive(Clone)]
pub(crate) enum FastFieldAccessor {
Multi(MultiValuedFastFieldReader<u64>),
Single(DynamicFastFieldReader<u64>),
}
impl FastFieldAccessor {
pub fn as_single(&self) -> Option<&DynamicFastFieldReader<u64>> {
match self {
FastFieldAccessor::Multi(_) => None,
FastFieldAccessor::Single(reader) => Some(reader),
}
}
pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
match self {
FastFieldAccessor::Multi(reader) => Some(reader),
FastFieldAccessor::Single(_) => None,
}
}
}
#[derive(Clone)] #[derive(Clone)]
pub struct BucketAggregationWithAccessor { pub struct BucketAggregationWithAccessor {
/// In general there can be buckets without fast field access, e.g. buckets that are created /// In general there can be buckets without fast field access, e.g. buckets that are created
/// based on search terms. So eventually this needs to be Option or moved. /// based on search terms. So eventually this needs to be Option or moved.
pub(crate) accessor: FastFieldAccessor, pub(crate) accessor: DynamicFastFieldReader<u64>,
pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
pub(crate) field_type: Type, pub(crate) field_type: Type,
pub(crate) bucket_agg: BucketAggregationType, pub(crate) bucket_agg: BucketAggregationType,
pub(crate) sub_aggregation: AggregationsWithAccessor, pub(crate) sub_aggregation: AggregationsWithAccessor,
pub(crate) bucket_count: BucketCount,
} }
impl BucketAggregationWithAccessor { impl BucketAggregationWithAccessor {
@@ -71,44 +42,22 @@ impl BucketAggregationWithAccessor {
bucket: &BucketAggregationType, bucket: &BucketAggregationType,
sub_aggregation: &Aggregations, sub_aggregation: &Aggregations,
reader: &SegmentReader, reader: &SegmentReader,
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<BucketAggregationWithAccessor> { ) -> crate::Result<BucketAggregationWithAccessor> {
let mut inverted_index = None;
let (accessor, field_type) = match &bucket { let (accessor, field_type) = match &bucket {
BucketAggregationType::Range(RangeAggregation { BucketAggregationType::Range(RangeAggregation {
field: field_name, .. field: field_name,
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?, ranges: _,
}) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::Histogram(HistogramAggregation { BucketAggregationType::Histogram(HistogramAggregation {
field: field_name, .. field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?, }) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::Terms(TermsAggregation {
field: field_name, ..
}) => {
let field = reader
.schema()
.get_field(field_name)
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
inverted_index = Some(reader.inverted_index(field)?);
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
}
}; };
let sub_aggregation = sub_aggregation.clone(); let sub_aggregation = sub_aggregation.clone();
Ok(BucketAggregationWithAccessor { Ok(BucketAggregationWithAccessor {
accessor, accessor,
field_type, field_type,
sub_aggregation: get_aggs_with_accessor_and_validate( sub_aggregation: get_aggs_with_accessor_and_validate(&sub_aggregation, reader)?,
&sub_aggregation,
reader,
bucket_count.clone(),
max_bucket_count,
)?,
bucket_agg: bucket.clone(), bucket_agg: bucket.clone(),
inverted_index,
bucket_count: BucketCount {
bucket_count,
max_bucket_count,
},
}) })
} }
} }
@@ -129,14 +78,10 @@ impl MetricAggregationWithAccessor {
match &metric { match &metric {
MetricAggregation::Average(AverageAggregation { field: field_name }) MetricAggregation::Average(AverageAggregation { field: field_name })
| MetricAggregation::Stats(StatsAggregation { field: field_name }) => { | MetricAggregation::Stats(StatsAggregation { field: field_name }) => {
let (accessor, field_type) = let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;
get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
Ok(MetricAggregationWithAccessor { Ok(MetricAggregationWithAccessor {
accessor: accessor accessor,
.as_single()
.expect("unexpected fast field cardinality")
.clone(),
field_type, field_type,
metric: metric.clone(), metric: metric.clone(),
}) })
@@ -148,8 +93,6 @@ impl MetricAggregationWithAccessor {
pub(crate) fn get_aggs_with_accessor_and_validate( pub(crate) fn get_aggs_with_accessor_and_validate(
aggs: &Aggregations, aggs: &Aggregations,
reader: &SegmentReader, reader: &SegmentReader,
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<AggregationsWithAccessor> { ) -> crate::Result<AggregationsWithAccessor> {
let mut metrics = vec![]; let mut metrics = vec![];
let mut buckets = vec![]; let mut buckets = vec![];
@@ -161,8 +104,6 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
&bucket.bucket_agg, &bucket.bucket_agg,
&bucket.sub_aggregation, &bucket.sub_aggregation,
reader, reader,
Rc::clone(&bucket_count),
max_bucket_count,
)?, )?,
)), )),
Aggregation::Metric(metric) => metrics.push(( Aggregation::Metric(metric) => metrics.push((
@@ -177,45 +118,32 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
)) ))
} }
/// Get fast field reader with the given cardinality.
fn get_ff_reader_and_validate( fn get_ff_reader_and_validate(
reader: &SegmentReader, reader: &SegmentReader,
field_name: &str, field_name: &str,
cardinality: Cardinality, ) -> crate::Result<(DynamicFastFieldReader<u64>, Type)> {
) -> crate::Result<(FastFieldAccessor, Type)> {
let field = reader let field = reader
.schema() .schema()
.get_field(field_name) .get_field(field_name)
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?; .ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
let field_type = reader.schema().get_field_entry(field).field_type(); let field_type = reader.schema().get_field_entry(field).field_type();
if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) { if let Some((ff_type, cardinality)) = type_and_cardinality(field_type) {
if ff_type == FastType::Date { if cardinality == Cardinality::MultiValues || ff_type == FastType::Date {
return Err(TantivyError::InvalidArgument(
"Unsupported field type date in aggregation".to_string(),
));
}
if cardinality != field_cardinality {
return Err(TantivyError::InvalidArgument(format!( return Err(TantivyError::InvalidArgument(format!(
"Invalid field cardinality on field {} expected {:?}, but got {:?}", "Invalid field type in aggregation {:?}, only Cardinality::SingleValue supported",
field_name, cardinality, field_cardinality field_type.value_type()
))); )));
} }
} else { } else {
return Err(TantivyError::InvalidArgument(format!( return Err(TantivyError::InvalidArgument(format!(
"Only fast fields of type f64, u64, i64 are supported, but got {:?} ", "Only single value fast fields of type f64, u64, i64 are supported, but got {:?} ",
field_type.value_type() field_type.value_type()
))); )));
}; };
let ff_fields = reader.fast_fields(); let ff_fields = reader.fast_fields();
match cardinality { ff_fields
Cardinality::SingleValue => ff_fields .u64_lenient(field)
.u64_lenient(field) .map(|field| (field, field_type.value_type()))
.map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
Cardinality::MultiValues => ff_fields
.u64s_lenient(field)
.map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
}
} }

View File

@@ -4,37 +4,86 @@
//! intermediate average results, which is the sum and the number of values. The actual average is //! intermediate average results, which is the sum and the number of values. The actual average is
//! calculated on the step from intermediate to final aggregation result tree. //! calculated on the step from intermediate to final aggregation result tree.
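
A minimal, hedged sketch of that idea (illustrative types, not the crate's actual structs): the intermediate form keeps only what is needed to merge results across segments, and the average itself is derived at the very end.

struct IntermediateAverage {
    sum: f64,
    count: u64,
}

impl IntermediateAverage {
    fn merge(&mut self, other: &IntermediateAverage) {
        // Merging intermediate results from two segments is just adding the parts.
        self.sum += other.sum;
        self.count += other.count;
    }

    fn finalize(&self) -> Option<f64> {
        // The actual average is only computed in the final result tree.
        if self.count == 0 { None } else { Some(self.sum / self.count as f64) }
    }
}
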
use std::cmp::Ordering;
use std::collections::HashMap; use std::collections::HashMap;
use fnv::FnvHashMap; use itertools::Itertools;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::agg_req::BucketAggregationInternal; use super::agg_req::{Aggregations, AggregationsInternal, BucketAggregationInternal};
use super::bucket::GetDocCount; use super::bucket::intermediate_buckets_to_final_buckets;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult}; use super::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
IntermediateMetricResult, IntermediateRangeBucketEntry,
};
use super::metric::{SingleMetricResult, Stats}; use super::metric::{SingleMetricResult, Stats};
use super::Key; use super::Key;
use crate::TantivyError;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// The final aggregation result. /// The final aggregation result.
pub struct AggregationResults(pub HashMap<String, AggregationResult>); pub struct AggregationResults(pub HashMap<String, AggregationResult>);
impl AggregationResults { impl AggregationResults {
pub(crate) fn get_value_from_aggregation( /// Convert an intermediate result and its aggregation request to the final result
&self, pub fn from_intermediate_and_req(
name: &str, results: IntermediateAggregationResults,
agg_property: &str, agg: Aggregations,
) -> crate::Result<Option<f64>> { ) -> Self {
if let Some(agg) = self.0.get(name) { AggregationResults::from_intermediate_and_req_internal(results, &(agg.into()))
agg.get_value_from_aggregation(name, agg_property) }
/// Convert an intermediate result and its aggregation request to the final result
///
/// Internal function, CollectorAggregations is used instead of Aggregations, which is optimized
/// for internal processing
fn from_intermediate_and_req_internal(
results: IntermediateAggregationResults,
req: &AggregationsInternal,
) -> Self {
let mut result = HashMap::default();
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
// request
if let Some(buckets) = results.buckets {
result.extend(buckets.into_iter().zip(req.buckets.values()).map(
|((key, bucket), req)| {
(
key,
AggregationResult::BucketResult(BucketResult::from_intermediate_and_req(
bucket, req,
)),
)
},
));
} else { } else {
// Validation is be done during request parsing, so we can't reach this state. result.extend(req.buckets.iter().map(|(key, req)| {
Err(TantivyError::InternalError(format!( let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
"Can't find aggregation {:?} in sub_aggregations", (
name key.to_string(),
))) AggregationResult::BucketResult(BucketResult::from_intermediate_and_req(
empty_bucket,
req,
)),
)
}));
} }
if let Some(metrics) = results.metrics {
result.extend(
metrics
.into_iter()
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
);
} else {
result.extend(req.metrics.iter().map(|(key, req)| {
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
(
key.to_string(),
AggregationResult::MetricResult(empty_bucket.into()),
)
}));
}
Self(result)
} }
} }
@@ -48,23 +97,6 @@ pub enum AggregationResult {
MetricResult(MetricResult), MetricResult(MetricResult),
} }
impl AggregationResult {
pub(crate) fn get_value_from_aggregation(
&self,
_name: &str,
agg_property: &str,
) -> crate::Result<Option<f64>> {
match self {
AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
"Tried to retrieve value from bucket aggregation. This is not supported and \
should not happen during collection phase, but should be caught during validation"
.to_string(),
)),
AggregationResult::MetricResult(metric) => metric.get_value(agg_property),
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)] #[serde(untagged)]
/// MetricResult /// MetricResult
@@ -75,14 +107,6 @@ pub enum MetricResult {
Stats(Stats), Stats(Stats),
} }
impl MetricResult {
fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
match self {
MetricResult::Average(avg) => Ok(avg.value),
MetricResult::Stats(stats) => stats.get_value(agg_property),
}
}
}
impl From<IntermediateMetricResult> for MetricResult { impl From<IntermediateMetricResult> for MetricResult {
fn from(metric: IntermediateMetricResult) -> Self { fn from(metric: IntermediateMetricResult) -> Self {
match metric { match metric {
@@ -104,7 +128,7 @@ pub enum BucketResult {
/// sub_aggregations. /// sub_aggregations.
Range { Range {
/// The range buckets sorted by range. /// The range buckets sorted by range.
buckets: BucketEntries<RangeBucketEntry>, buckets: Vec<RangeBucketEntry>,
}, },
/// This is the histogram entry for a bucket, which contains a key, count, and optionally /// This is the histogram entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations. /// sub_aggregations.
@@ -114,38 +138,43 @@ pub enum BucketResult {
/// If there are holes depends on the request, if min_doc_count is 0, then there are no /// If there are holes depends on the request, if min_doc_count is 0, then there are no
/// holes between the first and last bucket. /// holes between the first and last bucket.
/// See [HistogramAggregation](super::bucket::HistogramAggregation) /// See [HistogramAggregation](super::bucket::HistogramAggregation)
buckets: BucketEntries<BucketEntry>,
},
/// This is the term result
Terms {
/// The buckets.
///
/// See [TermsAggregation](super::bucket::TermsAggregation)
buckets: Vec<BucketEntry>, buckets: Vec<BucketEntry>,
/// The number of documents that didn't make it into the TOP N due to shard_size or size
sum_other_doc_count: u64,
#[serde(skip_serializing_if = "Option::is_none")]
/// The upper bound error for the doc count of each term.
doc_count_error_upper_bound: Option<u64>,
}, },
} }
impl BucketResult { impl BucketResult {
pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> { fn from_intermediate_and_req(
let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg); bucket_result: IntermediateBucketResult,
empty_bucket.into_final_bucket_result(req) req: &BucketAggregationInternal,
} ) -> Self {
} match bucket_result {
IntermediateBucketResult::Range(range_map) => {
let mut buckets: Vec<RangeBucketEntry> = range_map
.into_iter()
.map(|(_, bucket)| {
RangeBucketEntry::from_intermediate_and_req(bucket, &req.sub_aggregation)
})
.collect_vec();
/// This is the wrapper of buckets entries, which can be vector or hashmap buckets.sort_by(|a, b| {
/// depending on if it's keyed or not. a.from
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] .unwrap_or(f64::MIN)
#[serde(untagged)] .partial_cmp(&b.from.unwrap_or(f64::MIN))
pub enum BucketEntries<T> { .unwrap_or(Ordering::Equal)
/// Vector format bucket entries });
Vec(Vec<T>), BucketResult::Range { buckets }
/// HashMap format bucket entries }
HashMap(FnvHashMap<String, T>), IntermediateBucketResult::Histogram { buckets } => {
let buckets = intermediate_buckets_to_final_buckets(
buckets,
req.as_histogram(),
&req.sub_aggregation,
);
BucketResult::Histogram { buckets }
}
}
}
} }
/// This is the default entry for a bucket, which contains a key, count, and optionally /// This is the default entry for a bucket, which contains a key, count, and optionally
@@ -181,17 +210,23 @@ pub struct BucketEntry {
/// Number of documents in the bucket. /// Number of documents in the bucket.
pub doc_count: u64, pub doc_count: u64,
#[serde(flatten)] #[serde(flatten)]
/// Sub-aggregations in this bucket. /// sub-aggregations in this bucket.
pub sub_aggregation: AggregationResults, pub sub_aggregation: AggregationResults,
} }
impl GetDocCount for &BucketEntry {
fn doc_count(&self) -> u64 { impl BucketEntry {
self.doc_count pub(crate) fn from_intermediate_and_req(
} entry: IntermediateHistogramBucketEntry,
} req: &AggregationsInternal,
impl GetDocCount for BucketEntry { ) -> Self {
fn doc_count(&self) -> u64 { BucketEntry {
self.doc_count key: Key::F64(entry.key),
doc_count: entry.doc_count,
sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
entry.sub_aggregation,
req,
),
}
} }
} }
@@ -241,3 +276,21 @@ pub struct RangeBucketEntry {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub to: Option<f64>, pub to: Option<f64>,
} }
impl RangeBucketEntry {
fn from_intermediate_and_req(
entry: IntermediateRangeBucketEntry,
req: &AggregationsInternal,
) -> Self {
RangeBucketEntry {
key: entry.key,
doc_count: entry.doc_count,
sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
entry.sub_aggregation,
req,
),
to: entry.to,
from: entry.from,
}
}
}

View File

@@ -1,7 +1,6 @@
use std::cmp::Ordering; use std::cmp::Ordering;
use std::fmt::Display; use std::fmt::Display;
use fastfield_codecs::Column;
use itertools::Itertools; use itertools::Itertools;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -14,8 +13,10 @@ use crate::aggregation::f64_from_fastfield_u64;
use crate::aggregation::intermediate_agg_result::{ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry, IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
}; };
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector; use crate::aggregation::segment_agg_result::{
use crate::fastfield::DynamicFastFieldReader; SegmentAggregationResultsCollector, SegmentHistogramBucketEntry,
};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type; use crate::schema::Type;
use crate::{DocId, TantivyError}; use crate::{DocId, TantivyError};
@@ -49,13 +50,15 @@ use crate::{DocId, TantivyError};
/// ///
/// # Limitations/Compatibility /// # Limitations/Compatibility
/// ///
/// The keyed parameter (elasticsearch) is not yet supported.
///
/// # JSON Format /// # JSON Format
/// ```json /// ```json
/// { /// {
/// "prices": { /// "prices": {
/// "histogram": { /// "histogram": {
/// "field": "price", /// "field": "price",
/// "interval": 10 /// "interval": 10,
/// } /// }
/// } /// }
/// } /// }
@@ -68,17 +71,16 @@ use crate::{DocId, TantivyError};
pub struct HistogramAggregation { pub struct HistogramAggregation {
/// The field to aggregate on. /// The field to aggregate on.
pub field: String, pub field: String,
/// The interval to chunk your data range. Each bucket spans a value range of [0..interval). /// The interval to chunk your data range. The buckets span ranges of [0..interval).
/// Must be a positive value. /// Must be a positive value.
pub interval: f64, pub interval: f64,
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k + /// Intervals implicitely defines an absolute grid of buckets `[interval * k, interval * (k +
/// 1))`. /// 1))`.
/// ///
/// Offset makes it possible to shift this grid into /// Offset makes it possible to shift this grid into `[offset + interval * k, offset + interval
/// `[offset + interval * k, offset + interval * (k + 1))`. Offset has to be in the range [0, /// * (k + 1)) Offset has to be in the range [0, interval).
/// interval).
/// ///
/// As an example, if there are two documents with value 9 and 12 and interval 10.0, they would /// As an example. If there are two documents with value 8 and 12 and interval 10.0, they would
/// fall into the buckets with the key 0 and 10. /// fall into the buckets with the key 0 and 10.
/// With offset 5 and interval 10, they would both fall into the bucket with they key 5 and the /// With offset 5 and interval 10, they would both fall into the bucket with they key 5 and the
/// range [5..15) /// range [5..15)
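
A minimal, hedged sketch of the bucket-key arithmetic described in the doc comment above (illustrative only; the actual collector works on fast field values and handles bounds separately):

fn bucket_key(value: f64, interval: f64, offset: f64) -> f64 {
    offset + ((value - offset) / interval).floor() * interval
}

// bucket_key(9.0, 10.0, 0.0) == 0.0 and bucket_key(12.0, 10.0, 0.0) == 10.0
// bucket_key(9.0, 10.0, 5.0) == 5.0 and bucket_key(12.0, 10.0, 5.0) == 5.0
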
@@ -91,22 +93,6 @@ pub struct HistogramAggregation {
/// ///
/// hard_bounds only limits the buckets, to force a range set both extended_bounds and /// hard_bounds only limits the buckets, to force a range set both extended_bounds and
/// hard_bounds to the same range. /// hard_bounds to the same range.
///
/// ## Example
/// ```json
/// {
/// "prices": {
/// "histogram": {
/// "field": "price",
/// "interval": 10,
/// "hard_bounds": {
/// "min": 0,
/// "max": 100
/// }
/// }
/// }
/// }
/// ```
pub hard_bounds: Option<HistogramBounds>, pub hard_bounds: Option<HistogramBounds>,
/// Can be set to extend your bounds. The range of the buckets is by default defined by the /// Can be set to extend your bounds. The range of the buckets is by default defined by the
/// data range of the values of the documents. As the name suggests, this can only be used to /// data range of the values of the documents. As the name suggests, this can only be used to
@@ -116,9 +102,6 @@ pub struct HistogramAggregation {
/// Cannot be set in conjunction with min_doc_count > 0, since the empty buckets from extended /// Cannot be set in conjunction with min_doc_count > 0, since the empty buckets from extended
/// bounds would not be returned. /// bounds would not be returned.
pub extended_bounds: Option<HistogramBounds>, pub extended_bounds: Option<HistogramBounds>,
/// Whether to return the buckets as a hash map
#[serde(default)]
pub keyed: bool,
} }
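
As a side note on the interval/offset grid described in the doc comment above: the bucket key for a value is floor division shifted by the offset. The helper below is a minimal, hypothetical sketch (not part of the crate) of the arithmetic that `get_bucket_val` further down appears to implement:

```rust
// Hypothetical helper for the grid [offset + interval * k, offset + interval * (k + 1)).
fn bucket_key(val: f64, interval: f64, offset: f64) -> f64 {
    ((val - offset) / interval).floor() * interval + offset
}

fn main() {
    // Values 9 and 12 with interval 10 land in the buckets keyed 0 and 10.
    assert_eq!(bucket_key(9.0, 10.0, 0.0), 0.0);
    assert_eq!(bucket_key(12.0, 10.0, 0.0), 10.0);
    // With offset 5 both fall into the bucket keyed 5, covering [5..15).
    assert_eq!(bucket_key(9.0, 10.0, 5.0), 5.0);
    assert_eq!(bucket_key(12.0, 10.0, 5.0), 5.0);
}
```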
impl HistogramAggregation { impl HistogramAggregation {
@@ -176,27 +159,6 @@ impl HistogramBounds {
} }
} }
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct SegmentHistogramBucketEntry {
pub key: f64,
pub doc_count: u64,
}
impl SegmentHistogramBucketEntry {
pub(crate) fn into_intermediate_bucket_entry(
self,
sub_aggregation: SegmentAggregationResultsCollector,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateHistogramBucketEntry> {
Ok(IntermediateHistogramBucketEntry {
key: self.key,
doc_count: self.doc_count,
sub_aggregation: sub_aggregation
.into_intermediate_aggregations_result(agg_with_accessor)?,
})
}
}
/// The collector puts values from the fast field into the correct buckets and does a conversion to /// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype. /// the correct datatype.
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
@@ -212,10 +174,7 @@ pub struct SegmentHistogramCollector {
} }
impl SegmentHistogramCollector { impl SegmentHistogramCollector {
pub fn into_intermediate_bucket_result( pub fn into_intermediate_bucket_result(self) -> IntermediateBucketResult {
self,
agg_with_accessor: &BucketAggregationWithAccessor,
) -> crate::Result<IntermediateBucketResult> {
let mut buckets = Vec::with_capacity( let mut buckets = Vec::with_capacity(
self.buckets self.buckets
.iter() .iter()
@@ -229,20 +188,13 @@ impl SegmentHistogramCollector {
// //
// Empty buckets may be added later again in the final result, depending on the request. // Empty buckets may be added later again in the final result, depending on the request.
if let Some(sub_aggregations) = self.sub_aggregations { if let Some(sub_aggregations) = self.sub_aggregations {
for bucket_res in self buckets.extend(
.buckets self.buckets
.into_iter() .into_iter()
.zip(sub_aggregations.into_iter()) .zip(sub_aggregations.into_iter())
.filter(|(bucket, _sub_aggregation)| bucket.doc_count != 0) .filter(|(bucket, _sub_aggregation)| bucket.doc_count != 0)
.map(|(bucket, sub_aggregation)| { .map(|(bucket, sub_aggregation)| (bucket, sub_aggregation).into()),
bucket.into_intermediate_bucket_entry( )
sub_aggregation,
&agg_with_accessor.sub_aggregation,
)
})
{
buckets.push(bucket_res?);
}
} else { } else {
buckets.extend( buckets.extend(
self.buckets self.buckets
@@ -252,12 +204,7 @@ impl SegmentHistogramCollector {
); );
}; };
agg_with_accessor IntermediateBucketResult::Histogram { buckets }
.bucket_count
.add_count(buckets.len() as u32);
agg_with_accessor.bucket_count.validate_bucket_count()?;
Ok(IntermediateBucketResult::Histogram { buckets })
} }
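
The `add_count(..)`/`validate_bucket_count()` calls above guard against unbounded bucket growth (see `MAX_BUCKET_COUNT` in the collector changes further down). A rough sketch of such a shared counter, assuming an `Rc<Cell<u32>>`-style implementation; the crate's actual `BucketCount` type may differ in detail:

```rust
use std::cell::Cell;
use std::rc::Rc;

const MAX_BUCKET_COUNT: u32 = 65_000;

// Shared across all bucket collectors of one aggregation request.
#[derive(Clone)]
struct BucketCountSketch {
    count: Rc<Cell<u32>>,
    max: u32,
}

impl BucketCountSketch {
    fn new(max: u32) -> Self {
        Self { count: Rc::default(), max }
    }
    fn add_count(&self, count: u32) {
        self.count.set(self.count.get() + count);
    }
    fn validate_bucket_count(&self) -> Result<(), String> {
        if self.count.get() > self.max {
            return Err(format!(
                "aggregation produced {} buckets, limit is {}",
                self.count.get(),
                self.max
            ));
        }
        Ok(())
    }
}

fn main() {
    let counter = BucketCountSketch::new(MAX_BUCKET_COUNT);
    counter.add_count(3);
    assert!(counter.validate_bucket_count().is_ok());
}
```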
pub(crate) fn from_req_and_validate( pub(crate) fn from_req_and_validate(
@@ -318,7 +265,7 @@ impl SegmentHistogramCollector {
doc: &[DocId], doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor, bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) {
let bounds = self.bounds; let bounds = self.bounds;
let interval = self.interval; let interval = self.interval;
let offset = self.offset; let offset = self.offset;
@@ -326,16 +273,12 @@ impl SegmentHistogramCollector {
let get_bucket_num = let get_bucket_num =
|val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize; |val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize;
let accessor = bucket_with_accessor
.accessor
.as_single()
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4); let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() { for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0] as u64)); let val0 = self.f64_from_fastfield_u64(bucket_with_accessor.accessor.get(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1] as u64)); let val1 = self.f64_from_fastfield_u64(bucket_with_accessor.accessor.get(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2] as u64)); let val2 = self.f64_from_fastfield_u64(bucket_with_accessor.accessor.get(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3] as u64)); let val3 = self.f64_from_fastfield_u64(bucket_with_accessor.accessor.get(docs[3]));
let bucket_pos0 = get_bucket_num(val0); let bucket_pos0 = get_bucket_num(val0);
let bucket_pos1 = get_bucket_num(val1); let bucket_pos1 = get_bucket_num(val1);
@@ -348,31 +291,32 @@ impl SegmentHistogramCollector {
bucket_pos0, bucket_pos0,
docs[0], docs[0],
&bucket_with_accessor.sub_aggregation, &bucket_with_accessor.sub_aggregation,
)?; );
self.increment_bucket_if_in_bounds( self.increment_bucket_if_in_bounds(
val1, val1,
&bounds, &bounds,
bucket_pos1, bucket_pos1,
docs[1], docs[1],
&bucket_with_accessor.sub_aggregation, &bucket_with_accessor.sub_aggregation,
)?; );
self.increment_bucket_if_in_bounds( self.increment_bucket_if_in_bounds(
val2, val2,
&bounds, &bounds,
bucket_pos2, bucket_pos2,
docs[2], docs[2],
&bucket_with_accessor.sub_aggregation, &bucket_with_accessor.sub_aggregation,
)?; );
self.increment_bucket_if_in_bounds( self.increment_bucket_if_in_bounds(
val3, val3,
&bounds, &bounds,
bucket_pos3, bucket_pos3,
docs[3], docs[3],
&bucket_with_accessor.sub_aggregation, &bucket_with_accessor.sub_aggregation,
)?; );
} }
for &doc in iter.remainder() { for doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc as u64), &self.field_type); let val =
f64_from_fastfield_u64(bucket_with_accessor.accessor.get(*doc), &self.field_type);
if !bounds.contains(val) { if !bounds.contains(val) {
continue; continue;
} }
@@ -383,17 +327,16 @@ impl SegmentHistogramCollector {
self.buckets[bucket_pos].key, self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset) as f64 get_bucket_val(val, self.interval, self.offset) as f64
); );
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?; self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
} }
if force_flush { if force_flush {
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() { if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
for sub_aggregation in sub_aggregations { for sub_aggregation in sub_aggregations {
sub_aggregation sub_aggregation
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?; .flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
} }
} }
} }
Ok(())
} }
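
The loop above reads fast field values for document ids in chunks of four with an explicit remainder pass; the range collector further down uses the same shape. A stripped-down sketch of that access pattern with a plain slice standing in for the fast field column (all names here are illustrative):

```rust
// Count values into buckets, processing doc ids four at a time.
fn count_in_chunks(docs: &[u32], values: &[usize], counts: &mut [u64]) {
    let mut iter = docs.chunks_exact(4);
    for chunk in iter.by_ref() {
        // Four independent lookups per iteration give the compiler room to
        // overlap the (potentially cache-missing) column reads.
        let v0 = values[chunk[0] as usize];
        let v1 = values[chunk[1] as usize];
        let v2 = values[chunk[2] as usize];
        let v3 = values[chunk[3] as usize];
        counts[v0] += 1;
        counts[v1] += 1;
        counts[v2] += 1;
        counts[v3] += 1;
    }
    for &doc in iter.remainder() {
        counts[values[doc as usize]] += 1;
    }
}

fn main() {
    let values = vec![0, 1, 1, 0, 2, 1];
    let mut counts = vec![0u64; 3];
    count_in_chunks(&[0, 1, 2, 3, 4, 5], &values, &mut counts);
    assert_eq!(counts, vec![2, 3, 1]);
}
```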
#[inline] #[inline]
@@ -404,16 +347,15 @@ impl SegmentHistogramCollector {
bucket_pos: usize, bucket_pos: usize,
doc: DocId, doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor, bucket_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> { ) {
if bounds.contains(val) { if bounds.contains(val) {
debug_assert_eq!( debug_assert_eq!(
self.buckets[bucket_pos].key, self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset) as f64 get_bucket_val(val, self.interval, self.offset) as f64
); );
self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?; self.increment_bucket(bucket_pos, doc, bucket_with_accessor);
} }
Ok(())
} }
#[inline] #[inline]
@@ -422,13 +364,12 @@ impl SegmentHistogramCollector {
bucket_pos: usize, bucket_pos: usize,
doc: DocId, doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor, bucket_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> { ) {
let bucket = &mut self.buckets[bucket_pos]; let bucket = &mut self.buckets[bucket_pos];
bucket.doc_count += 1; bucket.doc_count += 1;
if let Some(sub_aggregation) = self.sub_aggregations.as_mut() { if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
(&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor)?; (&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor);
} }
Ok(())
} }
fn f64_from_fastfield_u64(&self, val: u64) -> f64 { fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
@@ -452,7 +393,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
buckets: Vec<IntermediateHistogramBucketEntry>, buckets: Vec<IntermediateHistogramBucketEntry>,
histogram_req: &HistogramAggregation, histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal, sub_aggregation: &AggregationsInternal,
) -> crate::Result<Vec<BucketEntry>> { ) -> Vec<BucketEntry> {
// Generate the full list of buckets without gaps. // Generate the full list of buckets without gaps.
// //
// The bounds are the min/max from the current buckets, optionally extended by // The bounds are the min/max from the current buckets, optionally extended by
@@ -492,16 +433,18 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
sub_aggregation: empty_sub_aggregation.clone(), sub_aggregation: empty_sub_aggregation.clone(),
}, },
}) })
.map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation)) .map(|intermediate_bucket| {
.collect::<crate::Result<Vec<_>>>() BucketEntry::from_intermediate_and_req(intermediate_bucket, sub_aggregation)
})
.collect_vec()
} }
// Convert to BucketEntry // Convert to BucketEntry
pub(crate) fn intermediate_histogram_buckets_to_final_buckets( pub(crate) fn intermediate_buckets_to_final_buckets(
buckets: Vec<IntermediateHistogramBucketEntry>, buckets: Vec<IntermediateHistogramBucketEntry>,
histogram_req: &HistogramAggregation, histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal, sub_aggregation: &AggregationsInternal,
) -> crate::Result<Vec<BucketEntry>> { ) -> Vec<BucketEntry> {
if histogram_req.min_doc_count() == 0 { if histogram_req.min_doc_count() == 0 {
// With min_doc_count != 0, we may need to add buckets, so that there are no // With min_doc_count != 0, we may need to add buckets, so that there are no
// gaps, since intermediate result does not contain empty buckets (filtered to // gaps, since intermediate result does not contain empty buckets (filtered to
@@ -511,9 +454,9 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
} else { } else {
buckets buckets
.into_iter() .into_iter()
.filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count()) .filter(|bucket| bucket.doc_count >= histogram_req.min_doc_count())
.map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation)) .map(|bucket| BucketEntry::from_intermediate_and_req(bucket, sub_aggregation))
.collect::<crate::Result<Vec<_>>>() .collect_vec()
} }
} }
@@ -554,7 +497,7 @@ pub(crate) fn generate_buckets_with_opt_minmax(
let offset = req.offset.unwrap_or(0.0); let offset = req.offset.unwrap_or(0.0);
let first_bucket_num = get_bucket_num_f64(min, req.interval, offset) as i64; let first_bucket_num = get_bucket_num_f64(min, req.interval, offset) as i64;
let last_bucket_num = get_bucket_num_f64(max, req.interval, offset) as i64; let last_bucket_num = get_bucket_num_f64(max, req.interval, offset) as i64;
let mut buckets = Vec::with_capacity((first_bucket_num..=last_bucket_num).count()); let mut buckets = vec![];
for bucket_pos in first_bucket_num..=last_bucket_num { for bucket_pos in first_bucket_num..=last_bucket_num {
let bucket_key = bucket_pos as f64 * req.interval + offset; let bucket_key = bucket_pos as f64 * req.interval + offset;
buckets.push(bucket_key); buckets.push(bucket_key);
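
To make the grid generation concrete: with min = 12, max = 38, interval = 10 and no offset, the first and last bucket numbers are 1 and 3, so the keys 10, 20 and 30 are produced. A hypothetical stand-alone version of that arithmetic (names do not match the crate internals):

```rust
fn generate_bucket_keys(min: f64, max: f64, interval: f64, offset: f64) -> Vec<f64> {
    let first = ((min - offset) / interval).floor() as i64;
    let last = ((max - offset) / interval).floor() as i64;
    (first..=last).map(|k| k as f64 * interval + offset).collect()
}

fn main() {
    assert_eq!(
        generate_bucket_keys(12.0, 38.0, 10.0, 0.0),
        vec![10.0, 20.0, 30.0]
    );
}
```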
@@ -687,9 +630,41 @@ mod tests {
}; };
use crate::aggregation::metric::{AverageAggregation, StatsAggregation}; use crate::aggregation::metric::{AverageAggregation, StatsAggregation};
use crate::aggregation::tests::{ use crate::aggregation::tests::{
exec_request, exec_request_with_query, get_test_index_2_segments, get_test_index_2_segments, get_test_index_from_values, get_test_index_with_num_docs,
get_test_index_from_values, get_test_index_with_num_docs,
}; };
use crate::aggregation::AggregationCollector;
use crate::query::{AllQuery, TermQuery};
use crate::schema::IndexRecordOption;
use crate::{Index, Term};
fn exec_request(agg_req: Aggregations, index: &Index) -> crate::Result<Value> {
exec_request_with_query(agg_req, index, None)
}
fn exec_request_with_query(
agg_req: Aggregations,
index: &Index,
query: Option<(&str, &str)>,
) -> crate::Result<Value> {
let collector = AggregationCollector::from_aggs(agg_req);
let reader = index.reader()?;
let searcher = reader.searcher();
let agg_res = if let Some((field, term)) = query {
let text_field = reader.searcher().schema().get_field(field).unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, term),
IndexRecordOption::Basic,
);
searcher.search(&term_query, &collector)?
} else {
searcher.search(&AllQuery, &collector)?
};
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
Ok(res)
}
#[test] #[test]
fn histogram_test_crooked_values() -> crate::Result<()> { fn histogram_test_crooked_values() -> crate::Result<()> {
@@ -1372,71 +1347,4 @@ mod tests {
Ok(()) Ok(())
} }
#[test]
fn histogram_invalid_request() -> crate::Result<()> {
let index = get_test_index_2_segments(true)?;
let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 0.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let agg_res = exec_request(agg_req, &index);
assert!(agg_res.is_err());
Ok(())
}
#[test]
fn histogram_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 50.0,
keyed: true,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request(agg_req, &index)?;
assert_eq!(
res,
json!({
"histogram": {
"buckets": {
"0": {
"key": 0.0,
"doc_count": 50
},
"50": {
"key": 50.0,
"doc_count": 50
}
}
}
})
);
Ok(())
}
} }


@@ -9,132 +9,8 @@
mod histogram; mod histogram;
mod range; mod range;
mod term_agg;
use std::collections::HashMap;
pub(crate) use histogram::SegmentHistogramCollector; pub(crate) use histogram::SegmentHistogramCollector;
pub use histogram::*; pub use histogram::*;
pub(crate) use range::SegmentRangeCollector; pub(crate) use range::SegmentRangeCollector;
pub use range::*; pub use range::*;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
pub use term_agg::*;
/// Order for buckets in a bucket aggregation.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum Order {
/// Asc order
#[serde(rename = "asc")]
Asc,
/// Desc order
#[serde(rename = "desc")]
Desc,
}
impl Default for Order {
fn default() -> Self {
Order::Desc
}
}
#[derive(Clone, Debug, PartialEq)]
/// Order property by which to apply the order
pub enum OrderTarget {
/// The key of the bucket
Key,
/// The doc count of the bucket
Count,
/// Order by value of the sub aggregation metric with identified by given `String`.
///
/// Only single value metrics are supported currently
SubAggregation(String),
}
impl Default for OrderTarget {
fn default() -> Self {
OrderTarget::Count
}
}
impl From<&str> for OrderTarget {
fn from(val: &str) -> Self {
match val {
"_key" => OrderTarget::Key,
"_count" => OrderTarget::Count,
_ => OrderTarget::SubAggregation(val.to_string()),
}
}
}
impl ToString for OrderTarget {
fn to_string(&self) -> String {
match self {
OrderTarget::Key => "_key".to_string(),
OrderTarget::Count => "_count".to_string(),
OrderTarget::SubAggregation(agg) => agg.to_string(),
}
}
}
/// Set the order. target is either "_count", "_key", or the name of
/// a metric sub_aggregation.
///
/// De/Serializes to elasticsearch compatible JSON.
///
/// Examples in JSON format:
/// { "_count": "asc" }
/// { "_key": "asc" }
/// { "average_price": "asc" }
#[derive(Clone, Default, Debug, PartialEq)]
pub struct CustomOrder {
/// The target property by which to sort by
pub target: OrderTarget,
/// The order asc or desc
pub order: Order,
}
impl Serialize for CustomOrder {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let map: HashMap<String, Order> =
std::iter::once((self.target.to_string(), self.order)).collect();
map.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for CustomOrder {
fn deserialize<D>(deserializer: D) -> Result<CustomOrder, D::Error>
where D: Deserializer<'de> {
HashMap::<String, Order>::deserialize(deserializer).and_then(|map| {
if let Some((key, value)) = map.into_iter().next() {
Ok(CustomOrder {
target: key.as_str().into(),
order: value,
})
} else {
Err(de::Error::custom(
"unexpected empty map in order".to_string(),
))
}
})
}
}
#[test]
fn custom_order_serde_test() {
let order = CustomOrder {
target: OrderTarget::Key,
order: Order::Desc,
};
let order_str = serde_json::to_string(&order).unwrap();
assert_eq!(order_str, "{\"_key\":\"desc\"}");
let order_deser = serde_json::from_str(&order_str).unwrap();
assert_eq!(order, order_deser);
let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("{}");
assert!(order_deser.is_err());
let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("[]");
assert!(order_deser.is_err());
}


@@ -1,24 +1,22 @@
use std::fmt::Debug;
use std::ops::Range; use std::ops::Range;
use fastfield_codecs::Column;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::aggregation::agg_req_with_accessor::{ use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor, AggregationsWithAccessor, BucketAggregationWithAccessor,
}; };
use crate::aggregation::intermediate_agg_result::{ use crate::aggregation::intermediate_agg_result::IntermediateBucketResult;
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult, use crate::aggregation::segment_agg_result::{
SegmentAggregationResultsCollector, SegmentRangeBucketEntry,
}; };
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector}; use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey}; use crate::fastfield::FastFieldReader;
use crate::schema::Type; use crate::schema::Type;
use crate::{DocId, TantivyError}; use crate::{DocId, TantivyError};
/// Provide user-defined buckets to aggregate on. /// Provide user-defined buckets to aggregate on.
/// Two special buckets will automatically be created to cover the whole range of values. /// Two special buckets will automatically be created to cover the whole range of values.
/// The provided buckets have to be continuous. /// The provided buckets have to be continous.
/// During the aggregation, the values extracted from the fast_field `field` will be checked /// During the aggregation, the values extracted from the fast_field `field` will be checked
/// against each bucket range. Note that this aggregation includes the from value and excludes the /// against each bucket range. Note that this aggregation includes the from value and excludes the
/// to value for each range. /// to value for each range.
@@ -35,38 +33,34 @@ use crate::{DocId, TantivyError};
/// # Limitations/Compatibility /// # Limitations/Compatibility
/// Overlapping ranges are not yet supported. /// Overlapping ranges are not yet supported.
/// ///
/// The keyed parameter (elasticsearch) is not yet supported.
///
/// # Request JSON Format /// # Request JSON Format
/// ```json /// ```json
/// { /// {
/// "my_ranges": { /// "range": {
/// "field": "score", /// "field": "score",
/// "ranges": [ /// "ranges": [
/// { "to": 3.0 }, /// { "to": 3.0 },
/// { "from": 3.0, "to": 7.0 }, /// { "from": 3.0, "to": 7.0 },
/// { "from": 7.0, "to": 20.0 }, /// { "from": 7.0, "to": 20.0 }
/// { "from": 20.0 } /// { "from": 20.0 }
/// ] /// ]
/// } /// }
/// } /// }
/// ``` /// ```
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RangeAggregation { pub struct RangeAggregation {
/// The field to aggregate on. /// The field to aggregate on.
pub field: String, pub field: String,
/// Note that this aggregation includes the from value and excludes the to value for each /// Note that this aggregation includes the from value and excludes the to value for each
/// range. Extra buckets will be created until the first to, and last from, if necessary. /// range. Extra buckets will be created until the first to, and last from, if necessary.
pub ranges: Vec<RangeAggregationRange>, pub ranges: Vec<RangeAggregationRange>,
/// Whether to return the buckets as a hash map
#[serde(default)]
pub keyed: bool,
} }
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// The range for one range bucket. /// The range for one range bucket.
pub struct RangeAggregationRange { pub struct RangeAggregationRange {
/// Custom key for the range bucket
#[serde(skip_serializing_if = "Option::is_none", default)]
pub key: Option<String>,
/// The from range value, which is inclusive in the range. /// The from range value, which is inclusive in the range.
/// None equals to an open ended interval. /// None equals to an open ended interval.
#[serde(skip_serializing_if = "Option::is_none", default)] #[serde(skip_serializing_if = "Option::is_none", default)]
@@ -89,26 +83,7 @@ impl From<Range<f64>> for RangeAggregationRange {
} else { } else {
Some(range.end) Some(range.end)
}; };
RangeAggregationRange { RangeAggregationRange { from, to }
key: None,
from,
to,
}
}
}
#[derive(Clone, Debug, PartialEq)]
/// Internally used u64 range for one range bucket.
pub(crate) struct InternalRangeAggregationRange {
/// Custom key for the range bucket
key: Option<String>,
/// u64 range value
range: Range<u64>,
}
impl From<Range<u64>> for InternalRangeAggregationRange {
fn from(range: Range<u64>) -> Self {
InternalRangeAggregationRange { key: None, range }
} }
} }
@@ -127,100 +102,44 @@ pub struct SegmentRangeCollector {
field_type: Type, field_type: Type,
} }
#[derive(Clone, PartialEq)]
pub(crate) struct SegmentRangeBucketEntry {
pub key: Key,
pub doc_count: u64,
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
/// The from range of the bucket. Equals f64::MIN when None.
pub from: Option<f64>,
/// The to range of the bucket. Equals f64::MAX when None. Open interval, `to` is not
/// inclusive.
pub to: Option<f64>,
}
impl Debug for SegmentRangeBucketEntry {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SegmentRangeBucketEntry")
.field("key", &self.key)
.field("doc_count", &self.doc_count)
.field("from", &self.from)
.field("to", &self.to)
.finish()
}
}
impl SegmentRangeBucketEntry {
pub(crate) fn into_intermediate_bucket_entry(
self,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateRangeBucketEntry> {
let sub_aggregation = if let Some(sub_aggregation) = self.sub_aggregation {
sub_aggregation.into_intermediate_aggregations_result(agg_with_accessor)?
} else {
Default::default()
};
Ok(IntermediateRangeBucketEntry {
key: self.key,
doc_count: self.doc_count,
sub_aggregation,
from: self.from,
to: self.to,
})
}
}
impl SegmentRangeCollector { impl SegmentRangeCollector {
pub fn into_intermediate_bucket_result( pub fn into_intermediate_bucket_result(self) -> IntermediateBucketResult {
self,
agg_with_accessor: &BucketAggregationWithAccessor,
) -> crate::Result<IntermediateBucketResult> {
let field_type = self.field_type; let field_type = self.field_type;
let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self let buckets = self
.buckets .buckets
.into_iter() .into_iter()
.map(move |range_bucket| { .map(move |range_bucket| {
Ok(( (
range_to_string(&range_bucket.range, &field_type), range_to_string(&range_bucket.range, &field_type),
range_bucket range_bucket.bucket.into(),
.bucket )
.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
))
}) })
.collect::<crate::Result<_>>()?; .collect();
Ok(IntermediateBucketResult::Range( IntermediateBucketResult::Range(buckets)
IntermediateRangeBucketResult { buckets },
))
} }
pub(crate) fn from_req_and_validate( pub(crate) fn from_req_and_validate(
req: &RangeAggregation, req: &RangeAggregation,
sub_aggregation: &AggregationsWithAccessor, sub_aggregation: &AggregationsWithAccessor,
bucket_count: &BucketCount,
field_type: Type, field_type: Type,
) -> crate::Result<Self> { ) -> crate::Result<Self> {
// The range input on the request is f64. // The range input on the request is f64.
// We need to convert to u64 ranges, because we read the values as u64. // We need to convert to u64 ranges, because we read the values as u64.
// The mapping from the conversion is monotonic so ordering is preserved. // The mapping from the conversion is monotonic so ordering is preserved.
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)? let buckets = extend_validate_ranges(&req.ranges, &field_type)?
.iter() .iter()
.map(|range| { .map(|range| {
let key = range let to = if range.end == u64::MAX {
.key
.clone()
.map(Key::Str)
.unwrap_or_else(|| range_to_key(&range.range, &field_type));
let to = if range.range.end == u64::MAX {
None None
} else { } else {
Some(f64_from_fastfield_u64(range.range.end, &field_type)) Some(f64_from_fastfield_u64(range.end, &field_type))
}; };
let from = if range.range.start == u64::MIN { let from = if range.start == u64::MIN {
None None
} else { } else {
Some(f64_from_fastfield_u64(range.range.start, &field_type)) Some(f64_from_fastfield_u64(range.start, &field_type))
}; };
let sub_aggregation = if sub_aggregation.is_empty() { let sub_aggregation = if sub_aggregation.is_empty() {
None None
@@ -230,11 +149,11 @@ impl SegmentRangeCollector {
)?) )?)
}; };
Ok(SegmentRangeAndBucketEntry { Ok(SegmentRangeAndBucketEntry {
range: range.range.clone(), range: range.clone(),
bucket: SegmentRangeBucketEntry { bucket: SegmentRangeBucketEntry {
key: range_to_key(range, &field_type),
doc_count: 0, doc_count: 0,
sub_aggregation, sub_aggregation,
key,
from, from,
to, to,
}, },
@@ -242,9 +161,6 @@ impl SegmentRangeCollector {
}) })
.collect::<crate::Result<_>>()?; .collect::<crate::Result<_>>()?;
bucket_count.add_count(buckets.len() as u32);
bucket_count.validate_bucket_count()?;
Ok(SegmentRangeCollector { Ok(SegmentRangeCollector {
buckets, buckets,
field_type, field_type,
@@ -257,41 +173,36 @@ impl SegmentRangeCollector {
doc: &[DocId], doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor, bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) {
let mut iter = doc.chunks_exact(4); let mut iter = doc.chunks_exact(4);
let accessor = bucket_with_accessor
.accessor
.as_single()
.expect("unexpected fast field cardinatility");
for docs in iter.by_ref() { for docs in iter.by_ref() {
let val1 = accessor.get_val(docs[0] as u64); let val1 = bucket_with_accessor.accessor.get(docs[0]);
let val2 = accessor.get_val(docs[1] as u64); let val2 = bucket_with_accessor.accessor.get(docs[1]);
let val3 = accessor.get_val(docs[2] as u64); let val3 = bucket_with_accessor.accessor.get(docs[2]);
let val4 = accessor.get_val(docs[3] as u64); let val4 = bucket_with_accessor.accessor.get(docs[3]);
let bucket_pos1 = self.get_bucket_pos(val1); let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2); let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3); let bucket_pos3 = self.get_bucket_pos(val3);
let bucket_pos4 = self.get_bucket_pos(val4); let bucket_pos4 = self.get_bucket_pos(val4);
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?; self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?; self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?; self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?; self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation);
} }
for &doc in iter.remainder() { for doc in iter.remainder() {
let val = accessor.get_val(doc as u64); let val = bucket_with_accessor.accessor.get(*doc);
let bucket_pos = self.get_bucket_pos(val); let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?; self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
} }
if force_flush { if force_flush {
for bucket in &mut self.buckets { for bucket in &mut self.buckets {
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation { if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation sub_aggregation
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?; .flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
} }
} }
} }
Ok(())
} }
#[inline] #[inline]
@@ -300,14 +211,13 @@ impl SegmentRangeCollector {
bucket_pos: usize, bucket_pos: usize,
doc: DocId, doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor, bucket_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> { ) {
let bucket = &mut self.buckets[bucket_pos]; let bucket = &mut self.buckets[bucket_pos];
bucket.bucket.doc_count += 1; bucket.bucket.doc_count += 1;
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation { if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation.collect(doc, bucket_with_accessor)?; sub_aggregation.collect(doc, bucket_with_accessor);
} }
Ok(())
} }
#[inline] #[inline]
@@ -333,10 +243,7 @@ impl SegmentRangeCollector {
/// fast field. /// fast field.
/// The alternative would be that every value read would be converted to the f64 range, but that is /// The alternative would be that every value read would be converted to the f64 range, but that is
/// more computationally expensive when many documents are hit. /// more computationally expensive when many documents are hit.
fn to_u64_range( fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Result<Range<u64>> {
range: &RangeAggregationRange,
field_type: &Type,
) -> crate::Result<InternalRangeAggregationRange> {
let start = if let Some(from) = range.from { let start = if let Some(from) = range.from {
f64_to_fastfield_u64(from, field_type) f64_to_fastfield_u64(from, field_type)
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))? .ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
@@ -351,43 +258,39 @@ fn to_u64_range(
u64::MAX u64::MAX
}; };
Ok(InternalRangeAggregationRange { Ok(start..end)
key: range.key.clone(),
range: start..end,
})
} }
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the /// Extends the provided buckets to contain the whole value range, by inserting buckets at the
/// beginning and end and filling gaps. /// beginning and end.
fn extend_validate_ranges( fn extend_validate_ranges(
buckets: &[RangeAggregationRange], buckets: &[RangeAggregationRange],
field_type: &Type, field_type: &Type,
) -> crate::Result<Vec<InternalRangeAggregationRange>> { ) -> crate::Result<Vec<Range<u64>>> {
let mut converted_buckets = buckets let mut converted_buckets = buckets
.iter() .iter()
.map(|range| to_u64_range(range, field_type)) .map(|range| to_u64_range(range, field_type))
.collect::<crate::Result<Vec<_>>>()?; .collect::<crate::Result<Vec<_>>>()?;
converted_buckets.sort_by_key(|bucket| bucket.range.start); converted_buckets.sort_by_key(|bucket| bucket.start);
if converted_buckets[0].range.start != u64::MIN { if converted_buckets[0].start != u64::MIN {
converted_buckets.insert(0, (u64::MIN..converted_buckets[0].range.start).into()); converted_buckets.insert(0, u64::MIN..converted_buckets[0].start);
} }
if converted_buckets[converted_buckets.len() - 1].range.end != u64::MAX { if converted_buckets[converted_buckets.len() - 1].end != u64::MAX {
converted_buckets converted_buckets.push(converted_buckets[converted_buckets.len() - 1].end..u64::MAX);
.push((converted_buckets[converted_buckets.len() - 1].range.end..u64::MAX).into());
} }
// fill up holes in the ranges // fill up holes in the ranges
let find_hole = |converted_buckets: &[InternalRangeAggregationRange]| { let find_hole = |converted_buckets: &[Range<u64>]| {
for (pos, ranges) in converted_buckets.windows(2).enumerate() { for (pos, ranges) in converted_buckets.windows(2).enumerate() {
if ranges[0].range.end > ranges[1].range.start { if ranges[0].end > ranges[1].start {
return Err(TantivyError::InvalidArgument(format!( return Err(TantivyError::InvalidArgument(format!(
"Overlapping ranges not supported range {:?}, range+1 {:?}", "Overlapping ranges not supported range {:?}, range+1 {:?}",
ranges[0], ranges[1] ranges[0], ranges[1]
))); )));
} }
if ranges[0].range.end != ranges[1].range.start { if ranges[0].end != ranges[1].start {
return Ok(Some(pos)); return Ok(Some(pos));
} }
} }
@@ -395,9 +298,8 @@ fn extend_validate_ranges(
}; };
while let Some(hole_pos) = find_hole(&converted_buckets)? { while let Some(hole_pos) = find_hole(&converted_buckets)? {
let new_range = let new_range = converted_buckets[hole_pos].end..converted_buckets[hole_pos + 1].start;
converted_buckets[hole_pos].range.end..converted_buckets[hole_pos + 1].range.start; converted_buckets.insert(hole_pos + 1, new_range);
converted_buckets.insert(hole_pos + 1, new_range.into());
} }
Ok(converted_buckets) Ok(converted_buckets)
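
A self-contained sketch of the extension and hole-filling logic above, with overlap validation omitted; it mirrors the shape of result that `bucket_test_extend_range_hole` further down asserts (there on converted f64 bounds), here directly in the u64 domain:

```rust
use std::ops::Range;

// Hypothetical stand-alone version: sort, extend to u64::MIN/u64::MAX, fill gaps.
fn extend_and_fill(mut ranges: Vec<Range<u64>>) -> Vec<Range<u64>> {
    ranges.sort_by_key(|r| r.start);
    let first_start = ranges[0].start;
    if first_start != u64::MIN {
        ranges.insert(0, u64::MIN..first_start);
    }
    let last_end = ranges.last().unwrap().end;
    if last_end != u64::MAX {
        ranges.push(last_end..u64::MAX);
    }
    let mut out: Vec<Range<u64>> = Vec::new();
    for r in ranges {
        if let Some(prev_end) = out.last().map(|p| p.end) {
            if prev_end != r.start {
                out.push(prev_end..r.start); // fill the gap between consecutive ranges
            }
        }
        out.push(r);
    }
    out
}

fn main() {
    let res = extend_and_fill(vec![10..20, 30..40]);
    assert_eq!(res, vec![u64::MIN..10, 10..20, 20..30, 30..40, 40..u64::MAX]);
}
```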
@@ -405,7 +307,7 @@ fn extend_validate_ranges(
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String { pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
// is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0, // is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0,
// it should be rendered as "*-0" and not "*-*" // it should be rendererd as "*-0" and not "*-*"
let to_str = |val: u64, is_start: bool| { let to_str = |val: u64, is_start: bool| {
if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) { if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
"*".to_string() "*".to_string()
@@ -424,12 +326,16 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use serde_json::Value;
use super::*; use super::*;
use crate::aggregation::agg_req::{ use crate::aggregation::agg_req::{
Aggregation, Aggregations, BucketAggregation, BucketAggregationType, Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
}; };
use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs}; use crate::aggregation::tests::get_test_index_with_num_docs;
use crate::aggregation::AggregationCollector;
use crate::fastfield::FastValue; use crate::fastfield::FastValue;
use crate::query::AllQuery;
pub fn get_collector_from_ranges( pub fn get_collector_from_ranges(
ranges: Vec<RangeAggregationRange>, ranges: Vec<RangeAggregationRange>,
@@ -438,16 +344,9 @@ mod tests {
let req = RangeAggregation { let req = RangeAggregation {
field: "dummy".to_string(), field: "dummy".to_string(),
ranges, ranges,
..Default::default()
}; };
SegmentRangeCollector::from_req_and_validate( SegmentRangeCollector::from_req_and_validate(&req, &Default::default(), field_type).unwrap()
&req,
&Default::default(),
&Default::default(),
field_type,
)
.expect("unexpected error")
} }
#[test] #[test]
@@ -460,7 +359,6 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation { bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(), field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()], ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -468,7 +366,13 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let res = exec_request_with_query(agg_req, &index, None)?; let collector = AggregationCollector::from_aggs(agg_req);
let reader = index.reader()?;
let searcher = reader.searcher();
let agg_res = searcher.search(&AllQuery, &collector).unwrap();
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
assert_eq!(res["range"]["buckets"][0]["key"], "*-0"); assert_eq!(res["range"]["buckets"][0]["key"], "*-0");
assert_eq!(res["range"]["buckets"][0]["doc_count"], 0); assert_eq!(res["range"]["buckets"][0]["doc_count"], 0);
@@ -482,131 +386,6 @@ mod tests {
Ok(()) Ok(())
} }
#[test]
fn range_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(
res,
json!({
"range": {
"buckets": {
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
"0-0.1": {"key": "0-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
"0.1-0.2": {"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
"0.2-*": {"key": "0.2-*", "doc_count": 80, "from": 0.2},
}
}
})
);
Ok(())
}
#[test]
fn range_custom_key_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![
RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
},
RangeAggregationRange {
key: None,
from: Some(0.1f64),
to: Some(0.2f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(
res,
json!({
"range": {
"buckets": [
{"key": "*-0", "doc_count": 0, "to": 0.0},
{"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
{"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
{"key": "0.2-*", "doc_count": 80, "from": 0.2}
]
}
})
);
Ok(())
}
#[test]
fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
}],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(
res,
json!({
"range": {
"buckets": {
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
"custom-key-0-to-0.1": {"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
"0.1-*": {"key": "0.1-*", "doc_count": 90, "from": 0.1},
}
}
})
);
Ok(())
}
#[test] #[test]
fn bucket_test_extend_range_hole() { fn bucket_test_extend_range_hole() {
let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()]; let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()];
@@ -685,7 +464,6 @@ mod tests {
let ranges = vec![ let ranges = vec![
RangeAggregationRange { RangeAggregationRange {
key: None,
to: Some(10.0), to: Some(10.0),
from: None, from: None,
}, },
@@ -695,13 +473,11 @@ mod tests {
let ranges = vec![ let ranges = vec![
RangeAggregationRange { RangeAggregationRange {
key: None,
to: Some(10.0), to: Some(10.0),
from: None, from: None,
}, },
(10.0..100.0).into(), (10.0..100.0).into(),
RangeAggregationRange { RangeAggregationRange {
key: None,
to: None, to: None,
from: Some(100.0), from: Some(100.0),
}, },
@@ -711,7 +487,11 @@ mod tests {
#[test] #[test]
fn range_binary_search_test_f64() { fn range_binary_search_test_f64() {
let ranges = vec![(10.0..100.0).into()]; let ranges = vec![
//(f64::MIN..10.0).into(),
(10.0..100.0).into(),
//(100.0..f64::MAX).into(),
];
let collector = get_collector_from_ranges(ranges, Type::F64); let collector = get_collector_from_ranges(ranges, Type::F64);
let search = |val: u64| collector.get_bucket_pos(val); let search = |val: u64| collector.get_bucket_pos(val);

File diff suppressed because it is too large


@@ -1,5 +1,3 @@
use std::rc::Rc;
use super::agg_req::Aggregations; use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor; use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults; use super::agg_result::AggregationResults;
@@ -7,29 +5,19 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector; use super::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate; use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::{SegmentReader, TantivyError}; use crate::SegmentReader;
/// The default max bucket count, before the aggregation fails.
pub const MAX_BUCKET_COUNT: u32 = 65000;
/// Collector for aggregations. /// Collector for aggregations.
/// ///
/// The collector collects all aggregations by the underlying aggregation request. /// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector { pub struct AggregationCollector {
agg: Aggregations, agg: Aggregations,
max_bucket_count: u32,
} }
impl AggregationCollector { impl AggregationCollector {
/// Create collector from aggregation request. /// Create collector from aggregation request.
/// pub fn from_aggs(agg: Aggregations) -> Self {
/// Aggregation fails when the total bucket count is higher than max_bucket_count. Self { agg }
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
} }
} }
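
For orientation, a sketch of calling the two-argument `from_aggs` above from application code; the paths are the ones exposed under `tantivy::aggregation`, and "price" is an assumed fast field name:

```rust
use tantivy::aggregation::agg_req::{
    Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
};
use tantivy::aggregation::bucket::HistogramAggregation;
use tantivy::aggregation::AggregationCollector;

// Build a price histogram request and wrap it in a collector.
fn price_histogram_collector() -> AggregationCollector {
    let agg_req: Aggregations = vec![(
        "prices".to_string(),
        Aggregation::Bucket(BucketAggregation {
            bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
                field: "price".to_string(),
                interval: 10.0,
                ..Default::default()
            }),
            sub_aggregation: Default::default(),
        }),
    )]
    .into_iter()
    .collect();
    // `None` falls back to MAX_BUCKET_COUNT (65000).
    AggregationCollector::from_aggs(agg_req, None)
}
```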
@@ -39,22 +27,16 @@ impl AggregationCollector {
/// ///
/// # Purpose /// # Purpose
/// AggregationCollector returns `IntermediateAggregationResults` and not the final /// AggregationCollector returns `IntermediateAggregationResults` and not the final
/// `AggregationResults`, so that results from different indices can be merged and then converted /// `AggregationResults`, so that results from differenct indices can be merged and then converted
/// into the final `AggregationResults` via the `into_final_result()` method. /// into the final `AggregationResults` via the `into()` method.
pub struct DistributedAggregationCollector { pub struct DistributedAggregationCollector {
agg: Aggregations, agg: Aggregations,
max_bucket_count: u32,
} }
impl DistributedAggregationCollector { impl DistributedAggregationCollector {
/// Create collector from aggregation request. /// Create collector from aggregation request.
/// pub fn from_aggs(agg: Aggregations) -> Self {
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset Self { agg }
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
} }
} }
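
A hedged sketch of the distributed flow this collector exists for: per-index intermediate results are merged first and converted to the final form once. Paths and method visibility (`merge_fruits`, `into_final_bucket_result`) are assumed from the code shown in this diff:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::intermediate_agg_result::IntermediateAggregationResults;

// Merge intermediate results gathered from several indices, then finalize once.
fn merge_and_finalize(
    mut first: IntermediateAggregationResults,
    others: Vec<IntermediateAggregationResults>,
    agg_req: Aggregations,
) -> tantivy::Result<AggregationResults> {
    for other in others {
        first.merge_fruits(other);
    }
    first.into_final_bucket_result(agg_req)
}
```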
@@ -68,11 +50,7 @@ impl Collector for DistributedAggregationCollector {
_segment_local_id: crate::SegmentOrdinal, _segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader, reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> { ) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader( AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
&self.agg,
reader,
self.max_bucket_count,
)
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
@@ -97,11 +75,7 @@ impl Collector for AggregationCollector {
_segment_local_id: crate::SegmentOrdinal, _segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader, reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> { ) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader( AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
&self.agg,
reader,
self.max_bucket_count,
)
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
@@ -112,18 +86,17 @@ impl Collector for AggregationCollector {
&self, &self,
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>, segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> { ) -> crate::Result<Self::Fruit> {
let res = merge_fruits(segment_fruits)?; merge_fruits(segment_fruits)
res.into_final_bucket_result(self.agg.clone()) .map(|res| AggregationResults::from_intermediate_and_req(res, self.agg.clone()))
} }
} }
fn merge_fruits( fn merge_fruits(
mut segment_fruits: Vec<crate::Result<IntermediateAggregationResults>>, mut segment_fruits: Vec<IntermediateAggregationResults>,
) -> crate::Result<IntermediateAggregationResults> { ) -> crate::Result<IntermediateAggregationResults> {
if let Some(fruit) = segment_fruits.pop() { if let Some(mut fruit) = segment_fruits.pop() {
let mut fruit = fruit?;
for next_fruit in segment_fruits { for next_fruit in segment_fruits {
fruit.merge_fruits(next_fruit?); fruit.merge_fruits(next_fruit);
} }
Ok(fruit) Ok(fruit)
} else { } else {
@@ -133,9 +106,8 @@ fn merge_fruits(
/// AggregationSegmentCollector does the aggregation collection on a segment. /// AggregationSegmentCollector does the aggregation collection on a segment.
pub struct AggregationSegmentCollector { pub struct AggregationSegmentCollector {
aggs_with_accessor: AggregationsWithAccessor, aggs: AggregationsWithAccessor,
result: SegmentAggregationResultsCollector, result: SegmentAggregationResultsCollector,
error: Option<TantivyError>,
} }
impl AggregationSegmentCollector { impl AggregationSegmentCollector {
@@ -144,40 +116,27 @@ impl AggregationSegmentCollector {
pub fn from_agg_req_and_reader( pub fn from_agg_req_and_reader(
agg: &Aggregations, agg: &Aggregations,
reader: &SegmentReader, reader: &SegmentReader,
max_bucket_count: u32,
) -> crate::Result<Self> { ) -> crate::Result<Self> {
let aggs_with_accessor = let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader)?;
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
let result = let result =
SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?; SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
Ok(AggregationSegmentCollector { Ok(AggregationSegmentCollector {
aggs_with_accessor, aggs: aggs_with_accessor,
result, result,
error: None,
}) })
} }
} }
impl SegmentCollector for AggregationSegmentCollector { impl SegmentCollector for AggregationSegmentCollector {
type Fruit = crate::Result<IntermediateAggregationResults>; type Fruit = IntermediateAggregationResults;
#[inline] #[inline]
fn collect(&mut self, doc: crate::DocId, _score: crate::Score) { fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
if self.error.is_some() { self.result.collect(doc, &self.aggs);
return;
}
if let Err(err) = self.result.collect(doc, &self.aggs_with_accessor) {
self.error = Some(err);
}
} }
fn harvest(mut self) -> Self::Fruit { fn harvest(mut self) -> Self::Fruit {
if let Some(err) = self.error { self.result.flush_staged_docs(&self.aggs, true);
return Err(err); self.result.into()
}
self.result
.flush_staged_docs(&self.aggs_with_accessor, true)?;
self.result
.into_intermediate_aggregations_result(&self.aggs_with_accessor)
} }
} }


@@ -3,75 +3,37 @@
//! indices. //! indices.
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::HashMap;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use itertools::Itertools; use itertools::Itertools;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::agg_req::{ use super::agg_req::{AggregationsInternal, BucketAggregationType, MetricAggregation};
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
MetricAggregation,
};
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
use super::bucket::{
cut_off_buckets, get_agg_name_and_property, intermediate_histogram_buckets_to_final_buckets,
GetDocCount, Order, OrderTarget, SegmentHistogramBucketEntry, TermsAggregation,
};
use super::metric::{IntermediateAverage, IntermediateStats}; use super::metric::{IntermediateAverage, IntermediateStats};
use super::segment_agg_result::SegmentMetricResultCollector; use super::segment_agg_result::{
SegmentAggregationResultsCollector, SegmentBucketResultCollector, SegmentHistogramBucketEntry,
SegmentMetricResultCollector, SegmentRangeBucketEntry,
};
use super::{Key, SerializedKey, VecWithNames}; use super::{Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;
/// Contains the intermediate aggregation result, which is optimized to be merged with other /// Contains the intermediate aggregation result, which is optimized to be merged with other
/// intermediate results. /// intermediate results.
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateAggregationResults { pub struct IntermediateAggregationResults {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) metrics: Option<VecWithNames<IntermediateMetricResult>>, pub(crate) metrics: Option<VecWithNames<IntermediateMetricResult>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) buckets: Option<VecWithNames<IntermediateBucketResult>>, pub(crate) buckets: Option<VecWithNames<IntermediateBucketResult>>,
} }
impl From<SegmentAggregationResultsCollector> for IntermediateAggregationResults {
fn from(tree: SegmentAggregationResultsCollector) -> Self {
let metrics = tree.metrics.map(VecWithNames::from_other);
let buckets = tree.buckets.map(VecWithNames::from_other);
Self { metrics, buckets }
}
}
impl IntermediateAggregationResults { impl IntermediateAggregationResults {
/// Convert intermediate result and its aggregation request to the final result.
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
self.into_final_bucket_result_internal(&(req.into()))
}
/// Convert intermediate result and its aggregation request to the final result.
///
/// Internal function, AggregationsInternal is used instead Aggregations, which is optimized
/// for internal processing, by splitting metric and buckets into separate groups.
pub(crate) fn into_final_bucket_result_internal(
self,
req: &AggregationsInternal,
) -> crate::Result<AggregationResults> {
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
// request
let mut results: HashMap<String, AggregationResult> = HashMap::new();
if let Some(buckets) = self.buckets {
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
} else {
// When there are no buckets, we create empty buckets, so that the serialized json
// format is constant
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
};
if let Some(metrics) = self.metrics {
convert_and_add_final_metrics_to_result(&mut results, metrics);
} else {
// When there are no metrics, we create empty metric results, so that the serialized
// json format is constant
add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
}
Ok(AggregationResults(results))
}
pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self { pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self {
let metrics = if req.metrics.is_empty() { let metrics = if req.metrics.is_empty() {
None None
@@ -131,58 +93,6 @@ impl IntermediateAggregationResults {
} }
} }
fn convert_and_add_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
metrics: VecWithNames<IntermediateMetricResult>,
) {
results.extend(
metrics
.into_iter()
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
);
}
fn add_empty_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
results.extend(req_metrics.iter().map(|(key, req)| {
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
(
key.to_string(),
AggregationResult::MetricResult(empty_bucket.into()),
)
}));
Ok(())
}
fn add_empty_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
let requested_buckets = req_buckets.iter();
for (key, req) in requested_buckets {
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
results.insert(key.to_string(), empty_bucket);
}
Ok(())
}
fn convert_and_add_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
buckets: VecWithNames<IntermediateBucketResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
assert_eq!(buckets.len(), req_buckets.len());
let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
for ((key, bucket), req) in buckets_with_request {
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
results.insert(key, result);
}
Ok(())
}
/// An aggregation is either a bucket or a metric. /// An aggregation is either a bucket or a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntermediateAggregationResult { pub enum IntermediateAggregationResult {
@@ -252,83 +162,29 @@ impl IntermediateMetricResult {
pub enum IntermediateBucketResult { pub enum IntermediateBucketResult {
/// This is the range entry for a bucket, which contains a key, count, from, to, and optionally /// This is the range entry for a bucket, which contains a key, count, from, to, and optionally
/// sub_aggregations. /// sub_aggregations.
Range(IntermediateRangeBucketResult), Range(FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>),
/// This is the histogram entry for a bucket, which contains a key, count, and optionally /// This is the histogram entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations. /// sub_aggregations.
Histogram { Histogram {
/// The buckets /// The buckets
buckets: Vec<IntermediateHistogramBucketEntry>, buckets: Vec<IntermediateHistogramBucketEntry>,
}, },
/// Term aggregation }
Terms(IntermediateTermBucketResult),
impl From<SegmentBucketResultCollector> for IntermediateBucketResult {
fn from(collector: SegmentBucketResultCollector) -> Self {
match collector {
SegmentBucketResultCollector::Range(range) => range.into_intermediate_bucket_result(),
SegmentBucketResultCollector::Histogram(histogram) => {
histogram.into_intermediate_bucket_result()
}
}
}
} }
impl IntermediateBucketResult { impl IntermediateBucketResult {
pub(crate) fn into_final_bucket_result(
self,
req: &BucketAggregationInternal,
) -> crate::Result<BucketResult> {
match self {
IntermediateBucketResult::Range(range_res) => {
let mut buckets: Vec<RangeBucketEntry> = range_res
.buckets
.into_iter()
.map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
.collect::<crate::Result<Vec<_>>>()?;
buckets.sort_by(|left, right| {
left.from
.unwrap_or(f64::MIN)
.total_cmp(&right.from.unwrap_or(f64::MIN))
});
let is_keyed = req
.as_range()
.expect("unexpected aggregation, expected range aggregation")
.keyed;
let buckets = if is_keyed {
let mut bucket_map =
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
BucketEntries::HashMap(bucket_map)
} else {
BucketEntries::Vec(buckets)
};
Ok(BucketResult::Range { buckets })
}
IntermediateBucketResult::Histogram { buckets } => {
let buckets = intermediate_histogram_buckets_to_final_buckets(
buckets,
req.as_histogram()
.expect("unexpected aggregation, expected histogram aggregation"),
&req.sub_aggregation,
)?;
let buckets = if req.as_histogram().unwrap().keyed {
let mut bucket_map =
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
BucketEntries::HashMap(bucket_map)
} else {
BucketEntries::Vec(buckets)
};
Ok(BucketResult::Histogram { buckets })
}
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
req.as_term()
.expect("unexpected aggregation, expected term aggregation"),
&req.sub_aggregation,
),
}
}
pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self { pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self {
match req { match req {
BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
BucketAggregationType::Range(_) => IntermediateBucketResult::Range(Default::default()), BucketAggregationType::Range(_) => IntermediateBucketResult::Range(Default::default()),
BucketAggregationType::Histogram(_) => { BucketAggregationType::Histogram(_) => {
IntermediateBucketResult::Histogram { buckets: vec![] } IntermediateBucketResult::Histogram { buckets: vec![] }
@@ -338,34 +194,24 @@ impl IntermediateBucketResult {
fn merge_fruits(&mut self, other: IntermediateBucketResult) { fn merge_fruits(&mut self, other: IntermediateBucketResult) {
match (self, other) { match (self, other) {
( (
IntermediateBucketResult::Terms(term_res_left), IntermediateBucketResult::Range(entries_left),
IntermediateBucketResult::Terms(term_res_right), IntermediateBucketResult::Range(entries_right),
) => { ) => {
merge_maps(&mut term_res_left.entries, term_res_right.entries); merge_maps(entries_left, entries_right);
term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
term_res_left.doc_count_error_upper_bound +=
term_res_right.doc_count_error_upper_bound;
}
(
IntermediateBucketResult::Range(range_res_left),
IntermediateBucketResult::Range(range_res_right),
) => {
merge_maps(&mut range_res_left.buckets, range_res_right.buckets);
} }
( (
IntermediateBucketResult::Histogram { IntermediateBucketResult::Histogram {
buckets: buckets_left, buckets: entries_left,
.. ..
}, },
IntermediateBucketResult::Histogram { IntermediateBucketResult::Histogram {
buckets: buckets_right, buckets: entries_right,
.. ..
}, },
) => { ) => {
let buckets = buckets_left let mut buckets = entries_left
.drain(..) .drain(..)
.merge_join_by(buckets_right.into_iter(), |left, right| { .merge_join_by(entries_right.into_iter(), |left, right| {
left.key.partial_cmp(&right.key).unwrap_or(Ordering::Equal) left.key.partial_cmp(&right.key).unwrap_or(Ordering::Equal)
}) })
.map(|either| match either { .map(|either| match either {
@@ -378,7 +224,7 @@ impl IntermediateBucketResult {
}) })
.collect(); .collect();
*buckets_left = buckets; std::mem::swap(entries_left, &mut buckets);
} }
(IntermediateBucketResult::Range(_), _) => { (IntermediateBucketResult::Range(_), _) => {
panic!("try merge on different types") panic!("try merge on different types")
@@ -386,114 +232,10 @@ impl IntermediateBucketResult {
(IntermediateBucketResult::Histogram { .. }, _) => { (IntermediateBucketResult::Histogram { .. }, _) => {
panic!("try merge on different types") panic!("try merge on different types")
} }
(IntermediateBucketResult::Terms { .. }, _) => {
panic!("try merge on different types")
}
} }
} }
} }
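The histogram arm of `merge_fruits` above relies on both bucket lists being sorted by key, so `itertools::merge_join_by` can combine them in a single pass. A self-contained sketch of that merge strategy, using a simplified stand-in `Bucket` type rather than the actual `IntermediateHistogramBucketEntry`:

```rust
use std::cmp::Ordering;

use itertools::{EitherOrBoth, Itertools};

// Simplified stand-in for illustration; not the tantivy bucket struct.
struct Bucket {
    key: f64,
    doc_count: u64,
}

/// Merge two bucket lists that are already sorted by key: buckets sharing a
/// key are combined, everything else is kept as-is.
fn merge_histogram_buckets(left: Vec<Bucket>, right: Vec<Bucket>) -> Vec<Bucket> {
    left.into_iter()
        .merge_join_by(right.into_iter(), |l, r| {
            l.key.partial_cmp(&r.key).unwrap_or(Ordering::Equal)
        })
        .map(|either| match either {
            EitherOrBoth::Both(mut l, r) => {
                l.doc_count += r.doc_count;
                l
            }
            EitherOrBoth::Left(bucket) | EitherOrBoth::Right(bucket) => bucket,
        })
        .collect()
}

fn main() {
    let merged = merge_histogram_buckets(
        vec![
            Bucket { key: 0.0, doc_count: 3 },
            Bucket { key: 10.0, doc_count: 1 },
        ],
        vec![Bucket { key: 10.0, doc_count: 2 }],
    );
    assert_eq!(merged.len(), 2);
    assert_eq!(merged[1].doc_count, 3);
}
```

Buckets present on only one side pass through unchanged, which is why a key missing from either segment's result is not an error.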
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Range aggregation including error counts
pub struct IntermediateRangeBucketResult {
pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
}
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Term aggregation including error counts
pub struct IntermediateTermBucketResult {
pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
pub(crate) sum_other_doc_count: u64,
pub(crate) doc_count_error_upper_bound: u64,
}
impl IntermediateTermBucketResult {
pub(crate) fn into_final_result(
self,
req: &TermsAggregation,
sub_aggregation_req: &AggregationsInternal,
) -> crate::Result<BucketResult> {
let req = TermsAggregationInternal::from_req(req);
let mut buckets: Vec<BucketEntry> = self
.entries
.into_iter()
.filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
.map(|(key, entry)| {
Ok(BucketEntry {
key: Key::Str(key),
doc_count: entry.doc_count,
sub_aggregation: entry
.sub_aggregation
.into_final_bucket_result_internal(sub_aggregation_req)?,
})
})
.collect::<crate::Result<_>>()?;
let order = req.order.order;
match req.order.target {
OrderTarget::Key => {
buckets.sort_by(|left, right| {
if req.order.order == Order::Desc {
left.key.partial_cmp(&right.key)
} else {
right.key.partial_cmp(&left.key)
}
.expect("expected type string, which is always sortable")
});
}
OrderTarget::Count => {
if req.order.order == Order::Desc {
buckets.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.doc_count()));
} else {
buckets.sort_unstable_by_key(|bucket| bucket.doc_count());
}
}
OrderTarget::SubAggregation(name) => {
let (agg_name, agg_property) = get_agg_name_and_property(&name);
let mut buckets_with_val = buckets
.into_iter()
.map(|bucket| {
let val = bucket
.sub_aggregation
.get_value_from_aggregation(agg_name, agg_property)?
.unwrap_or(f64::NAN);
Ok((bucket, val))
})
.collect::<crate::Result<Vec<_>>>()?;
buckets_with_val.sort_by(|(_, val1), (_, val2)| match &order {
Order::Desc => val2.total_cmp(val1),
Order::Asc => val1.total_cmp(val2),
});
buckets = buckets_with_val
.into_iter()
.map(|(bucket, _val)| bucket)
.collect_vec();
}
}
// We ignore _term_doc_count_before_cutoff here, because it increases the upperbound error
// only for terms that didn't make it into the top N.
//
// This can be interesting, as a value of quality of the results, but not good to check the
// actual error count for the returned terms.
let (_term_doc_count_before_cutoff, sum_other_doc_count) =
cut_off_buckets(&mut buckets, req.size as usize);
let doc_count_error_upper_bound = if req.show_term_doc_count_error {
Some(self.doc_count_error_upper_bound)
} else {
None
};
Ok(BucketResult::Terms {
buckets,
sum_other_doc_count: self.sum_other_doc_count + sum_other_doc_count,
doc_count_error_upper_bound,
})
}
}
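The comment above about `sum_other_doc_count` boils down to a size cutoff: after sorting, only the top `size` buckets are returned and the documents of the dropped buckets are summed up. A minimal sketch of that cutoff with a hypothetical `Bucket` stand-in (the real code works on `BucketEntry` and additionally reports the doc count of the largest dropped bucket):

```rust
// Hypothetical stand-in type; not the tantivy BucketEntry.
struct Bucket {
    key: String,
    doc_count: u64,
}

/// Keep the top `size` buckets and return how many documents fell into the
/// dropped ones. Buckets are assumed to be sorted by the requested order.
fn cut_off_buckets(buckets: &mut Vec<Bucket>, size: usize) -> u64 {
    let mut sum_other_doc_count = 0;
    if buckets.len() > size {
        for bucket in buckets.drain(size..) {
            sum_other_doc_count += bucket.doc_count;
        }
    }
    sum_other_doc_count
}

fn main() {
    let mut buckets = vec![
        Bucket { key: "a".into(), doc_count: 10 },
        Bucket { key: "b".into(), doc_count: 4 },
        Bucket { key: "c".into(), doc_count: 1 },
    ];
    let other = cut_off_buckets(&mut buckets, 2);
    assert_eq!(other, 1);
    assert_eq!(buckets.len(), 2);
    assert_eq!(buckets[1].key, "b");
}
```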
trait MergeFruits { trait MergeFruits {
fn merge_fruits(&mut self, other: Self); fn merge_fruits(&mut self, other: Self);
} }
@@ -525,21 +267,6 @@ pub struct IntermediateHistogramBucketEntry {
pub sub_aggregation: IntermediateAggregationResults, pub sub_aggregation: IntermediateAggregationResults,
} }
impl IntermediateHistogramBucketEntry {
pub(crate) fn into_final_bucket_entry(
self,
req: &AggregationsInternal,
) -> crate::Result<BucketEntry> {
Ok(BucketEntry {
key: Key::F64(self.key),
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req)?,
})
}
}
impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry { impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
fn from(entry: SegmentHistogramBucketEntry) -> Self { fn from(entry: SegmentHistogramBucketEntry) -> Self {
IntermediateHistogramBucketEntry { IntermediateHistogramBucketEntry {
@@ -550,6 +277,26 @@ impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
} }
} }
impl
From<(
SegmentHistogramBucketEntry,
SegmentAggregationResultsCollector,
)> for IntermediateHistogramBucketEntry
{
fn from(
entry: (
SegmentHistogramBucketEntry,
SegmentAggregationResultsCollector,
),
) -> Self {
IntermediateHistogramBucketEntry {
key: entry.0.key,
doc_count: entry.0.doc_count,
sub_aggregation: entry.1.into(),
}
}
}
/// This is the range entry for a bucket, which contains a key, count, and optionally /// This is the range entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations. /// sub_aggregations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -558,6 +305,7 @@ pub struct IntermediateRangeBucketEntry {
pub key: Key, pub key: Key,
/// The number of documents in the bucket. /// The number of documents in the bucket.
pub doc_count: u64, pub doc_count: u64,
pub(crate) values: Option<Vec<u64>>,
/// The sub_aggregation in this bucket. /// The sub_aggregation in this bucket.
pub sub_aggregation: IntermediateAggregationResults, pub sub_aggregation: IntermediateAggregationResults,
/// The from range of the bucket. Equals f64::MIN when None. /// The from range of the bucket. Equals f64::MIN when None.
@@ -568,37 +316,22 @@ pub struct IntermediateRangeBucketEntry {
pub to: Option<f64>, pub to: Option<f64>,
} }
impl IntermediateRangeBucketEntry { impl From<SegmentRangeBucketEntry> for IntermediateRangeBucketEntry {
pub(crate) fn into_final_bucket_entry( fn from(entry: SegmentRangeBucketEntry) -> Self {
self, let sub_aggregation = if let Some(sub_aggregation) = entry.sub_aggregation {
req: &AggregationsInternal, sub_aggregation.into()
) -> crate::Result<RangeBucketEntry> { } else {
Ok(RangeBucketEntry { Default::default()
key: self.key, };
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req)?,
to: self.to,
from: self.from,
})
}
}
/// This is the term entry for a bucket, which contains a count, and optionally IntermediateRangeBucketEntry {
/// sub_aggregations. key: entry.key,
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] doc_count: entry.doc_count,
pub struct IntermediateTermBucketEntry { values: None,
/// The number of documents in the bucket. sub_aggregation,
pub doc_count: u64, to: entry.to,
/// The sub_aggregation in this bucket. from: entry.from,
pub sub_aggregation: IntermediateAggregationResults, }
}
impl MergeFruits for IntermediateTermBucketEntry {
fn merge_fruits(&mut self, other: IntermediateTermBucketEntry) {
self.doc_count += other.doc_count;
self.sub_aggregation.merge_fruits(other.sub_aggregation);
} }
} }
@@ -633,6 +366,7 @@ mod tests {
IntermediateRangeBucketEntry { IntermediateRangeBucketEntry {
key: Key::Str(key.to_string()), key: Key::Str(key.to_string()),
doc_count: *doc_count, doc_count: *doc_count,
values: None,
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
from: None, from: None,
to: None, to: None,
@@ -641,7 +375,7 @@ mod tests {
} }
map.insert( map.insert(
"my_agg_level2".to_string(), "my_agg_level2".to_string(),
IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }), IntermediateBucketResult::Range(buckets),
); );
IntermediateAggregationResults { IntermediateAggregationResults {
buckets: Some(VecWithNames::from_entries(map.into_iter().collect())), buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
@@ -660,6 +394,7 @@ mod tests {
IntermediateRangeBucketEntry { IntermediateRangeBucketEntry {
key: Key::Str(key.to_string()), key: Key::Str(key.to_string()),
doc_count: *doc_count, doc_count: *doc_count,
values: None,
from: None, from: None,
to: None, to: None,
sub_aggregation: get_sub_test_tree(&[( sub_aggregation: get_sub_test_tree(&[(
@@ -671,7 +406,7 @@ mod tests {
} }
map.insert( map.insert(
"my_agg_level1".to_string(), "my_agg_level1".to_string(),
IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }), IntermediateBucketResult::Range(buckets),
); );
IntermediateAggregationResults { IntermediateAggregationResults {
buckets: Some(VecWithNames::from_entries(map.into_iter().collect())), buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),


@@ -1,10 +1,9 @@
use std::fmt::Debug; use std::fmt::Debug;
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::aggregation::f64_from_fastfield_u64; use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::DynamicFastFieldReader; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type; use crate::schema::Type;
use crate::DocId; use crate::DocId;
@@ -20,7 +19,7 @@ use crate::DocId;
/// "avg": { /// "avg": {
/// "field": "score", /// "field": "score",
/// } /// }
/// } /// }
/// ``` /// ```
pub struct AverageAggregation { pub struct AverageAggregation {
/// The field name to compute the stats on. /// The field name to compute the stats on.
@@ -61,10 +60,10 @@ impl SegmentAverageCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) { pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4); let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() { for docs in iter.by_ref() {
let val1 = field.get_val(docs[0] as u64); let val1 = field.get(docs[0]);
let val2 = field.get_val(docs[1] as u64); let val2 = field.get(docs[1]);
let val3 = field.get_val(docs[2] as u64); let val3 = field.get(docs[2]);
let val4 = field.get_val(docs[3] as u64); let val4 = field.get(docs[3]);
let val1 = f64_from_fastfield_u64(val1, &self.field_type); let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type); let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type); let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -74,8 +73,8 @@ impl SegmentAverageCollector {
self.data.collect(val3); self.data.collect(val3);
self.data.collect(val4); self.data.collect(val4);
} }
for &doc in iter.remainder() { for doc in iter.remainder() {
let val = field.get_val(doc as u64); let val = field.get(*doc);
let val = f64_from_fastfield_u64(val, &self.field_type); let val = f64_from_fastfield_u64(val, &self.field_type);
self.data.collect(val); self.data.collect(val);
} }
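The `collect_block` loop above walks the staged doc ids in chunks of four before handling the remainder, so the four fast field lookups can be issued back to back. A generic sketch of that access pattern, with `fetch` standing in for the fast field read and `collect` for the metric update (both are hypothetical closures, not tantivy APIs):

```rust
// Process documents in chunks of four, then handle the leftover docs.
fn collect_block(docs: &[u32], fetch: impl Fn(u32) -> u64, mut collect: impl FnMut(u64)) {
    let mut iter = docs.chunks_exact(4);
    for chunk in iter.by_ref() {
        // Issue the four reads before consuming any of them.
        let vals = [fetch(chunk[0]), fetch(chunk[1]), fetch(chunk[2]), fetch(chunk[3])];
        for val in vals {
            collect(val);
        }
    }
    for &doc in iter.remainder() {
        collect(fetch(doc));
    }
}

fn main() {
    let mut sum = 0u64;
    collect_block(&[1, 2, 3, 4, 5], |doc| doc as u64 * 10, |v| sum += v);
    assert_eq!(sum, 150);
}
```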


@@ -1,10 +1,9 @@
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::aggregation::f64_from_fastfield_u64; use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::DynamicFastFieldReader; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type; use crate::schema::Type;
use crate::{DocId, TantivyError}; use crate::DocId;
/// A multi-value metric aggregation that computes stats of numeric values that are /// A multi-value metric aggregation that computes stats of numeric values that are
/// extracted from the aggregated documents. /// extracted from the aggregated documents.
@@ -54,23 +53,6 @@ pub struct Stats {
pub avg: Option<f64>, pub avg: Option<f64>,
} }
impl Stats {
pub(crate) fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
match agg_property {
"count" => Ok(Some(self.count as f64)),
"sum" => Ok(Some(self.sum)),
"standard_deviation" => Ok(self.standard_deviation),
"min" => Ok(self.min),
"max" => Ok(self.max),
"avg" => Ok(self.avg),
_ => Err(TantivyError::InvalidArgument(format!(
"unknown property {} on stats metric aggregation",
agg_property
))),
}
}
}
/// IntermediateStats contains the mergeable version for stats. /// IntermediateStats contains the mergeable version for stats.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateStats { pub struct IntermediateStats {
@@ -167,10 +149,10 @@ impl SegmentStatsCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) { pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4); let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() { for docs in iter.by_ref() {
let val1 = field.get_val(docs[0] as u64); let val1 = field.get(docs[0]);
let val2 = field.get_val(docs[1] as u64); let val2 = field.get(docs[1]);
let val3 = field.get_val(docs[2] as u64); let val3 = field.get(docs[2]);
let val4 = field.get_val(docs[3] as u64); let val4 = field.get(docs[3]);
let val1 = f64_from_fastfield_u64(val1, &self.field_type); let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type); let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type); let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -180,8 +162,8 @@ impl SegmentStatsCollector {
self.stats.collect(val3); self.stats.collect(val3);
self.stats.collect(val4); self.stats.collect(val4);
} }
for &doc in iter.remainder() { for doc in iter.remainder() {
let val = field.get_val(doc as u64); let val = field.get(*doc);
let val = f64_from_fastfield_u64(val, &self.field_type); let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val); self.stats.collect(val);
} }
@@ -223,7 +205,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -286,7 +268,6 @@ mod tests {
(7f64..19f64).into(), (7f64..19f64).into(),
(19f64..20f64).into(), (19f64..20f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: iter::once(( sub_aggregation: iter::once((
"stats".to_string(), "stats".to_string(),
@@ -301,7 +282,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap(); let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();


@@ -20,15 +20,14 @@
//! //!
//! #### Limitations //! #### Limitations
//! //!
//! Currently aggregations work only on single value fast fields of type u64, f64, i64 and //! Currently aggregations work only on single value fast fields of type u64, f64 and i64.
//! fast fields on text fields.
//! //!
//! # JSON Format //! # JSON Format
//! Aggregations request and result structures de/serialize into elasticsearch compatible JSON. //! Aggregations request and result structures de/serialize into elasticsearch compatible JSON.
//! //!
//! ```verbatim //! ```verbatim
//! let agg_req: Aggregations = serde_json::from_str(json_request_string).unwrap(); //! let agg_req: Aggregations = serde_json::from_str(json_request_string).unwrap();
//! let collector = AggregationCollector::from_aggs(agg_req, None); //! let collector = AggregationCollector::from_aggs(agg_req);
//! let searcher = reader.searcher(); //! let searcher = reader.searcher();
//! let agg_res = searcher.search(&term_query, &collector).unwrap_err(); //! let agg_res = searcher.search(&term_query, &collector).unwrap_err();
//! let json_response_string: String = &serde_json::to_string(&agg_res)?; //! let json_response_string: String = &serde_json::to_string(&agg_res)?;
@@ -38,7 +37,6 @@
//! - [Bucket](bucket) //! - [Bucket](bucket)
//! - [Histogram](bucket::HistogramAggregation) //! - [Histogram](bucket::HistogramAggregation)
//! - [Range](bucket::RangeAggregation) //! - [Range](bucket::RangeAggregation)
//! - [Terms](bucket::TermsAggregation)
//! - [Metric](metric) //! - [Metric](metric)
//! - [Average](metric::AverageAggregation) //! - [Average](metric::AverageAggregation)
//! - [Stats](metric::StatsAggregation) //! - [Stats](metric::StatsAggregation)
@@ -68,7 +66,7 @@
//! .into_iter() //! .into_iter()
//! .collect(); //! .collect();
//! //!
//! let collector = AggregationCollector::from_aggs(agg_req, None); //! let collector = AggregationCollector::from_aggs(agg_req);
//! //!
//! let searcher = reader.searcher(); //! let searcher = reader.searcher();
//! let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap(); //! let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
@@ -132,7 +130,6 @@
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{ //! bucket_agg: BucketAggregationType::Range(RangeAggregation{
//! field: "score".to_string(), //! field: "score".to_string(),
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()], //! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//! keyed: false,
//! }), //! }),
//! sub_aggregation: sub_agg_req_1.clone(), //! sub_aggregation: sub_agg_req_1.clone(),
//! }), //! }),
@@ -150,8 +147,7 @@
//! IntermediateAggregationResults provides the //! IntermediateAggregationResults provides the
//! [merge_fruits](intermediate_agg_result::IntermediateAggregationResults::merge_fruits) method to //! [merge_fruits](intermediate_agg_result::IntermediateAggregationResults::merge_fruits) method to
//! merge multiple results. The merged result can then be converted into //! merge multiple results. The merged result can then be converted into
//! [agg_result::AggregationResults] via the //! [agg_result::AggregationResults] via the [Into] trait.
//! [agg_result::AggregationResults::from_intermediate_and_req] method.
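A rough illustration of the merge step described above, using trivial stand-in types rather than the real `IntermediateAggregationResults`: each shard or segment contributes one intermediate value, the values are merged pairwise, and only the merged result is turned into the final, serializable response.

```rust
// Hypothetical stand-in for an intermediate, mergeable result.
#[derive(Default)]
struct IntermediateCount {
    doc_count: u64,
}

impl IntermediateCount {
    // Mirrors the idea of merge_fruits: fold another partial result into self.
    fn merge_fruits(&mut self, other: IntermediateCount) {
        self.doc_count += other.doc_count;
    }
    // Conversion into the final, user-facing representation happens last.
    fn into_final(self) -> String {
        format!(r#"{{"doc_count": {}}}"#, self.doc_count)
    }
}

fn main() {
    // e.g. one intermediate result per shard or segment
    let shard_results = vec![
        IntermediateCount { doc_count: 10 },
        IntermediateCount { doc_count: 32 },
    ];
    let mut merged = IntermediateCount::default();
    for res in shard_results {
        merged.merge_fruits(res);
    }
    assert_eq!(merged.into_final(), r#"{"doc_count": 42}"#);
}
```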
pub mod agg_req; pub mod agg_req;
mod agg_req_with_accessor; mod agg_req_with_accessor;
@@ -167,7 +163,6 @@ use std::fmt::Display;
pub use collector::{ pub use collector::{
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector, AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
MAX_BUCKET_COUNT,
}; };
use itertools::Itertools; use itertools::Itertools;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -250,14 +245,6 @@ impl<T: Clone> VecWithNames<T> {
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.keys.is_empty() self.keys.is_empty()
} }
fn len(&self) -> usize {
self.keys.len()
}
fn get(&self, name: &str) -> Option<&T> {
self.keys()
.position(|key| key == name)
.map(|pos| &self.values[pos])
}
} }
/// The serialized key is used in a HashMap. /// The serialized key is used in a HashMap.
@@ -324,16 +311,13 @@ mod tests {
use super::bucket::RangeAggregation; use super::bucket::RangeAggregation;
use super::collector::AggregationCollector; use super::collector::AggregationCollector;
use super::metric::AverageAggregation; use super::metric::AverageAggregation;
use crate::aggregation::agg_req::{ use crate::aggregation::agg_req::{BucketAggregationType, MetricAggregation};
get_term_dict_field_names, BucketAggregationType, MetricAggregation,
};
use crate::aggregation::agg_result::AggregationResults; use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::bucket::TermsAggregation;
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults; use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use crate::aggregation::segment_agg_result::DOC_BLOCK_SIZE; use crate::aggregation::segment_agg_result::DOC_BLOCK_SIZE;
use crate::aggregation::DistributedAggregationCollector; use crate::aggregation::DistributedAggregationCollector;
use crate::query::{AllQuery, TermQuery}; use crate::query::{AllQuery, TermQuery};
use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING}; use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing};
use crate::{Index, Term}; use crate::{Index, Term};
fn get_avg_req(field_name: &str) -> Aggregation { fn get_avg_req(field_name: &str) -> Aggregation {
@@ -352,82 +336,17 @@ mod tests {
) )
} }
pub fn exec_request(agg_req: Aggregations, index: &Index) -> crate::Result<Value> {
exec_request_with_query(agg_req, index, None)
}
pub fn exec_request_with_query(
agg_req: Aggregations,
index: &Index,
query: Option<(&str, &str)>,
) -> crate::Result<Value> {
let collector = AggregationCollector::from_aggs(agg_req, None);
let reader = index.reader()?;
let searcher = reader.searcher();
let agg_res = if let Some((field, term)) = query {
let text_field = reader.searcher().schema().get_field(field).unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, term),
IndexRecordOption::Basic,
);
searcher.search(&term_query, &collector)?
} else {
searcher.search(&AllQuery, &collector)?
};
// Test serialization/deserialization roundtrip
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
Ok(res)
}
pub fn get_test_index_from_values( pub fn get_test_index_from_values(
merge_segments: bool, merge_segments: bool,
values: &[f64], values: &[f64],
) -> crate::Result<Index> {
// Every value gets its own segment
let mut segment_and_values = vec![];
for value in values {
segment_and_values.push(vec![(*value, value.to_string())]);
}
get_test_index_from_values_and_terms(merge_segments, &segment_and_values)
}
pub fn get_test_index_from_terms(
merge_segments: bool,
values: &[Vec<&str>],
) -> crate::Result<Index> {
// Every value gets its own segment
let segment_and_values = values
.iter()
.map(|terms| {
terms
.iter()
.enumerate()
.map(|(i, term)| (i as f64, term.to_string()))
.collect()
})
.collect::<Vec<_>>();
get_test_index_from_values_and_terms(merge_segments, &segment_and_values)
}
pub fn get_test_index_from_values_and_terms(
merge_segments: bool,
segment_and_values: &[Vec<(f64, String)>],
) -> crate::Result<Index> { ) -> crate::Result<Index> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default() let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
TextFieldIndexing::default() TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
.set_index_option(IndexRecordOption::Basic)
.set_fieldnorms(false),
) )
.set_fast()
.set_stored(); .set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype.clone()); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype);
let string_field_id = schema_builder.add_text_field("string_id", STRING | FAST);
let score_fieldtype = let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue); crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone()); let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
@@ -439,22 +358,16 @@ mod tests {
); );
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
{ {
// let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?; for &i in values {
for values in segment_and_values { // writing the segment
for (i, term) in values { index_writer.add_document(doc!(
let i = *i; text_field => "cool",
// writing the segment score_field => i as u64,
index_writer.add_document(doc!( score_field_f64 => i as f64,
text_field => "cool", score_field_i64 => i as i64,
text_field_id => term.to_string(), fraction_field => i as f64/100.0,
string_field_id => term.to_string(), ))?;
score_field => i as u64,
score_field_f64 => i as f64,
score_field_i64 => i as i64,
fraction_field => i as f64/100.0,
))?;
}
index_writer.commit()?; index_writer.commit()?;
} }
} }
@@ -462,11 +375,9 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
if segment_ids.len() > 1 { let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_for_tests()?; index_writer.merge(&segment_ids).wait()?;
index_writer.merge(&segment_ids).wait()?; index_writer.wait_merging_threads()?;
index_writer.wait_merging_threads()?;
}
} }
Ok(index) Ok(index)
@@ -477,13 +388,15 @@ mod tests {
merge_segments: bool, merge_segments: bool,
use_distributed_collector: bool, use_distributed_collector: bool,
) -> crate::Result<()> { ) -> crate::Result<()> {
let mut values_and_terms = (0..80) let index = get_test_index_with_num_docs(merge_segments, 80)?;
.map(|val| vec![(val as f64, "terma".to_string())])
.collect::<Vec<_>>();
values_and_terms.last_mut().unwrap()[0].1 = "termb".to_string();
let index = get_test_index_from_values_and_terms(merge_segments, &values_and_terms)?;
let reader = index.reader()?; let reader = index.reader()?;
let text_field = reader.searcher().schema().get_field("text").unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
assert_eq!(DOC_BLOCK_SIZE, 64); assert_eq!(DOC_BLOCK_SIZE, 64);
// In the tree we cache Documents of DOC_BLOCK_SIZE, before passing them down as one block. // In the tree we cache Documents of DOC_BLOCK_SIZE, before passing them down as one block.
@@ -518,20 +431,7 @@ mod tests {
"histogram": { "histogram": {
"field": "score", "field": "score",
"interval": 70.0, "interval": 70.0,
"offset": 3.0 "offset": 3.0,
},
"aggs": {
"bucketsL2": {
"histogram": {
"field": "score",
"interval": 70.0
}
}
}
},
"term_agg_test":{
"terms": {
"field": "string_id"
}, },
"aggs": { "aggs": {
"bucketsL2": { "bucketsL2": {
@@ -549,18 +449,18 @@ mod tests {
.unwrap(); .unwrap();
let agg_res: AggregationResults = if use_distributed_collector { let agg_res: AggregationResults = if use_distributed_collector {
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None); let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
let searcher = reader.searcher(); let searcher = reader.searcher();
let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap(); AggregationResults::from_intermediate_and_req(
intermediate_agg_result searcher.search(&term_query, &collector).unwrap(),
.into_final_bucket_result(agg_req) agg_req,
.unwrap() )
} else { } else {
let collector = AggregationCollector::from_aggs(agg_req, None); let collector = AggregationCollector::from_aggs(agg_req);
let searcher = reader.searcher(); let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap() searcher.search(&term_query, &collector).unwrap()
}; };
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?; let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
@@ -590,46 +490,6 @@ mod tests {
); );
assert_eq!(res["bucketsL1"]["buckets"][2]["doc_count"], 80 - 70); assert_eq!(res["bucketsL1"]["buckets"][2]["doc_count"], 80 - 70);
assert_eq!(
res["term_agg_test"],
json!(
{
"buckets": [
{
"bucketsL2": {
"buckets": [
{
"doc_count": 70,
"key": 0.0
},
{
"doc_count": 9,
"key": 70.0
}
]
},
"doc_count": 79,
"key": "terma"
},
{
"bucketsL2": {
"buckets": [
{
"doc_count": 1,
"key": 70.0
}
]
},
"doc_count": 1,
"key": "termb"
}
],
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0
}
)
);
Ok(()) Ok(())
} }
@@ -647,10 +507,8 @@ mod tests {
.set_indexing_options( .set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs), TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
) )
.set_fast()
.set_stored(); .set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
schema_builder.add_text_field("dummy_text", STRING);
let score_fieldtype = let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue); crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone()); let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
@@ -766,7 +624,6 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation { bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(), field: "score".to_string(),
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()], ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
..Default::default()
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -777,7 +634,6 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation { bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_f64".to_string(), field: "score_f64".to_string(),
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()], ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
..Default::default()
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -788,7 +644,6 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation { bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_i64".to_string(), field: "score_i64".to_string(),
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()], ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
..Default::default()
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -797,7 +652,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap(); let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -858,21 +713,10 @@ mod tests {
IndexRecordOption::Basic, IndexRecordOption::Basic,
); );
let sub_agg_req: Aggregations = vec![ let sub_agg_req: Aggregations =
("average_in_range".to_string(), get_avg_req("score")), vec![("average_in_range".to_string(), get_avg_req("score"))]
( .into_iter()
"term_agg".to_string(), .collect();
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text".to_string(),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
),
]
.into_iter()
.collect();
let agg_req: Aggregations = if use_elastic_json_req { let agg_req: Aggregations = if use_elastic_json_req {
let elasticsearch_compatible_json_req = r#" let elasticsearch_compatible_json_req = r#"
{ {
@@ -888,8 +732,7 @@ mod tests {
] ]
}, },
"aggs": { "aggs": {
"average_in_range": { "avg": { "field": "score" } }, "average_in_range": { "avg": { "field": "score" } }
"term_agg": { "terms": { "field": "text" } }
} }
}, },
"rangei64": { "rangei64": {
@@ -904,8 +747,7 @@ mod tests {
] ]
}, },
"aggs": { "aggs": {
"average_in_range": { "avg": { "field": "score" } }, "average_in_range": { "avg": { "field": "score" } }
"term_agg": { "terms": { "field": "text" } }
} }
}, },
"average": { "average": {
@@ -923,8 +765,7 @@ mod tests {
] ]
}, },
"aggs": { "aggs": {
"average_in_range": { "avg": { "field": "score" } }, "average_in_range": { "avg": { "field": "score" } }
"term_agg": { "terms": { "field": "text" } }
} }
} }
} }
@@ -945,7 +786,6 @@ mod tests {
(7f64..19f64).into(), (7f64..19f64).into(),
(19f64..20f64).into(), (19f64..20f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: sub_agg_req.clone(), sub_aggregation: sub_agg_req.clone(),
}), }),
@@ -960,7 +800,6 @@ mod tests {
(7f64..19f64).into(), (7f64..19f64).into(),
(19f64..20f64).into(), (19f64..20f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: sub_agg_req.clone(), sub_aggregation: sub_agg_req.clone(),
}), }),
@@ -975,7 +814,6 @@ mod tests {
(7f64..19f64).into(), (7f64..19f64).into(),
(19f64..20f64).into(), (19f64..20f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: sub_agg_req, sub_aggregation: sub_agg_req,
}), }),
@@ -986,20 +824,17 @@ mod tests {
agg_req agg_req
}; };
let field_names = get_term_dict_field_names(&agg_req);
assert_eq!(field_names, vec!["text".to_string()].into_iter().collect());
let agg_res: AggregationResults = if use_distributed_collector { let agg_res: AggregationResults = if use_distributed_collector {
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None); let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
let searcher = reader.searcher(); let searcher = reader.searcher();
let res = searcher.search(&term_query, &collector).unwrap(); let res = searcher.search(&term_query, &collector).unwrap();
// Test de/serialization roundtrip on intermediate_agg_result // Test de/serialization roundtrip on intermediate_agg_result
let res: IntermediateAggregationResults = let res: IntermediateAggregationResults =
serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap(); serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
res.into_final_bucket_result(agg_req.clone()).unwrap() AggregationResults::from_intermediate_and_req(res, agg_req.clone())
} else { } else {
let collector = AggregationCollector::from_aggs(agg_req.clone(), None); let collector = AggregationCollector::from_aggs(agg_req.clone());
let searcher = reader.searcher(); let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap() searcher.search(&term_query, &collector).unwrap()
@@ -1057,7 +892,7 @@ mod tests {
); );
// Test empty result set // Test empty result set
let collector = AggregationCollector::from_aggs(agg_req, None); let collector = AggregationCollector::from_aggs(agg_req);
let searcher = reader.searcher(); let searcher = reader.searcher();
searcher.search(&query_with_no_hits, &collector).unwrap(); searcher.search(&query_with_no_hits, &collector).unwrap();
@@ -1122,17 +957,17 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap_err() searcher.search(&AllQuery, &collector).unwrap_err()
}; };
let agg_res = avg_on_field("dummy_text"); let agg_res = avg_on_field("text");
assert_eq!( assert_eq!(
format!("{:?}", agg_res), format!("{:?}", agg_res),
r#"InvalidArgument("Only fast fields of type f64, u64, i64 are supported, but got Str ")"# r#"InvalidArgument("Only single value fast fields of type f64, u64, i64 are supported, but got Str ")"#
); );
let agg_res = avg_on_field("not_exist_field"); let agg_res = avg_on_field("not_exist_field");
@@ -1144,7 +979,7 @@ mod tests {
let agg_res = avg_on_field("scores_i64"); let agg_res = avg_on_field("scores_i64");
assert_eq!( assert_eq!(
format!("{:?}", agg_res), format!("{:?}", agg_res),
r#"InvalidArgument("Invalid field cardinality on field scores_i64 expected SingleValue, but got MultiValues")"# r#"InvalidArgument("Invalid field type in aggregation I64, only Cardinality::SingleValue supported")"#
); );
Ok(()) Ok(())
@@ -1153,12 +988,11 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use test::{self, Bencher}; use test::{self, Bencher};
use super::*; use super::*;
use crate::aggregation::bucket::{HistogramAggregation, HistogramBounds, TermsAggregation}; use crate::aggregation::bucket::{HistogramAggregation, HistogramBounds};
use crate::aggregation::metric::StatsAggregation; use crate::aggregation::metric::StatsAggregation;
use crate::query::AllQuery; use crate::query::AllQuery;
@@ -1170,10 +1004,6 @@ mod tests {
) )
.set_stored(); .set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let text_field_many_terms =
schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms =
schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue); crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone()); let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
@@ -1181,10 +1011,6 @@ mod tests {
schema_builder.add_f64_field("score_f64", score_fieldtype.clone()); schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype); let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?; let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
let many_terms_data = (0..15_000)
.map(|num| format!("author{}", num))
.collect::<Vec<_>>();
{ {
let mut rng = thread_rng(); let mut rng = thread_rng();
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_for_tests()?;
@@ -1193,8 +1019,6 @@ mod tests {
let val: f64 = rng.gen_range(0.0..1_000_000.0); let val: f64 = rng.gen_range(0.0..1_000_000.0);
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "cool", text_field => "cool",
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
score_field => val as u64, score_field => val as u64,
score_field_f64 => val as f64, score_field_f64 => val as f64,
score_field_i64 => val as i64, score_field_i64 => val as i64,
@@ -1235,7 +1059,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1266,7 +1090,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1297,7 +1121,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1336,7 +1160,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1346,64 +1170,6 @@ mod tests {
}); });
} }
#[bench]
fn bench_aggregation_terms_few(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_few_terms".to_string(),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
searcher.search(&AllQuery, &collector).unwrap().into();
agg_res
});
}
#[bench]
fn bench_aggregation_terms_many(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_many_terms".to_string(),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
searcher.search(&AllQuery, &collector).unwrap().into();
agg_res
});
}
#[bench] #[bench]
fn bench_aggregation_range_only(b: &mut Bencher) { fn bench_aggregation_range_only(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap(); let index = get_test_index_bench(false).unwrap();
@@ -1423,7 +1189,6 @@ mod tests {
(40000f64..50000f64).into(), (40000f64..50000f64).into(),
(50000f64..60000f64).into(), (50000f64..60000f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: Default::default(), sub_aggregation: Default::default(),
}), }),
@@ -1431,7 +1196,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1466,7 +1231,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1505,7 +1270,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1535,7 +1300,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =
@@ -1583,7 +1348,6 @@ mod tests {
(7000f64..20000f64).into(), (7000f64..20000f64).into(),
(20000f64..60000f64).into(), (20000f64..60000f64).into(),
], ],
..Default::default()
}), }),
sub_aggregation: sub_agg_req_1.clone(), sub_aggregation: sub_agg_req_1.clone(),
}), }),
@@ -1592,7 +1356,7 @@ mod tests {
.into_iter() .into_iter()
.collect(); .collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None); let collector = AggregationCollector::from_aggs(agg_req_1);
let searcher = reader.searcher(); let searcher = reader.searcher();
let agg_res: AggregationResults = let agg_res: AggregationResults =


@@ -4,22 +4,18 @@
//! merging. //! merging.
use std::fmt::Debug; use std::fmt::Debug;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use super::agg_req::MetricAggregation; use super::agg_req::MetricAggregation;
use super::agg_req_with_accessor::{ use super::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor, AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
}; };
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector}; use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector};
use super::collector::MAX_BUCKET_COUNT;
use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
use super::metric::{ use super::metric::{
AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation, AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
}; };
use super::VecWithNames; use super::{Key, VecWithNames};
use crate::aggregation::agg_req::BucketAggregationType; use crate::aggregation::agg_req::BucketAggregationType;
use crate::{DocId, TantivyError}; use crate::DocId;
pub(crate) const DOC_BLOCK_SIZE: usize = 64; pub(crate) const DOC_BLOCK_SIZE: usize = 64;
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE]; pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
@@ -32,17 +28,6 @@ pub(crate) struct SegmentAggregationResultsCollector {
num_staged_docs: usize, num_staged_docs: usize,
} }
impl Default for SegmentAggregationResultsCollector {
fn default() -> Self {
Self {
metrics: Default::default(),
buckets: Default::default(),
staged_docs: [0; DOC_BLOCK_SIZE],
num_staged_docs: Default::default(),
}
}
}
impl Debug for SegmentAggregationResultsCollector { impl Debug for SegmentAggregationResultsCollector {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SegmentAggregationResultsCollector") f.debug_struct("SegmentAggregationResultsCollector")
@@ -55,25 +40,6 @@ impl Debug for SegmentAggregationResultsCollector {
} }
impl SegmentAggregationResultsCollector { impl SegmentAggregationResultsCollector {
pub fn into_intermediate_aggregations_result(
self,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateAggregationResults> {
let buckets = if let Some(buckets) = self.buckets {
let entries = buckets
.into_iter()
.zip(agg_with_accessor.buckets.values())
.map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
.collect::<crate::Result<Vec<(String, _)>>>()?;
Some(VecWithNames::from_entries(entries))
} else {
None
};
let metrics = self.metrics.map(VecWithNames::from_other);
Ok(IntermediateAggregationResults { metrics, buckets })
}
pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> { pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
let buckets = req let buckets = req
.buckets .buckets
@@ -118,23 +84,19 @@ impl SegmentAggregationResultsCollector {
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> { ) {
self.staged_docs[self.num_staged_docs] = doc; self.staged_docs[self.num_staged_docs] = doc;
self.num_staged_docs += 1; self.num_staged_docs += 1;
if self.num_staged_docs == self.staged_docs.len() { if self.num_staged_docs == self.staged_docs.len() {
self.flush_staged_docs(agg_with_accessor, false)?; self.flush_staged_docs(agg_with_accessor, false);
} }
Ok(())
} }
pub(crate) fn flush_staged_docs( pub(crate) fn flush_staged_docs(
&mut self, &mut self,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &AggregationsWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) {
if self.num_staged_docs == 0 {
return Ok(());
}
if let Some(metrics) = &mut self.metrics { if let Some(metrics) = &mut self.metrics {
for (collector, agg_with_accessor) in for (collector, agg_with_accessor) in
metrics.values_mut().zip(agg_with_accessor.metrics.values()) metrics.values_mut().zip(agg_with_accessor.metrics.values())
@@ -152,12 +114,11 @@ impl SegmentAggregationResultsCollector {
&self.staged_docs[..self.num_staged_docs], &self.staged_docs[..self.num_staged_docs],
agg_with_accessor, agg_with_accessor,
force_flush, force_flush,
)?; );
} }
} }
self.num_staged_docs = 0; self.num_staged_docs = 0;
Ok(())
} }
} }
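The `collect_doc`/`flush_staged_docs` pair above implements a simple block-buffering scheme: documents are staged in a fixed-size array and handed to the per-aggregation collectors one full block at a time. A self-contained sketch of the idea with simplified types (the real collector flushes into bucket and metric collectors and can also force-flush at the end of a segment):

```rust
// Minimal sketch; not the tantivy SegmentAggregationResultsCollector.
const DOC_BLOCK_SIZE: usize = 64;

struct StagingCollector {
    staged_docs: [u32; DOC_BLOCK_SIZE],
    num_staged_docs: usize,
}

impl StagingCollector {
    fn new() -> Self {
        Self { staged_docs: [0; DOC_BLOCK_SIZE], num_staged_docs: 0 }
    }

    // Stage a doc id; flush automatically once the block is full.
    fn collect_doc(&mut self, doc: u32, on_block: &mut impl FnMut(&[u32])) {
        self.staged_docs[self.num_staged_docs] = doc;
        self.num_staged_docs += 1;
        if self.num_staged_docs == self.staged_docs.len() {
            self.flush(on_block);
        }
    }

    // Hand the staged docs downstream as one block and reset the buffer.
    fn flush(&mut self, on_block: &mut impl FnMut(&[u32])) {
        if self.num_staged_docs == 0 {
            return;
        }
        on_block(&self.staged_docs[..self.num_staged_docs]);
        self.num_staged_docs = 0;
    }
}

fn main() {
    let mut flushed_blocks = 0;
    let mut on_block = |block: &[u32]| {
        assert!(block.len() <= DOC_BLOCK_SIZE);
        flushed_blocks += 1;
    };
    let mut collector = StagingCollector::new();
    for doc in 0..130u32 {
        collector.collect_doc(doc, &mut on_block);
    }
    collector.flush(&mut on_block); // force the final, partially filled block
    assert_eq!(flushed_blocks, 3);
}
```

Buffering documents this way keeps the per-doc work in `collect` cheap and lets the downstream collectors loop over a whole slice of doc ids at once.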
@@ -201,58 +162,27 @@ impl SegmentMetricResultCollector {
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub(crate) enum SegmentBucketResultCollector { pub(crate) enum SegmentBucketResultCollector {
Range(SegmentRangeCollector), Range(SegmentRangeCollector),
Histogram(Box<SegmentHistogramCollector>), Histogram(SegmentHistogramCollector),
Terms(Box<SegmentTermCollector>),
} }
impl SegmentBucketResultCollector { impl SegmentBucketResultCollector {
pub fn into_intermediate_bucket_result(
self,
agg_with_accessor: &BucketAggregationWithAccessor,
) -> crate::Result<IntermediateBucketResult> {
match self {
SegmentBucketResultCollector::Terms(terms) => {
terms.into_intermediate_bucket_result(agg_with_accessor)
}
SegmentBucketResultCollector::Range(range) => {
range.into_intermediate_bucket_result(agg_with_accessor)
}
SegmentBucketResultCollector::Histogram(histogram) => {
histogram.into_intermediate_bucket_result(agg_with_accessor)
}
}
}
pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> { pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
match &req.bucket_agg { match &req.bucket_agg {
BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
SegmentTermCollector::from_req_and_validate(
terms_req,
&req.sub_aggregation,
req.field_type,
req.accessor
.as_multi()
.expect("unexpected fast field cardinality"),
)?,
))),
BucketAggregationType::Range(range_req) => { BucketAggregationType::Range(range_req) => {
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate( Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
range_req, range_req,
&req.sub_aggregation, &req.sub_aggregation,
&req.bucket_count,
req.field_type, req.field_type,
)?)) )?))
} }
BucketAggregationType::Histogram(histogram) => Ok(Self::Histogram(Box::new( BucketAggregationType::Histogram(histogram) => Ok(Self::Histogram(
SegmentHistogramCollector::from_req_and_validate( SegmentHistogramCollector::from_req_and_validate(
histogram, histogram,
&req.sub_aggregation, &req.sub_aggregation,
req.field_type, req.field_type,
req.accessor &req.accessor,
.as_single()
.expect("unexpected fast field cardinality"),
)?, )?,
))), )),
} }
} }
@@ -262,52 +192,42 @@ impl SegmentBucketResultCollector {
doc: &[DocId], doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor, bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) {
match self { match self {
SegmentBucketResultCollector::Range(range) => { SegmentBucketResultCollector::Range(range) => {
range.collect_block(doc, bucket_with_accessor, force_flush)?; range.collect_block(doc, bucket_with_accessor, force_flush);
} }
SegmentBucketResultCollector::Histogram(histogram) => { SegmentBucketResultCollector::Histogram(histogram) => {
histogram.collect_block(doc, bucket_with_accessor, force_flush)?; histogram.collect_block(doc, bucket_with_accessor, force_flush)
}
SegmentBucketResultCollector::Terms(terms) => {
terms.collect_block(doc, bucket_with_accessor, force_flush)?;
} }
} }
Ok(())
} }
} }
#[derive(Clone)] #[derive(Clone, Debug, PartialEq)]
pub(crate) struct BucketCount { pub(crate) struct SegmentHistogramBucketEntry {
/// The counter which is shared between the aggregations for one request. pub key: f64,
pub(crate) bucket_count: Rc<AtomicU32>, pub doc_count: u64,
pub(crate) max_bucket_count: u32,
} }
impl Default for BucketCount { #[derive(Clone, PartialEq)]
fn default() -> Self { pub(crate) struct SegmentRangeBucketEntry {
Self { pub key: Key,
bucket_count: Default::default(), pub doc_count: u64,
max_bucket_count: MAX_BUCKET_COUNT, pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
} /// The from range of the bucket. Equals f64::MIN when None.
} pub from: Option<f64>,
/// The to range of the bucket. Equals f64::MAX when None.
pub to: Option<f64>,
} }
impl BucketCount { impl Debug for SegmentRangeBucketEntry {
pub(crate) fn validate_bucket_count(&self) -> crate::Result<()> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.get_count() > self.max_bucket_count { f.debug_struct("SegmentRangeBucketEntry")
return Err(TantivyError::InvalidArgument( .field("key", &self.key)
"Aborting aggregation because too many buckets were created".to_string(), .field("doc_count", &self.doc_count)
)); .field("from", &self.from)
} .field("to", &self.to)
Ok(()) .finish()
}
pub(crate) fn add_count(&self, count: u32) {
self.bucket_count
.fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
}
pub(crate) fn get_count(&self) -> u32 {
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
} }
} }
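The new `BucketCount` above shares one `Rc<AtomicU32>` across all (sub-)aggregations of a request, so the `max_bucket_count` limit applies to the request as a whole rather than per collector. An illustrative sketch of that counting scheme (simplified error type, not the tantivy `TantivyError`):

```rust
use std::rc::Rc;
use std::sync::atomic::{AtomicU32, Ordering};

// Illustrative sketch of the shared bucket counter; every (sub-)aggregation
// clones the same Rc<AtomicU32>, so the limit is enforced globally.
#[derive(Clone)]
struct BucketCount {
    bucket_count: Rc<AtomicU32>,
    max_bucket_count: u32,
}

impl BucketCount {
    fn add_count(&self, count: u32) {
        self.bucket_count.fetch_add(count, Ordering::Relaxed);
    }
    fn validate(&self) -> Result<(), String> {
        if self.bucket_count.load(Ordering::Relaxed) > self.max_bucket_count {
            return Err("Aborting aggregation because too many buckets were created".to_string());
        }
        Ok(())
    }
}

fn main() {
    let counter = BucketCount { bucket_count: Default::default(), max_bucket_count: 100 };
    let per_sub_agg = counter.clone(); // shares the same underlying counter
    per_sub_agg.add_count(70);
    counter.add_count(40);
    assert!(counter.validate().is_err());
}
```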


@@ -271,8 +271,8 @@ impl Collector for FacetCollector {
let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?; let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
if facet_streamer.advance() { if facet_streamer.advance() {
'outer: loop { 'outer: loop {
// at the beginning of this loop, facet_streamer // at the begining of this loop, facet_streamer
// is positioned on a term that has not been processed yet. // is positionned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it); let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result { match skip_result {
SkipResult::Found => { SkipResult::Found => {


@@ -11,10 +11,8 @@
// Importing tantivy... // Importing tantivy...
use std::marker::PhantomData; use std::marker::PhantomData;
use fastfield_codecs::Column;
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastValue}; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::Field; use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError}; use crate::{Score, SegmentReader, TantivyError};
@@ -176,7 +174,7 @@ where
type Fruit = TSegmentCollector::Fruit; type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) { fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get_val(doc as u64); let value = self.fast_field_reader.get(doc);
if (self.predicate)(value) { if (self.predicate)(value) {
self.segment_collector.collect(doc, score) self.segment_collector.collect(doc, score)
} }


@@ -1,8 +1,7 @@
use fastdivide::DividerU64; use fastdivide::DividerU64;
use fastfield_codecs::Column;
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastValue}; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::{Field, Type}; use crate::schema::{Field, Type};
use crate::{DocId, Score}; use crate::{DocId, Score};
@@ -73,7 +72,8 @@ impl HistogramComputer {
return; return;
} }
let delta = value - self.min_value; let delta = value - self.min_value;
let bucket_id: usize = self.divider.divide(delta) as usize; let delta_u64 = delta.to_u64();
let bucket_id: usize = self.divider.divide(delta_u64) as usize;
if bucket_id < self.counts.len() { if bucket_id < self.counts.len() {
self.counts[bucket_id] += 1; self.counts[bucket_id] += 1;
} }
@@ -92,7 +92,7 @@ impl SegmentCollector for SegmentHistogramCollector {
type Fruit = Vec<u64>; type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) { fn collect(&mut self, doc: DocId, _score: Score) {
let value = self.ff_reader.get_val(doc as u64); let value = self.ff_reader.get(doc);
self.histogram_computer.add_value(value); self.histogram_computer.add_value(value);
} }
@@ -273,21 +273,21 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(1, 4_000_000)?; let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?; writer.add_document(doc!(date_field=>DateTime::new_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
writer.add_document( writer.add_document(
doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1986, Month::March, 9)?.with_hms(0, 0, 0)?)), doc!(date_field=>DateTime::new_primitive(Date::from_calendar_date(1986, Month::March, 9)?.with_hms(0, 0, 0)?)),
)?; )?;
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?; writer.add_document(doc!(date_field=>DateTime::new_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?;
writer.commit()?; writer.commit()?;
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
let all_query = AllQuery; let all_query = AllQuery;
let week_histogram_collector = HistogramCollector::new( let week_histogram_collector = HistogramCollector::new(
date_field, date_field,
DateTime::from_primitive( DateTime::new_primitive(
Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?, Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
), ),
3_600_000_000 * 24 * 365, // it is just for a unit test... sorry leap years. 3600 * 24 * 365, // it is just for a unit test... sorry leap years.
10, 10,
); );
let week_histogram = searcher.search(&all_query, &week_histogram_collector)?; let week_histogram = searcher.search(&all_query, &week_histogram_collector)?;
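
Both sides of the `HistogramComputer` hunk compute the bucket the same way; the change is only in how the delta is handed to the divider. A self-contained sketch of that arithmetic, where ordinary integer division stands in for the precomputed `fastdivide::DividerU64` (names and values below are illustrative):

    /// Counts values into fixed-width buckets starting at `min_value`;
    /// values below the minimum or past the last bucket are ignored.
    struct HistogramComputer {
        counts: Vec<u64>,
        min_value: u64,
        bucket_width: u64,
    }

    impl HistogramComputer {
        fn add_value(&mut self, value: u64) {
            if value < self.min_value {
                return;
            }
            let delta = value - self.min_value;
            let bucket_id = (delta / self.bucket_width) as usize;
            if bucket_id < self.counts.len() {
                self.counts[bucket_id] += 1;
            }
        }
    }

    fn main() {
        let mut histogram = HistogramComputer { counts: vec![0; 10], min_value: 100, bucket_width: 50 };
        for value in [90u64, 100, 149, 150, 1_000_000] {
            histogram.add_value(value);
        }
        assert_eq!(histogram.counts[0], 2); // 100 and 149 land in the first bucket
        assert_eq!(histogram.counts[1], 1); // 150 starts the second bucket
    }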


@@ -92,7 +92,7 @@ mod histogram_collector;
pub use histogram_collector::HistogramCollector; pub use histogram_collector::HistogramCollector;
mod multi_collector; mod multi_collector;
pub use self::multi_collector::{FruitHandle, MultiCollector, MultiFruit}; pub use self::multi_collector::MultiCollector;
mod top_collector; mod top_collector;


@@ -5,7 +5,6 @@ use super::{Collector, SegmentCollector};
use crate::collector::Fruit; use crate::collector::Fruit;
use crate::{DocId, Score, SegmentOrdinal, SegmentReader, TantivyError}; use crate::{DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
/// MultiFruit keeps Fruits from every nested Collector
pub struct MultiFruit { pub struct MultiFruit {
sub_fruits: Vec<Option<Box<dyn Fruit>>>, sub_fruits: Vec<Option<Box<dyn Fruit>>>,
} }
@@ -80,17 +79,12 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
} }
} }
/// FruitHandle stores reference to the corresponding collector inside MultiCollector
pub struct FruitHandle<TFruit: Fruit> { pub struct FruitHandle<TFruit: Fruit> {
pos: usize, pos: usize,
_phantom: PhantomData<TFruit>, _phantom: PhantomData<TFruit>,
} }
impl<TFruit: Fruit> FruitHandle<TFruit> { impl<TFruit: Fruit> FruitHandle<TFruit> {
/// Extract a typed fruit off a multifruit.
///
/// This function involves downcasting and can panic if the multifruit was
/// created using faulty code.
pub fn extract(self, fruits: &mut MultiFruit) -> TFruit { pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
let boxed_fruit = fruits.sub_fruits[self.pos].take().expect(""); let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("");
*boxed_fruit *boxed_fruit
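
For context on the `FruitHandle` documented in one side of this hunk, here is a hedged usage sketch of `MultiCollector`: each `add_collector` call returns a typed handle, and the handle later extracts its fruit from the shared `MultiFruit`. The function name and its arguments are illustrative, and the calls assume the tantivy collector API of this era.

    use tantivy::collector::{Count, MultiCollector, TopDocs};
    use tantivy::query::Query;
    use tantivy::{DocAddress, Score, Searcher};

    // Run a count and a top-3 search in a single pass over the index.
    fn count_and_top_docs(
        searcher: &Searcher,
        query: &dyn Query,
    ) -> tantivy::Result<(usize, Vec<(Score, DocAddress)>)> {
        let mut collectors = MultiCollector::new();
        let count_handle = collectors.add_collector(Count);
        let top_docs_handle = collectors.add_collector(TopDocs::with_limit(3));
        let mut multi_fruit = searcher.search(query, &collectors)?;
        let count = count_handle.extract(&mut multi_fruit);
        let top_docs = top_docs_handle.extract(&mut multi_fruit);
        Ok((count, top_docs))
    }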


@@ -1,9 +1,7 @@
use fastfield_codecs::Column;
use super::*; use super::*;
use crate::collector::{Count, FilterCollector, TopDocs}; use crate::collector::{Count, FilterCollector, TopDocs};
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader}; use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
use crate::query::{AllQuery, QueryParser}; use crate::query::{AllQuery, QueryParser};
use crate::schema::{Field, Schema, FAST, TEXT}; use crate::schema::{Field, Schema, FAST, TEXT};
use crate::time::format_description::well_known::Rfc3339; use crate::time::format_description::well_known::Rfc3339;
@@ -28,11 +26,11 @@ pub fn test_filter_collector() -> crate::Result<()> {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_utc(OffsetDateTime::parse("1898-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?; index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::new_utc(OffsetDateTime::parse("1898-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2020-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?; index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::new_utc(OffsetDateTime::parse("2020-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2019-04-20T00:00:00+00:00", &Rfc3339).unwrap())))?; index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::new_utc(OffsetDateTime::parse("2019-04-20T00:00:00+00:00", &Rfc3339).unwrap())))?;
index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?; index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::new_utc(OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_utc(OffsetDateTime::parse("2018-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?; index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::new_utc(OffsetDateTime::parse("2018-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
index_writer.commit()?; index_writer.commit()?;
let reader = index.reader()?; let reader = index.reader()?;
@@ -57,7 +55,7 @@ pub fn test_filter_collector() -> crate::Result<()> {
assert_eq!(filtered_top_docs.len(), 0); assert_eq!(filtered_top_docs.len(), 0);
fn date_filter(value: DateTime) -> bool { fn date_filter(value: DateTime) -> bool {
(value.into_utc() - OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap()) (value.to_utc() - OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())
.whole_weeks() .whole_weeks()
> 0 > 0
} }
@@ -71,8 +69,10 @@ pub fn test_filter_collector() -> crate::Result<()> {
/// Stores all of the doc ids. /// Stores all of the doc ids.
/// This collector is only used for tests. /// This collector is only used for tests.
/// It is unusable in practise, as it does /// It is unusable in practise, as it does not store
/// not store the segment ordinals /// the segment ordinals
pub struct TestCollector { pub struct TestCollector {
pub compute_score: bool, pub compute_score: bool,
} }
@@ -199,7 +199,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>; type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) { fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get_val(doc as u64); let val = self.reader.get(doc);
self.vals.push(val); self.vals.push(val);
} }
@@ -265,7 +265,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
} }
} }
fn make_test_searcher() -> crate::Result<Searcher> { fn make_test_searcher() -> crate::Result<crate::LeasedItem<Searcher>> {
let schema = Schema::builder().build(); let schema = Schema::builder().build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_for_tests()?;
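
Most of the churn in this test file is the `DateTime` constructor rename: `from_utc`/`from_primitive` on one side of the diff, `new_utc`/`new_primitive` on the other. A hedged sketch using the `from_utc` spelling (swap in `new_utc` for the other side); the function name is illustrative, and `tantivy::time` is assumed to be the re-exported `time` crate, as the imports above suggest.

    use tantivy::time::format_description::well_known::Rfc3339;
    use tantivy::time::OffsetDateTime;
    use tantivy::DateTime;

    // Parse an RFC 3339 timestamp and wrap it in tantivy's DateTime.
    fn parse_birthday() -> Result<DateTime, Box<dyn std::error::Error>> {
        let odt = OffsetDateTime::parse("1898-04-09T00:00:00+00:00", &Rfc3339)?;
        Ok(DateTime::from_utc(odt))
    }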


@@ -137,7 +137,7 @@ where T: PartialOrd + Clone
/// sorted by type `T`. /// sorted by type `T`.
/// ///
/// The implementation is based on a `BinaryHeap`. /// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents /// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`. /// is `O(n log K)`.
pub(crate) struct TopSegmentCollector<T> { pub(crate) struct TopSegmentCollector<T> {
limit: usize, limit: usize,


@@ -2,8 +2,6 @@ use std::collections::BinaryHeap;
use std::fmt; use std::fmt;
use std::marker::PhantomData; use std::marker::PhantomData;
use fastfield_codecs::Column;
use super::Collector; use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector; use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
use crate::collector::top_collector::{ComparableDoc, TopCollector, TopSegmentCollector}; use crate::collector::top_collector::{ComparableDoc, TopCollector, TopSegmentCollector};
@@ -11,7 +9,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{ use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector, CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
}; };
use crate::fastfield::{DynamicFastFieldReader, FastValue}; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::query::Weight; use crate::query::Weight;
use crate::schema::Field; use crate::schema::Field;
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError}; use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
@@ -81,7 +79,7 @@ where
/// sorted by their score. /// sorted by their score.
/// ///
/// The implementation is based on a `BinaryHeap`. /// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents /// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`. /// is `O(n log K)`.
/// ///
/// This collector guarantees a stable sorting in case of a tie on the /// This collector guarantees a stable sorting in case of a tie on the
@@ -136,7 +134,7 @@ struct ScorerByFastFieldReader {
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader { impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 { fn score(&mut self, doc: DocId) -> u64 {
self.ff_reader.get_val(doc as u64) self.ff_reader.get(doc)
} }
} }
@@ -285,7 +283,7 @@ impl TopDocs {
/// ///
/// # See also /// # See also
/// ///
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to /// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method. /// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
pub fn order_by_u64_field( pub fn order_by_u64_field(
self, self,
@@ -409,7 +407,7 @@ impl TopDocs {
/// # use tantivy::query::QueryParser; /// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader; /// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::fastfield::Column; /// use tantivy::fastfield::FastFieldReader;
/// use tantivy::schema::Field; /// use tantivy::schema::Field;
/// ///
/// fn create_schema() -> Schema { /// fn create_schema() -> Schema {
@@ -458,7 +456,7 @@ impl TopDocs {
/// ///
/// // We can now define our actual scoring function /// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| { /// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get_val(doc as u64); /// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm /// // Well.. For the sake of the example we use a simple logarithm
/// // function. /// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2(); /// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -501,7 +499,7 @@ impl TopDocs {
/// ///
/// This method only makes it possible to compute the score from a given /// This method only makes it possible to compute the score from a given
/// `DocId`, fastfield values for the doc and any information you could /// `DocId`, fastfield values for the doc and any information you could
/// have precomputed beforehand. It does not make it possible for instance /// have precomputed beforehands. It does not make it possible for instance
/// to compute something like TfIdf as it does not have access to the list of query /// to compute something like TfIdf as it does not have access to the list of query
/// terms present in the document, nor the term frequencies for the different terms. /// terms present in the document, nor the term frequencies for the different terms.
/// ///
@@ -517,7 +515,7 @@ impl TopDocs {
/// use tantivy::SegmentReader; /// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field; /// use tantivy::schema::Field;
/// use fastfield_codecs::Column; /// use tantivy::fastfield::FastFieldReader;
/// ///
/// # fn create_schema() -> Schema { /// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
@@ -569,8 +567,8 @@ impl TopDocs {
/// ///
/// // We can now define our actual scoring function /// // We can now define our actual scoring function
/// move |doc: DocId| { /// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get_val(doc as u64); /// let popularity: u64 = popularity_reader.get(doc);
/// let boosted: u64 = boosted_reader.get_val(doc as u64); /// let boosted: u64 = boosted_reader.get(doc);
/// // Score do not have to be `f64` in tantivy. /// // Score do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order /// // Here we return a couple to get lexicographical order
/// // for free. /// // for free.
@@ -900,7 +898,7 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_for_tests()?;
let pr_birthday = DateTime::from_utc(OffsetDateTime::parse( let pr_birthday = DateTime::new_utc(OffsetDateTime::parse(
"1898-04-09T00:00:00+00:00", "1898-04-09T00:00:00+00:00",
&Rfc3339, &Rfc3339,
)?); )?);
@@ -908,7 +906,7 @@ mod tests {
name => "Paul Robeson", name => "Paul Robeson",
birthday => pr_birthday, birthday => pr_birthday,
))?; ))?;
let mr_birthday = DateTime::from_utc(OffsetDateTime::parse( let mr_birthday = DateTime::new_utc(OffsetDateTime::parse(
"1947-11-08T00:00:00+00:00", "1947-11-08T00:00:00+00:00",
&Rfc3339, &Rfc3339,
)?); )?);


@@ -1,7 +1,6 @@
use crossbeam::channel;
use rayon::{ThreadPool, ThreadPoolBuilder}; use rayon::{ThreadPool, ThreadPoolBuilder};
use crate::TantivyError;
/// Search executor whether search request are single thread or multithread. /// Search executor whether search request are single thread or multithread.
/// ///
/// We don't expose Rayon thread pool directly here for several reasons. /// We don't expose Rayon thread pool directly here for several reasons.
@@ -48,19 +47,16 @@ impl Executor {
match self { match self {
Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(), Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
Executor::ThreadPool(pool) => { Executor::ThreadPool(pool) => {
let args: Vec<A> = args.collect(); let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
let num_fruits = args.len(); let num_fruits = args_with_indices.len();
let fruit_receiver = { let fruit_receiver = {
let (fruit_sender, fruit_receiver) = crossbeam_channel::unbounded(); let (fruit_sender, fruit_receiver) = channel::unbounded();
pool.scope(|scope| { pool.scope(|scope| {
for (idx, arg) in args.into_iter().enumerate() { for arg_with_idx in args_with_indices {
// We name references for f and fruit_sender_ref because we do not scope.spawn(|_| {
// want these two to be moved into the closure. let (idx, arg) = arg_with_idx;
let f_ref = &f; let fruit = f(arg);
let fruit_sender_ref = &fruit_sender; if let Err(err) = fruit_sender.send((idx, fruit)) {
scope.spawn(move |_| {
let fruit = f_ref(arg);
if let Err(err) = fruit_sender_ref.send((idx, fruit)) {
error!( error!(
"Failed to send search task. It probably means all search \ "Failed to send search task. It probably means all search \
threads have panicked. {:?}", threads have panicked. {:?}",
@@ -75,19 +71,18 @@ impl Executor {
// This is important as it makes it possible for the fruit_receiver iteration to // This is important as it makes it possible for the fruit_receiver iteration to
// terminate. // terminate.
}; };
let mut result_placeholders: Vec<Option<R>> = // This is lame, but safe.
std::iter::repeat_with(|| None).take(num_fruits).collect(); let mut results_with_position = Vec::with_capacity(num_fruits);
for (pos, fruit_res) in fruit_receiver { for (pos, fruit_res) in fruit_receiver {
let fruit = fruit_res?; let fruit = fruit_res?;
result_placeholders[pos] = Some(fruit); results_with_position.push((pos, fruit));
} }
let results: Vec<R> = result_placeholders.into_iter().flatten().collect(); results_with_position.sort_by_key(|(pos, _)| *pos);
if results.len() != num_fruits { assert_eq!(results_with_position.len(), num_fruits);
return Err(TantivyError::InternalError( Ok(results_with_position
"One of the mapped execution failed.".to_string(), .into_iter()
)); .map(|(_, fruit)| fruit)
} .collect::<Vec<_>>())
Ok(results)
} }
} }
} }
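
In the `Executor::map` hunk above, one side pre-allocates `Option` placeholders and fills them by input index, while the other collects `(index, fruit)` pairs and sorts them afterwards. A dependency-free sketch of the placeholder scheme, with std scoped threads and an mpsc channel standing in for the rayon pool and crossbeam channel (the function is illustrative, not the crate's code):

    use std::sync::mpsc;
    use std::thread;

    fn map_in_order<A, R, F>(f: F, args: Vec<A>) -> Option<Vec<R>>
    where
        A: Send,
        R: Send,
        F: Fn(A) -> R + Sync,
    {
        let num_fruits = args.len();
        let (sender, receiver) = mpsc::channel();
        thread::scope(|scope| {
            for (idx, arg) in args.into_iter().enumerate() {
                // Borrow f and clone the sender so neither is moved out of the loop.
                let f_ref = &f;
                let sender_clone = sender.clone();
                scope.spawn(move || {
                    let _ = sender_clone.send((idx, f_ref(arg)));
                });
            }
        });
        drop(sender); // lets the receiver loop below terminate
        let mut placeholders: Vec<Option<R>> =
            std::iter::repeat_with(|| None).take(num_fruits).collect();
        for (idx, fruit) in receiver {
            placeholders[idx] = Some(fruit);
        }
        let results: Vec<R> = placeholders.into_iter().flatten().collect();
        // Mirrors the "one of the mapped executions failed" check.
        (results.len() == num_fruits).then_some(results)
    }

    fn main() {
        assert_eq!(map_in_order(|x: u64| x * 2, vec![1, 2, 3]), Some(vec![2, 4, 6]));
    }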


@@ -7,7 +7,6 @@ use std::sync::Arc;
use super::segment::Segment; use super::segment::Segment;
use super::IndexSettings; use super::IndexSettings;
use crate::core::single_segment_index_writer::SingleSegmentIndexWriter;
use crate::core::{ use crate::core::{
Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH, Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
}; };
@@ -17,7 +16,7 @@ use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK}; use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError}; use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN}; use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_metas; use crate::indexer::segment_updater::save_new_metas;
use crate::reader::{IndexReader, IndexReaderBuilder}; use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Field, FieldType, Schema}; use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager}; use crate::tokenizer::{TextAnalyzer, TokenizerManager};
@@ -48,34 +47,6 @@ fn load_metas(
.map_err(From::from) .map_err(From::from)
} }
/// Save the index meta file.
/// This operation is atomic :
/// Either
/// - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched,
/// - it succeeds, and `meta.json` is written
/// and flushed.
///
/// This method is not part of tantivy's public API
fn save_new_metas(
schema: Schema,
index_settings: IndexSettings,
directory: &dyn Directory,
) -> crate::Result<()> {
save_metas(
&IndexMeta {
index_settings,
segments: Vec::new(),
schema,
opstamp: 0u64,
payload: None,
},
directory,
)?;
directory.sync_directory()?;
Ok(())
}
/// IndexBuilder can be used to create an index. /// IndexBuilder can be used to create an index.
/// ///
/// Use in conjunction with `SchemaBuilder`. Global index settings /// Use in conjunction with `SchemaBuilder`. Global index settings
@@ -103,7 +74,6 @@ fn save_new_metas(
pub struct IndexBuilder { pub struct IndexBuilder {
schema: Option<Schema>, schema: Option<Schema>,
index_settings: IndexSettings, index_settings: IndexSettings,
tokenizer_manager: TokenizerManager,
} }
impl Default for IndexBuilder { impl Default for IndexBuilder {
fn default() -> Self { fn default() -> Self {
@@ -116,7 +86,6 @@ impl IndexBuilder {
Self { Self {
schema: None, schema: None,
index_settings: IndexSettings::default(), index_settings: IndexSettings::default(),
tokenizer_manager: TokenizerManager::default(),
} }
} }
@@ -134,12 +103,6 @@ impl IndexBuilder {
self self
} }
/// Set the tokenizers .
pub fn tokenizers(mut self, tokenizers: TokenizerManager) -> Self {
self.tokenizer_manager = tokenizers;
self
}
/// Creates a new index using the `RAMDirectory`. /// Creates a new index using the `RAMDirectory`.
/// ///
/// The index will be allocated in anonymous memory. /// The index will be allocated in anonymous memory.
@@ -164,25 +127,6 @@ impl IndexBuilder {
self.create(mmap_directory) self.create(mmap_directory)
} }
/// Dragons ahead!!!
///
/// The point of this API is to let users create a simple index with a single segment
/// and without starting any thread.
///
/// Do not use this method if you are not sure what you are doing.
///
/// It expects an originally empty directory, and will not run any GC operation.
#[doc(hidden)]
pub fn single_segment_index_writer(
self,
dir: impl Into<Box<dyn Directory>>,
mem_budget: usize,
) -> crate::Result<SingleSegmentIndexWriter> {
let index = self.create(dir)?;
let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
Ok(index_simple_writer)
}
/// Creates a new index in a temp directory. /// Creates a new index in a temp directory.
/// ///
/// The index will use the `MMapDirectory` in a newly created directory. /// The index will use the `MMapDirectory` in a newly created directory.
@@ -210,8 +154,7 @@ impl IndexBuilder {
if !Index::exists(&*dir)? { if !Index::exists(&*dir)? {
return self.create(dir); return self.create(dir);
} }
let mut index = Index::open(dir)?; let index = Index::open(dir)?;
index.set_tokenizers(self.tokenizer_manager.clone());
if index.schema() == self.get_expect_schema()? { if index.schema() == self.get_expect_schema()? {
Ok(index) Ok(index)
} else { } else {
@@ -233,8 +176,7 @@ impl IndexBuilder {
)?; )?;
let mut metas = IndexMeta::with_schema(self.get_expect_schema()?); let mut metas = IndexMeta::with_schema(self.get_expect_schema()?);
metas.index_settings = self.index_settings; metas.index_settings = self.index_settings;
let mut index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default()); let index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
index.set_tokenizers(self.tokenizer_manager);
Ok(index) Ok(index)
} }
} }
@@ -280,7 +222,7 @@ impl Index {
} }
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with as many threads as there are CPUs on the system. /// by a thread pool with a given number of threads.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> { pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = num_cpus::get(); let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads) self.set_multithread_executor(default_num_threads)
@@ -362,11 +304,6 @@ impl Index {
} }
} }
/// Setter for the tokenizer manager.
pub fn set_tokenizers(&mut self, tokenizers: TokenizerManager) {
self.tokenizers = tokenizers;
}
/// Accessor for the tokenizer manager. /// Accessor for the tokenizer manager.
pub fn tokenizers(&self) -> &TokenizerManager { pub fn tokenizers(&self) -> &TokenizerManager {
&self.tokenizers &self.tokenizers
@@ -377,31 +314,20 @@ impl Index {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers(); let tokenizer_manager: &TokenizerManager = self.tokenizers();
let indexing_options_opt = match field_type { let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
FieldType::JsonObject(options) => options.get_text_indexing_options(), FieldType::Str(text_options) => text_options
FieldType::Str(options) => options.get_indexing_options(), .get_indexing_options()
_ => { .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
return Err(TantivyError::SchemaError(format!( .and_then(|tokenizer_name| tokenizer_manager.get(&tokenizer_name)),
"{:?} is not a text field.", _ => None,
field_entry.name()
)))
}
}; };
let indexing_options = indexing_options_opt.ok_or_else(|| { match tokenizer_name_opt {
TantivyError::InvalidArgument(format!( Some(tokenizer) => Ok(tokenizer),
"No indexing options set for field {:?}", None => Err(TantivyError::SchemaError(format!(
field_entry "{:?} is not a text field.",
)) field_entry.name()
})?; ))),
}
tokenizer_manager
.get(indexing_options.tokenizer())
.ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"No Tokenizer found for field {:?}",
field_entry
))
})
} }
/// Create a default `IndexReader` for the given index. /// Create a default `IndexReader` for the given index.
@@ -414,7 +340,8 @@ impl Index {
/// Create a `IndexReader` for the given index. /// Create a `IndexReader` for the given index.
/// ///
/// Most project should create at most one reader for a given index. /// Most project should create at most one reader for a given index.
/// This method is typically called only once per `Index` instance. /// This method is typically called only once per `Index` instance,
/// over the lifetime of most problem.
pub fn reader_builder(&self) -> IndexReaderBuilder { pub fn reader_builder(&self) -> IndexReaderBuilder {
IndexReaderBuilder::new(self.clone()) IndexReaderBuilder::new(self.clone())
} }
@@ -628,12 +555,9 @@ impl fmt::Debug for Index {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::collector::Count;
use crate::directory::{RamDirectory, WatchCallback}; use crate::directory::{RamDirectory, WatchCallback};
use crate::query::TermQuery; use crate::schema::{Field, Schema, INDEXED, TEXT};
use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, TEXT}; use crate::{Directory, Index, IndexReader, IndexSettings, ReloadPolicy};
use crate::tokenizer::TokenizerManager;
use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy, Term};
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -649,21 +573,6 @@ mod tests {
); );
} }
#[test]
fn test_set_tokenizer_manager() {
let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("num_likes", INDEXED);
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
let index = IndexBuilder::new()
// set empty tokenizer manager
.tokenizers(TokenizerManager::new())
.schema(schema)
.create_in_ram()
.unwrap();
assert!(index.tokenizers().get("raw").is_none());
}
#[test] #[test]
fn test_index_exists() { fn test_index_exists() {
let directory: Box<dyn Directory> = Box::new(RamDirectory::create()); let directory: Box<dyn Directory> = Box::new(RamDirectory::create());
@@ -793,7 +702,7 @@ mod tests {
.try_into()?; .try_into()?;
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64))?; writer.add_document(doc!(field=>1u64))?;
let (sender, receiver) = crossbeam_channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = index.directory_mut().watch(WatchCallback::new(move || { let _handle = index.directory_mut().watch(WatchCallback::new(move || {
let _ = sender.send(()); let _ = sender.send(());
})); }));
@@ -828,7 +737,7 @@ mod tests {
reader: &IndexReader, reader: &IndexReader,
) -> crate::Result<()> { ) -> crate::Result<()> {
let mut reader_index = reader.index(); let mut reader_index = reader.index();
let (sender, receiver) = crossbeam_channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = reader_index let _watch_handle = reader_index
.directory_mut() .directory_mut()
.watch(WatchCallback::new(move || { .watch(WatchCallback::new(move || {
@@ -899,28 +808,4 @@ mod tests {
); );
Ok(()) Ok(())
} }
#[test]
fn test_single_segment_index_writer() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let directory = RamDirectory::default();
let mut single_segment_index_writer = Index::builder()
.schema(schema)
.single_segment_index_writer(directory, 10_000_000)?;
for _ in 0..10 {
let doc = doc!(text_field=>"hello");
single_segment_index_writer.add_document(doc)?;
}
let index = single_segment_index_writer.finalize()?;
let searcher = index.reader()?.searcher();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "hello"),
IndexRecordOption::Basic,
);
let count = searcher.search(&term_query, &Count)?;
assert_eq!(count, 10);
Ok(())
}
} }


@@ -239,7 +239,7 @@ impl InnerSegmentMeta {
/// ///
/// Contains settings which are applied on the whole /// Contains settings which are applied on the whole
/// index, like presort documents. /// index, like presort documents.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexSettings { pub struct IndexSettings {
/// Sorts the documents by information /// Sorts the documents by information
/// provided in `IndexSortByField` /// provided in `IndexSortByField`
@@ -248,29 +248,10 @@ pub struct IndexSettings {
/// The `Compressor` used to compress the doc store. /// The `Compressor` used to compress the doc store.
#[serde(default)] #[serde(default)]
pub docstore_compression: Compressor, pub docstore_compression: Compressor,
#[serde(default = "default_docstore_blocksize")]
/// The size of each block that will be compressed and written to disk
pub docstore_blocksize: usize,
} }
/// Must be a function to be compatible with serde defaults
fn default_docstore_blocksize() -> usize {
16_384
}
impl Default for IndexSettings {
fn default() -> Self {
Self {
sort_by_field: None,
docstore_compression: Compressor::default(),
docstore_blocksize: default_docstore_blocksize(),
}
}
}
/// Settings to presort the documents in an index /// Settings to presort the documents in an index
/// ///
/// Presorting documents can greatly improve performance /// Presorting documents can greatly performance
/// in some scenarios, by applying top n /// in some scenarios, by applying top n
/// optimizations. /// optimizations.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
@@ -311,7 +292,7 @@ pub struct IndexMeta {
/// `IndexSettings` to configure index options. /// `IndexSettings` to configure index options.
#[serde(default)] #[serde(default)]
pub index_settings: IndexSettings, pub index_settings: IndexSettings,
/// List of `SegmentMeta` information associated to each finalized segment of the index. /// List of `SegmentMeta` informations associated to each finalized segment of the index.
pub segments: Vec<SegmentMeta>, pub segments: Vec<SegmentMeta>,
/// Index `Schema` /// Index `Schema`
pub schema: Schema, pub schema: Schema,
@@ -326,7 +307,7 @@ pub struct IndexMeta {
pub payload: Option<String>, pub payload: Option<String>,
} }
#[derive(Deserialize, Debug)] #[derive(Deserialize)]
struct UntrackedIndexMeta { struct UntrackedIndexMeta {
pub segments: Vec<InnerSegmentMeta>, pub segments: Vec<InnerSegmentMeta>,
#[serde(default)] #[serde(default)]
@@ -395,7 +376,6 @@ mod tests {
use super::IndexMeta; use super::IndexMeta;
use crate::core::index_meta::UntrackedIndexMeta; use crate::core::index_meta::UntrackedIndexMeta;
use crate::schema::{Schema, TEXT}; use crate::schema::{Schema, TEXT};
use crate::store::ZstdCompressor;
use crate::{IndexSettings, IndexSortByField, Order}; use crate::{IndexSettings, IndexSortByField, Order};
#[test] #[test]
@@ -421,7 +401,7 @@ mod tests {
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed"); let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!( assert_eq!(
json, json,
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"# r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4"},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false}}],"opstamp":0}"#
); );
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap(); let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
@@ -429,60 +409,4 @@ mod tests {
assert_eq!(index_metas.schema, deser_meta.schema); assert_eq!(index_metas.schema, deser_meta.schema);
assert_eq!(index_metas.opstamp, deser_meta.opstamp); assert_eq!(index_metas.opstamp, deser_meta.opstamp);
} }
#[test]
fn test_serialize_metas_zstd_compressor() {
let schema = {
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("text", TEXT);
schema_builder.build()
};
let index_metas = IndexMeta {
index_settings: IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "text".to_string(),
order: Order::Asc,
}),
docstore_compression: crate::store::Compressor::Zstd(ZstdCompressor {
compression_level: Some(4),
}),
docstore_blocksize: 1_000_000,
},
segments: Vec::new(),
schema,
opstamp: 0u64,
payload: None,
};
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(
json,
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(compression_level=4)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
);
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
assert_eq!(index_metas.index_settings, deser_meta.index_settings);
assert_eq!(index_metas.schema, deser_meta.schema);
assert_eq!(index_metas.opstamp, deser_meta.opstamp);
}
#[test]
fn test_serialize_metas_invalid_comp() {
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zsstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
assert_eq!(
err.to_string(),
"unknown variant `zsstd`, expected one of `none`, `lz4`, `brotli`, `snappy`, `zstd`, \
`zstd(compression_level=5)` at line 1 column 96"
.to_string()
);
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(bla=10)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
assert_eq!(
err.to_string(),
"unknown zstd option \"bla\" at line 1 column 103".to_string()
);
}
} }
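
One side of the `IndexSettings` hunks adds a `docstore_blocksize` field whose serde default is a function, so older `meta.json` files written without the field still deserialize. A minimal sketch of that pattern (struct name is illustrative; `serde` and `serde_json` are assumed as dependencies):

    use serde::Deserialize;

    // Must be a function to be usable with `#[serde(default = ...)]`.
    fn default_docstore_blocksize() -> usize {
        16_384
    }

    #[derive(Deserialize)]
    struct DocStoreSettings {
        #[serde(default = "default_docstore_blocksize")]
        docstore_blocksize: usize,
    }

    fn main() {
        // A pre-existing JSON document without the field picks up the default.
        let old_meta: DocStoreSettings = serde_json::from_str("{}").unwrap();
        assert_eq!(old_meta.docstore_blocksize, 16_384);
    }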


@@ -230,13 +230,4 @@ impl InvertedIndexReader {
} }
Ok(()) Ok(())
} }
/// Returns the number of documents containing the term asynchronously.
pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
Ok(self
.get_term_info_async(term)
.await?
.map(|term_info| term_info.doc_freq)
.unwrap_or(0u32))
}
} }
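
The async `doc_freq` helper in this hunk mirrors the synchronous path used by `Searcher::doc_freq` (visible later in this compare): sum the per-segment frequencies. A hedged sketch of that synchronous aggregation; the free function is illustrative and assumes this era's tantivy API.

    use tantivy::{Searcher, Term};

    // Sum the per-segment document frequencies for a term.
    fn total_doc_freq(searcher: &Searcher, term: &Term) -> tantivy::Result<u64> {
        let mut total_doc_freq = 0u64;
        for segment_reader in searcher.segment_readers() {
            let inverted_index = segment_reader.inverted_index(term.field())?;
            total_doc_freq += u64::from(inverted_index.doc_freq(term)?);
        }
        Ok(total_doc_freq)
    }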


@@ -7,7 +7,6 @@ mod segment;
mod segment_component; mod segment_component;
mod segment_id; mod segment_id;
mod segment_reader; mod segment_reader;
mod single_segment_index_writer;
use std::path::Path; use std::path::Path;
@@ -24,7 +23,6 @@ pub use self::segment::Segment;
pub use self::segment_component::SegmentComponent; pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId; pub use self::segment_id::SegmentId;
pub use self::segment_reader::SegmentReader; pub use self::segment_reader::SegmentReader;
pub use self::single_segment_index_writer::SingleSegmentIndexWriter;
/// The meta file contains all the information about the list of segments and the schema /// The meta file contains all the information about the list of segments and the schema
/// of the index. /// of the index.


@@ -1,5 +1,4 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::sync::Arc;
use std::{fmt, io}; use std::{fmt, io};
use crate::collector::Collector; use crate::collector::Collector;
@@ -7,7 +6,7 @@ use crate::core::{Executor, SegmentReader};
use crate::query::Query; use crate::query::Query;
use crate::schema::{Document, Schema, Term}; use crate::schema::{Document, Schema, Term};
use crate::space_usage::SearcherSpaceUsage; use crate::space_usage::SearcherSpaceUsage;
use crate::store::{CacheStats, StoreReader}; use crate::store::StoreReader;
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject}; use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
/// Identifies the searcher generation accessed by a [Searcher]. /// Identifies the searcher generation accessed by a [Searcher].
@@ -63,20 +62,43 @@ impl SearcherGeneration {
/// ///
/// It guarantees that the `Segment` will not be removed before /// It guarantees that the `Segment` will not be removed before
/// the destruction of the `Searcher`. /// the destruction of the `Searcher`.
#[derive(Clone)]
pub struct Searcher { pub struct Searcher {
inner: Arc<SearcherInner>, schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
store_readers: Vec<StoreReader>,
generation: TrackedObject<SearcherGeneration>,
} }
impl Searcher { impl Searcher {
/// Creates a new `Searcher`
pub(crate) fn new(
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
generation: TrackedObject<SearcherGeneration>,
) -> io::Result<Searcher> {
let store_readers: Vec<StoreReader> = segment_readers
.iter()
.map(SegmentReader::get_store_reader)
.collect::<io::Result<Vec<_>>>()?;
Ok(Searcher {
schema,
index,
segment_readers,
store_readers,
generation,
})
}
/// Returns the `Index` associated to the `Searcher` /// Returns the `Index` associated to the `Searcher`
pub fn index(&self) -> &Index { pub fn index(&self) -> &Index {
&self.inner.index &self.index
} }
/// [SearcherGeneration] which identifies the version of the snapshot held by this `Searcher`. /// [SearcherGeneration] which identifies the version of the snapshot held by this `Searcher`.
pub fn generation(&self) -> &SearcherGeneration { pub fn generation(&self) -> &SearcherGeneration {
self.inner.generation.as_ref() self.generation.as_ref()
} }
/// Fetches a document from tantivy's store given a `DocAddress`. /// Fetches a document from tantivy's store given a `DocAddress`.
@@ -84,39 +106,25 @@ impl Searcher {
/// The searcher uses the segment ordinal to route the /// The searcher uses the segment ordinal to route the
/// the request to the right `Segment`. /// the request to the right `Segment`.
pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> { pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize]; let store_reader = &self.store_readers[doc_address.segment_ord as usize];
store_reader.get(doc_address.doc_id) store_reader.get(doc_address.doc_id)
} }
/// The cache stats for the underlying store reader.
///
/// Aggregates the sum for each segment store reader.
pub fn doc_store_cache_stats(&self) -> CacheStats {
let cache_stats: CacheStats = self
.inner
.store_readers
.iter()
.map(|reader| reader.cache_stats())
.sum();
cache_stats
}
/// Fetches a document in an asynchronous manner. /// Fetches a document in an asynchronous manner.
#[cfg(feature = "quickwit")] #[cfg(feature = "quickwit")]
pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> { pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> {
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize]; let store_reader = &self.store_readers[doc_address.segment_ord as usize];
store_reader.get_async(doc_address.doc_id).await store_reader.get_async(doc_address.doc_id).await
} }
/// Access the schema associated to the index of this searcher. /// Access the schema associated to the index of this searcher.
pub fn schema(&self) -> &Schema { pub fn schema(&self) -> &Schema {
&self.inner.schema &self.schema
} }
/// Returns the overall number of documents in the index. /// Returns the overall number of documents in the index.
pub fn num_docs(&self) -> u64 { pub fn num_docs(&self) -> u64 {
self.inner self.segment_readers
.segment_readers
.iter() .iter()
.map(|segment_reader| u64::from(segment_reader.num_docs())) .map(|segment_reader| u64::from(segment_reader.num_docs()))
.sum::<u64>() .sum::<u64>()
@@ -126,7 +134,7 @@ impl Searcher {
/// the given term. /// the given term.
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> { pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
let mut total_doc_freq = 0; let mut total_doc_freq = 0;
for segment_reader in &self.inner.segment_readers { for segment_reader in &self.segment_readers {
let inverted_index = segment_reader.inverted_index(term.field())?; let inverted_index = segment_reader.inverted_index(term.field())?;
let doc_freq = inverted_index.doc_freq(term)?; let doc_freq = inverted_index.doc_freq(term)?;
total_doc_freq += u64::from(doc_freq); total_doc_freq += u64::from(doc_freq);
@@ -134,27 +142,14 @@ impl Searcher {
Ok(total_doc_freq) Ok(total_doc_freq)
} }
/// Return the overall number of documents containing
/// the given term in an asynchronous manner.
#[cfg(feature = "quickwit")]
pub async fn doc_freq_async(&self, term: &Term) -> crate::Result<u64> {
let mut total_doc_freq = 0;
for segment_reader in &self.inner.segment_readers {
let inverted_index = segment_reader.inverted_index(term.field())?;
let doc_freq = inverted_index.doc_freq_async(term).await?;
total_doc_freq += u64::from(doc_freq);
}
Ok(total_doc_freq)
}
/// Return the list of segment readers /// Return the list of segment readers
pub fn segment_readers(&self) -> &[SegmentReader] { pub fn segment_readers(&self) -> &[SegmentReader] {
&self.inner.segment_readers &self.segment_readers
} }
/// Returns the segment_reader associated with the given segment_ord /// Returns the segment_reader associated with the given segment_ord
pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader { pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
&self.inner.segment_readers[segment_ord as usize] &self.segment_readers[segment_ord as usize]
} }
/// Runs a query on the segment readers wrapped by the searcher. /// Runs a query on the segment readers wrapped by the searcher.
@@ -176,7 +171,7 @@ impl Searcher {
query: &dyn Query, query: &dyn Query,
collector: &C, collector: &C,
) -> crate::Result<C::Fruit> { ) -> crate::Result<C::Fruit> {
let executor = self.inner.index.search_executor(); let executor = self.index.search_executor();
self.search_with_executor(query, collector, executor) self.search_with_executor(query, collector, executor)
} }
@@ -213,67 +208,17 @@ impl Searcher {
/// Summarize total space usage of this searcher. /// Summarize total space usage of this searcher.
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> { pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
let mut space_usage = SearcherSpaceUsage::new(); let mut space_usage = SearcherSpaceUsage::new();
for segment_reader in self.segment_readers() { for segment_reader in &self.segment_readers {
space_usage.add_segment(segment_reader.space_usage()?); space_usage.add_segment(segment_reader.space_usage()?);
} }
Ok(space_usage) Ok(space_usage)
} }
} }
impl From<Arc<SearcherInner>> for Searcher {
fn from(inner: Arc<SearcherInner>) -> Self {
Searcher { inner }
}
}
/// Holds a list of `SegmentReader`s ready for search.
///
/// It guarantees that the `Segment` will not be removed before
/// the destruction of the `Searcher`.
pub(crate) struct SearcherInner {
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
store_readers: Vec<StoreReader>,
generation: TrackedObject<SearcherGeneration>,
}
impl SearcherInner {
/// Creates a new `Searcher`
pub(crate) fn new(
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
generation: TrackedObject<SearcherGeneration>,
doc_store_cache_size: usize,
) -> io::Result<SearcherInner> {
assert_eq!(
&segment_readers
.iter()
.map(|reader| (reader.segment_id(), reader.delete_opstamp()))
.collect::<BTreeMap<_, _>>(),
generation.segments(),
"Set of segments referenced by this Searcher and its SearcherGeneration must match"
);
let store_readers: Vec<StoreReader> = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
.collect::<io::Result<Vec<_>>>()?;
Ok(SearcherInner {
schema,
index,
segment_readers,
store_readers,
generation,
})
}
}
impl fmt::Debug for Searcher { impl fmt::Debug for Searcher {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let segment_ids = self let segment_ids = self
.segment_readers() .segment_readers
.iter() .iter()
.map(SegmentReader::segment_id) .map(SegmentReader::segment_id)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
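
One side of this hunk turns `Searcher` into a cheap `Clone` handle over an `Arc<SearcherInner>`, while the other keeps the fields inline. A dependency-free sketch of that handle-around-Arc pattern; the field type is a simplified placeholder.

    use std::sync::Arc;

    struct SearcherInner {
        segment_readers: Vec<String>, // stands in for Vec<SegmentReader>
    }

    #[derive(Clone)]
    struct Searcher {
        inner: Arc<SearcherInner>,
    }

    impl From<Arc<SearcherInner>> for Searcher {
        fn from(inner: Arc<SearcherInner>) -> Self {
            Searcher { inner }
        }
    }

    impl Searcher {
        fn segment_readers(&self) -> &[String] {
            &self.inner.segment_readers
        }
    }

    fn main() {
        let searcher: Searcher = Arc::new(SearcherInner {
            segment_readers: vec!["seg0".to_string()],
        })
        .into();
        let cheap_copy = searcher.clone(); // clones the Arc, not the readers
        assert_eq!(cheap_copy.segment_readers().len(), 1);
    }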


@@ -24,8 +24,7 @@ pub enum SegmentComponent {
Store, Store,
/// Temporary storage of the documents, before streamed to `Store`. /// Temporary storage of the documents, before streamed to `Store`.
TempStore, TempStore,
/// Bitset describing which document of the segment is alive. /// Bitset describing which document of the segment is deleted.
/// (It was representing deleted docs but changed to represent alive docs from v0.17)
Delete, Delete,
} }


@@ -16,7 +16,7 @@ use uuid::Uuid;
/// by a UUID which is used to prefix the filenames /// by a UUID which is used to prefix the filenames
/// of all of the file associated with the segment. /// of all of the file associated with the segment.
/// ///
/// In unit test, for reproducibility, the `SegmentId` are /// In unit test, for reproducability, the `SegmentId` are
/// simply generated in an autoincrement fashion. /// simply generated in an autoincrement fashion.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid); pub struct SegmentId(Uuid);
@@ -35,7 +35,7 @@ const ZERO_ARRAY: [u8; 8] = [0u8; 8];
#[cfg(test)] #[cfg(test)]
fn create_uuid() -> Uuid { fn create_uuid() -> Uuid {
let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst); let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY) Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY).unwrap()
} }
#[cfg(not(test))] #[cfg(not(test))]
@@ -57,7 +57,7 @@ impl SegmentId {
/// Picking the first 8 chars is ok to identify /// Picking the first 8 chars is ok to identify
/// segments in a display message (e.g. a5c4dfcb). /// segments in a display message (e.g. a5c4dfcb).
pub fn short_uuid_string(&self) -> String { pub fn short_uuid_string(&self) -> String {
(&self.0.as_simple().to_string()[..8]).to_string() (&self.0.to_simple_ref().to_string()[..8]).to_string()
} }
/// Returns a segment uuid string. /// Returns a segment uuid string.
@@ -65,7 +65,7 @@ impl SegmentId {
/// It consists in 32 lowercase hexadecimal chars /// It consists in 32 lowercase hexadecimal chars
/// (e.g. a5c4dfcbdfe645089129e308e26d5523) /// (e.g. a5c4dfcbdfe645089129e308e26d5523)
pub fn uuid_string(&self) -> String { pub fn uuid_string(&self) -> String {
self.0.as_simple().to_string() self.0.to_simple_ref().to_string()
} }
/// Build a `SegmentId` string from the full uuid string. /// Build a `SegmentId` string from the full uuid string.


@@ -128,14 +128,13 @@ impl SegmentReader {
}) })
} }
#[doc(hidden)] pub(crate) fn fieldnorms_readers(&self) -> &FieldNormReaders {
pub fn fieldnorms_readers(&self) -> &FieldNormReaders {
&self.fieldnorm_readers &self.fieldnorm_readers
} }
/// Accessor to the segment's `StoreReader`. /// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> { pub fn get_store_reader(&self) -> io::Result<StoreReader> {
StoreReader::open(self.store_file.clone(), cache_size) StoreReader::open(self.store_file.clone())
} }
/// Open a new segment for reading. /// Open a new segment for reading.
@@ -170,15 +169,15 @@ impl SegmentReader {
let fast_fields_data = segment.open_read(SegmentComponent::FastFields)?; let fast_fields_data = segment.open_read(SegmentComponent::FastFields)?;
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?; let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
let fast_fields_readers = let fast_field_readers =
Arc::new(FastFieldReaders::new(schema.clone(), fast_fields_composite)); Arc::new(FastFieldReaders::new(schema.clone(), fast_fields_composite));
let fieldnorm_data = segment.open_read(SegmentComponent::FieldNorms)?; let fieldnorm_data = segment.open_read(SegmentComponent::FieldNorms)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?; let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let original_bitset = if segment.meta().has_deletes() { let original_bitset = if segment.meta().has_deletes() {
let alive_doc_file_slice = segment.open_read(SegmentComponent::Delete)?; let delete_file_slice = segment.open_read(SegmentComponent::Delete)?;
let alive_doc_data = alive_doc_file_slice.read_bytes()?; let delete_data = delete_file_slice.read_bytes()?;
Some(AliveBitSet::open(alive_doc_data)) Some(AliveBitSet::open(delete_data))
} else { } else {
None None
}; };
@@ -197,7 +196,7 @@ impl SegmentReader {
max_doc, max_doc,
termdict_composite, termdict_composite,
postings_composite, postings_composite,
fast_fields_readers, fast_fields_readers: fast_field_readers,
fieldnorm_readers, fieldnorm_readers,
segment_id: segment.id(), segment_id: segment.id(),
delete_opstamp: segment.meta().delete_opstamp(), delete_opstamp: segment.meta().delete_opstamp(),
@@ -216,7 +215,7 @@ impl SegmentReader {
/// term dictionary associated to a specific field, /// term dictionary associated to a specific field,
/// and opening the posting list associated to any term. /// and opening the posting list associated to any term.
/// ///
/// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader` /// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
/// is returned. /// is returned.
/// Similarly if the field is marked as indexed but no term has been indexed for the given /// Similarly if the field is marked as indexed but no term has been indexed for the given
/// index. an empty `InvertedIndexReader` is returned (but no warning is logged). /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
@@ -296,7 +295,8 @@ impl SegmentReader {
self.delete_opstamp self.delete_opstamp
} }
/// Returns the bitset representing the alive `DocId`s. /// Returns the bitset representing
/// the documents that have been deleted.
pub fn alive_bitset(&self) -> Option<&AliveBitSet> { pub fn alive_bitset(&self) -> Option<&AliveBitSet> {
self.alive_bitset_opt.as_ref() self.alive_bitset_opt.as_ref()
} }
@@ -305,7 +305,7 @@ impl SegmentReader {
/// as deleted. /// as deleted.
pub fn is_deleted(&self, doc: DocId) -> bool { pub fn is_deleted(&self, doc: DocId) -> bool {
self.alive_bitset() self.alive_bitset()
.map(|alive_bitset| alive_bitset.is_deleted(doc)) .map(|delete_set| delete_set.is_deleted(doc))
.unwrap_or(false) .unwrap_or(false)
} }
@@ -327,7 +327,7 @@ impl SegmentReader {
self.positions_composite.space_usage(), self.positions_composite.space_usage(),
self.fast_fields_readers.space_usage(), self.fast_fields_readers.space_usage(),
self.fieldnorm_readers.space_usage(), self.fieldnorm_readers.space_usage(),
self.get_store_reader(0)?.space_usage(), self.get_store_reader()?.space_usage(),
self.alive_bitset_opt self.alive_bitset_opt
.as_ref() .as_ref()
.map(AliveBitSet::space_usage) .map(AliveBitSet::space_usage)


@@ -1,47 +0,0 @@
use crate::indexer::operation::AddOperation;
use crate::indexer::segment_updater::save_metas;
use crate::indexer::SegmentWriter;
use crate::{Directory, Document, Index, IndexMeta, Opstamp, Segment};
#[doc(hidden)]
pub struct SingleSegmentIndexWriter {
segment_writer: SegmentWriter,
segment: Segment,
opstamp: Opstamp,
}
impl SingleSegmentIndexWriter {
pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
let segment = index.new_segment();
let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
Ok(Self {
segment_writer,
segment,
opstamp: 0,
})
}
pub fn add_document(&mut self, document: Document) -> crate::Result<()> {
let opstamp = self.opstamp;
self.opstamp += 1;
self.segment_writer
.add_document(AddOperation { opstamp, document })
}
pub fn finalize(self) -> crate::Result<Index> {
let max_doc = self.segment_writer.max_doc();
self.segment_writer.finalize()?;
let segment: Segment = self.segment.with_max_doc(max_doc);
let index = segment.index();
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
segments: vec![segment.meta().clone()],
schema: index.schema(),
opstamp: 0,
payload: None,
};
save_metas(&index_meta, index.directory())?;
index.directory().sync_directory()?;
Ok(segment.index().clone())
}
}
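A rough usage sketch of the `SingleSegmentIndexWriter` listed above; the schema, field name and memory budget are illustrative assumptions, and the import path is omitted:

use tantivy::schema::{Schema, TEXT};
use tantivy::{Document, Index};

// Feed every document into one segment, then finalize it into an Index.
fn build_single_segment(docs: Vec<Document>) -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT); // hypothetical field
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = SingleSegmentIndexWriter::new(index, 15_000_000)?;
    for doc in docs {
        writer.add_document(doc)?;
    }
    writer.finalize()
}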


@@ -38,7 +38,7 @@ impl BinarySerializable for FileAddr {
/// A `CompositeWrite` is used to write a `CompositeFile`. /// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> { pub struct CompositeWrite<W = WritePtr> {
write: CountingWriter<W>, write: CountingWriter<W>,
offsets: Vec<(FileAddr, u64)>, offsets: HashMap<FileAddr, u64>,
} }
impl<W: TerminatingWrite + Write> CompositeWrite<W> { impl<W: TerminatingWrite + Write> CompositeWrite<W> {
@@ -47,7 +47,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
pub fn wrap(w: W) -> CompositeWrite<W> { pub fn wrap(w: W) -> CompositeWrite<W> {
CompositeWrite { CompositeWrite {
write: CountingWriter::wrap(w), write: CountingWriter::wrap(w),
offsets: Vec::new(), offsets: HashMap::new(),
} }
} }
@@ -60,8 +60,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> { pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
let offset = self.write.written_bytes(); let offset = self.write.written_bytes();
let file_addr = FileAddr::new(field, idx); let file_addr = FileAddr::new(field, idx);
assert!(!self.offsets.iter().any(|el| el.0 == file_addr)); assert!(!self.offsets.contains_key(&file_addr));
self.offsets.push((file_addr, offset)); self.offsets.insert(file_addr, offset);
&mut self.write &mut self.write
} }
@@ -73,8 +73,16 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_offset = self.write.written_bytes(); let footer_offset = self.write.written_bytes();
VInt(self.offsets.len() as u64).serialize(&mut self.write)?; VInt(self.offsets.len() as u64).serialize(&mut self.write)?;
let mut offset_fields: Vec<_> = self
.offsets
.iter()
.map(|(file_addr, offset)| (*offset, *file_addr))
.collect();
offset_fields.sort();
let mut prev_offset = 0; let mut prev_offset = 0;
for (file_addr, offset) in self.offsets { for (offset, file_addr) in offset_fields {
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?; VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
file_addr.serialize(&mut self.write)?; file_addr.serialize(&mut self.write)?;
prev_offset = offset; prev_offset = offset;
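The footer written by `close` above boils down to delta-encoding offsets that are sorted first; a self-contained toy version of just that step, leaving out the VInt serialization:

// Turn a sorted list of absolute offsets into the gaps that get VInt-encoded.
fn delta_encode(sorted_offsets: &[u64]) -> Vec<u64> {
    let mut prev_offset = 0u64;
    let mut deltas = Vec::with_capacity(sorted_offsets.len());
    for &offset in sorted_offsets {
        deltas.push(offset - prev_offset);
        prev_offset = offset;
    }
    deltas
}

// delta_encode(&[0, 10, 25]) == vec![0, 10, 15]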
@@ -98,14 +106,6 @@ pub struct CompositeFile {
offsets_index: HashMap<FileAddr, Range<usize>>, offsets_index: HashMap<FileAddr, Range<usize>>,
} }
impl std::fmt::Debug for CompositeFile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CompositeFile")
.field("offsets_index", &self.offsets_index)
.finish()
}
}
impl CompositeFile { impl CompositeFile {
/// Opens a composite file stored in a given /// Opens a composite file stored in a given
/// `FileSlice`. /// `FileSlice`.
@@ -233,56 +233,4 @@ mod test {
} }
Ok(()) Ok(())
} }
#[test]
fn test_composite_file_bug() -> crate::Result<()> {
let path = Path::new("test_path");
let directory = RamDirectory::create();
{
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
let mut write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 0);
VInt(32431123u64).serialize(&mut write)?;
write.flush()?;
let write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 1);
write.flush()?;
let mut write = composite_write.for_field_with_idx(Field::from_field_id(0u32), 0);
VInt(1_000_000).serialize(&mut write)?;
write.flush()?;
composite_write.close()?;
}
{
let r = directory.open_read(path)?;
let composite_file = CompositeFile::open(&r)?;
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(1u32), 0)
.unwrap()
.read_bytes()?;
let mut file0_buf = file.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(1u32), 1)
.unwrap()
.read_bytes()?;
let file = file.as_slice();
assert_eq!(file.len(), 0);
}
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(0u32), 0)
.unwrap()
.read_bytes()?;
let file = file.as_slice();
assert_eq!(file.len(), 3);
}
}
Ok(())
}
} }


@@ -1,7 +1,6 @@
use std::io::Write; use std::io::Write;
use std::marker::{Send, Sync}; use std::marker::{Send, Sync};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use std::{fmt, io, thread}; use std::{fmt, io, thread};
@@ -63,12 +62,7 @@ impl Drop for DirectoryLockGuard {
enum TryAcquireLockError { enum TryAcquireLockError {
FileExists, FileExists,
IoError(Arc<io::Error>), IoError(io::Error),
}
impl From<io::Error> for TryAcquireLockError {
fn from(io_error: io::Error) -> Self {
Self::IoError(Arc::new(io_error))
}
} }
fn try_acquire_lock( fn try_acquire_lock(
@@ -79,7 +73,7 @@ fn try_acquire_lock(
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists, OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
OpenWriteError::IoError { io_error, .. } => TryAcquireLockError::IoError(io_error), OpenWriteError::IoError { io_error, .. } => TryAcquireLockError::IoError(io_error),
})?; })?;
write.flush().map_err(TryAcquireLockError::from)?; write.flush().map_err(TryAcquireLockError::IoError)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard { Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
directory: directory.box_clone(), directory: directory.box_clone(),
path: filepath.to_owned(), path: filepath.to_owned(),
@@ -111,7 +105,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// ///
/// Users of `Directory` should typically call `Directory::open_read(...)`, /// Users of `Directory` should typically call `Directory::open_read(...)`,
/// while `Directory` implementor should implement `get_file_handle()`. /// while `Directory` implementor should implement `get_file_handle()`.
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError>; fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
/// Once a virtual file is open, its data may not /// Once a virtual file is open, its data may not
/// change. /// change.


@@ -45,7 +45,7 @@ pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
/// The meta lock file is here to protect the segment files being opened by /// The meta lock file is here to protect the segment files being opened by
/// `IndexReader::reload()` from being garbage collected. /// `IndexReader::reload()` from being garbage collected.
/// It makes it possible for another process to safely consume /// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics /// our index in-writing. Ideally, we may have prefered `RWLock` semantics
/// here, but it is difficult to achieve on Windows. /// here, but it is difficult to achieve on Windows.
/// ///
/// Opening segment readers is a very fast process. /// Opening segment readers is a very fast process.


@@ -1,11 +1,10 @@
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc;
use std::{fmt, io}; use std::{fmt, io};
use crate::Version; use crate::Version;
/// Error while trying to acquire a directory lock. /// Error while trying to acquire a directory lock.
#[derive(Debug, Clone, Error)] #[derive(Debug, Error)]
pub enum LockError { pub enum LockError {
/// Failed to acquired a lock as it is already held by another /// Failed to acquired a lock as it is already held by another
/// client. /// client.
@@ -17,18 +16,11 @@ pub enum LockError {
LockBusy, LockBusy,
/// Trying to acquire a lock failed with an `IoError` /// Trying to acquire a lock failed with an `IoError`
#[error("Failed to acquire the lock due to an io:Error.")] #[error("Failed to acquire the lock due to an io:Error.")]
IoError(Arc<io::Error>), IoError(io::Error),
}
impl LockError {
/// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error) -> Self {
Self::IoError(Arc::new(io_error))
}
} }
/// Error that may occur when opening a directory /// Error that may occur when opening a directory
#[derive(Debug, Clone, Error)] #[derive(Debug, Error)]
pub enum OpenDirectoryError { pub enum OpenDirectoryError {
/// The underlying directory does not exists. /// The underlying directory does not exists.
#[error("Directory does not exist: '{0}'.")] #[error("Directory does not exist: '{0}'.")]
@@ -38,12 +30,12 @@ pub enum OpenDirectoryError {
NotADirectory(PathBuf), NotADirectory(PathBuf),
/// Failed to create a temp directory. /// Failed to create a temp directory.
#[error("Failed to create a temporary directory: '{0}'.")] #[error("Failed to create a temporary directory: '{0}'.")]
FailedToCreateTempDir(Arc<io::Error>), FailedToCreateTempDir(io::Error),
/// IoError /// IoError
#[error("IoError '{io_error:?}' while create directory in: '{directory_path:?}'.")] #[error("IoError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
IoError { IoError {
/// underlying io Error. /// underlying io Error.
io_error: Arc<io::Error>, io_error: io::Error,
/// directory we tried to open. /// directory we tried to open.
directory_path: PathBuf, directory_path: PathBuf,
}, },
@@ -53,14 +45,14 @@ impl OpenDirectoryError {
/// Wraps an io error. /// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, directory_path: PathBuf) -> Self { pub fn wrap_io_error(io_error: io::Error, directory_path: PathBuf) -> Self {
Self::IoError { Self::IoError {
io_error: Arc::new(io_error), io_error,
directory_path, directory_path,
} }
} }
} }
/// Error that may occur when starting to write in a file /// Error that may occur when starting to write in a file
#[derive(Debug, Clone, Error)] #[derive(Debug, Error)]
pub enum OpenWriteError { pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden. /// Our directory is WORM, writing an existing file is forbidden.
/// Checkout the `Directory` documentation. /// Checkout the `Directory` documentation.
@@ -71,7 +63,7 @@ pub enum OpenWriteError {
#[error("IoError '{io_error:?}' while opening file for write: '{filepath}'.")] #[error("IoError '{io_error:?}' while opening file for write: '{filepath}'.")]
IoError { IoError {
/// The underlying `io::Error`. /// The underlying `io::Error`.
io_error: Arc<io::Error>, io_error: io::Error,
/// File path of the file that tantivy failed to open for write. /// File path of the file that tantivy failed to open for write.
filepath: PathBuf, filepath: PathBuf,
}, },
@@ -80,15 +72,11 @@ pub enum OpenWriteError {
impl OpenWriteError { impl OpenWriteError {
/// Wraps an io error. /// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self { pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
Self::IoError { Self::IoError { io_error, filepath }
io_error: Arc::new(io_error),
filepath,
}
} }
} }
/// Type of index incompatibility between the library and the index found on disk /// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue /// Used to catch and provide a hint to solve this incompatibility issue
#[derive(Clone)]
pub enum Incompatibility { pub enum Incompatibility {
/// This library cannot decompress the index found on disk /// This library cannot decompress the index found on disk
CompressionMismatch { CompressionMismatch {
@@ -147,7 +135,7 @@ impl fmt::Debug for Incompatibility {
} }
/// Error that may occur when accessing a file read /// Error that may occur when accessing a file read
#[derive(Debug, Clone, Error)] #[derive(Debug, Error)]
pub enum OpenReadError { pub enum OpenReadError {
/// The file does not exists. /// The file does not exists.
#[error("Files does not exists: {0:?}")] #[error("Files does not exists: {0:?}")]
@@ -158,7 +146,7 @@ pub enum OpenReadError {
)] )]
IoError { IoError {
/// The underlying `io::Error`. /// The underlying `io::Error`.
io_error: Arc<io::Error>, io_error: io::Error,
/// File path of the file that tantivy failed to open for read. /// File path of the file that tantivy failed to open for read.
filepath: PathBuf, filepath: PathBuf,
}, },
@@ -170,14 +158,11 @@ pub enum OpenReadError {
impl OpenReadError { impl OpenReadError {
/// Wraps an io error. /// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self { pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
Self::IoError { Self::IoError { io_error, filepath }
io_error: Arc::new(io_error),
filepath,
}
} }
} }
/// Error that may occur when trying to delete a file /// Error that may occur when trying to delete a file
#[derive(Debug, Clone, Error)] #[derive(Debug, Error)]
pub enum DeleteError { pub enum DeleteError {
/// The file does not exists. /// The file does not exists.
#[error("File does not exists: '{0}'.")] #[error("File does not exists: '{0}'.")]
@@ -187,7 +172,7 @@ pub enum DeleteError {
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")] #[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
IoError { IoError {
/// The underlying `io::Error`. /// The underlying `io::Error`.
io_error: Arc<io::Error>, io_error: io::Error,
/// File path of the file that tantivy failed to delete. /// File path of the file that tantivy failed to delete.
filepath: PathBuf, filepath: PathBuf,
}, },
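The `Arc<io::Error>` wrapping on the left-hand side is what lets these error enums derive `Clone`, since `io::Error` itself is not `Clone`. A toy enum showing the same pattern, unrelated to the actual tantivy types:

use std::io;
use std::path::PathBuf;
use std::sync::Arc;

#[derive(Debug, Clone)]
enum ToyOpenError {
    FileDoesNotExist(PathBuf),
    // Sharing the io::Error behind an Arc keeps the whole enum cloneable.
    Io(Arc<io::Error>),
}

impl From<io::Error> for ToyOpenError {
    fn from(io_error: io::Error) -> Self {
        ToyOpenError::Io(Arc::new(io_error))
    }
}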


@@ -54,7 +54,7 @@ impl<B> From<B> for FileSlice
where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync
{ {
fn from(bytes: B) -> FileSlice { fn from(bytes: B) -> FileSlice {
FileSlice::new(Arc::new(OwnedBytes::new(bytes))) FileSlice::new(Box::new(OwnedBytes::new(bytes)))
} }
} }
@@ -75,7 +75,7 @@ impl fmt::Debug for FileSlice {
impl FileSlice { impl FileSlice {
/// Wraps a FileHandle. /// Wraps a FileHandle.
pub fn new(file_handle: Arc<dyn FileHandle>) -> Self { pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
let num_bytes = file_handle.len(); let num_bytes = file_handle.len();
FileSlice::new_with_num_bytes(file_handle, num_bytes) FileSlice::new_with_num_bytes(file_handle, num_bytes)
} }
@@ -83,9 +83,9 @@ impl FileSlice {
/// Wraps a FileHandle. /// Wraps a FileHandle.
#[doc(hidden)] #[doc(hidden)]
#[must_use] #[must_use]
pub fn new_with_num_bytes(file_handle: Arc<dyn FileHandle>, num_bytes: usize) -> Self { pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
FileSlice { FileSlice {
data: file_handle, data: Arc::from(file_handle),
range: 0..num_bytes, range: 0..num_bytes,
} }
} }
@@ -112,7 +112,7 @@ impl FileSlice {
/// Returns a `OwnedBytes` with all of the data in the `FileSlice`. /// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
/// ///
/// The behavior is strongly dependent on the implementation of the underlying /// The behavior is strongly dependant on the implementation of the underlying
/// `Directory` and the `FileSliceTrait` it creates. /// `Directory` and the `FileSliceTrait` it creates.
/// In particular, it is up to the `Directory` implementation /// In particular, it is up to the `Directory` implementation
/// to handle caching if needed. /// to handle caching if needed.
@@ -235,7 +235,6 @@ impl FileHandle for OwnedBytes {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::io; use std::io;
use std::sync::Arc;
use common::HasLen; use common::HasLen;
@@ -243,7 +242,7 @@ mod tests {
#[test] #[test]
fn test_file_slice() -> io::Result<()> { fn test_file_slice() -> io::Result<()> {
let file_slice = FileSlice::new(Arc::new(b"abcdef".as_ref())); let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
assert_eq!(file_slice.len(), 6); assert_eq!(file_slice.len(), 6);
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef"); assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab"); assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
@@ -287,7 +286,7 @@ mod tests {
#[test] #[test]
fn test_slice_simple_read() -> io::Result<()> { fn test_slice_simple_read() -> io::Result<()> {
let slice = FileSlice::new(Arc::new(&b"abcdef"[..])); let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!(slice.len(), 6); assert_eq!(slice.len(), 6);
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef"); assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd"); assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
@@ -296,7 +295,7 @@ mod tests {
#[test] #[test]
fn test_slice_read_slice() -> io::Result<()> { fn test_slice_read_slice() -> io::Result<()> {
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..])); let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd"); assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd");
Ok(()) Ok(())
} }
@@ -304,7 +303,7 @@ mod tests {
#[test] #[test]
#[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")] #[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")]
fn test_slice_read_slice_invalid_range_exceeds() { fn test_slice_read_slice_invalid_range_exceeds() {
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..])); let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!( assert_eq!(
slice_deref.read_bytes_slice(0..10).unwrap().as_ref(), slice_deref.read_bytes_slice(0..10).unwrap().as_ref(),
b"bcd" b"bcd"


@@ -110,7 +110,7 @@ mod tests {
let tmp_file = tmp_dir.path().join("watched.txt"); let tmp_file = tmp_dir.path().join("watched.txt");
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam_channel::unbounded(); let (tx, rx) = crossbeam::channel::unbounded();
let timeout = Duration::from_millis(100); let timeout = Duration::from_millis(100);
let watcher = FileWatcher::new(&tmp_file); let watcher = FileWatcher::new(&tmp_file);
@@ -153,7 +153,7 @@ mod tests {
let tmp_file = tmp_dir.path().join("watched.txt"); let tmp_file = tmp_dir.path().join("watched.txt");
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam_channel::unbounded(); let (tx, rx) = crossbeam::channel::unbounded();
let timeout = Duration::from_millis(100); let timeout = Duration::from_millis(100);
let watcher = FileWatcher::new(&tmp_file); let watcher = FileWatcher::new(&tmp_file);
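These watcher tests only need an unbounded channel to observe callbacks; a toy version of that pattern with the standalone `crossbeam_channel` crate named in the left column (the spawned thread stands in for the watcher callback):

use std::thread;
use std::time::Duration;

fn channel_demo() {
    let (tx, rx) = crossbeam_channel::unbounded::<()>();
    thread::spawn(move || {
        // Pretend the file watcher fired once.
        let _ = tx.send(());
    });
    assert!(rx.recv_timeout(Duration::from_millis(100)).is_ok());
}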


@@ -156,7 +156,6 @@ impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
mod tests { mod tests {
use std::io; use std::io;
use std::sync::Arc;
use common::BinarySerializable; use common::BinarySerializable;
@@ -169,7 +168,7 @@ mod tests {
let footer = Footer::new(123); let footer = Footer::new(123);
footer.append_footer(&mut buf).unwrap(); footer.append_footer(&mut buf).unwrap();
let owned_bytes = OwnedBytes::new(buf); let owned_bytes = OwnedBytes::new(buf);
let fileslice = FileSlice::new(Arc::new(owned_bytes)); let fileslice = FileSlice::new(Box::new(owned_bytes));
let (footer_deser, _body) = Footer::extract_footer(fileslice).unwrap(); let (footer_deser, _body) = Footer::extract_footer(fileslice).unwrap();
assert_eq!(footer_deser.crc(), footer.crc()); assert_eq!(footer_deser.crc(), footer.crc());
} }
@@ -182,7 +181,7 @@ mod tests {
let owned_bytes = OwnedBytes::new(buf); let owned_bytes = OwnedBytes::new(buf);
let fileslice = FileSlice::new(Arc::new(owned_bytes)); let fileslice = FileSlice::new(Box::new(owned_bytes));
let err = Footer::extract_footer(fileslice).unwrap_err(); let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!( assert_eq!(
err.to_string(), err.to_string(),
@@ -199,7 +198,7 @@ mod tests {
let owned_bytes = OwnedBytes::new(buf); let owned_bytes = OwnedBytes::new(buf);
let fileslice = FileSlice::new(Arc::new(owned_bytes)); let fileslice = FileSlice::new(Box::new(owned_bytes));
let err = Footer::extract_footer(fileslice).unwrap_err(); let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof); assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
assert_eq!( assert_eq!(
@@ -218,7 +217,7 @@ mod tests {
let owned_bytes = OwnedBytes::new(buf); let owned_bytes = OwnedBytes::new(buf);
let fileslice = FileSlice::new(Arc::new(owned_bytes)); let fileslice = FileSlice::new(Box::new(owned_bytes));
let err = Footer::extract_footer(fileslice).unwrap_err(); let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::InvalidData); assert_eq!(err.kind(), io::ErrorKind::InvalidData);
assert_eq!( assert_eq!(


@@ -114,7 +114,7 @@ impl ManagedDirectory {
let mut files_to_delete = vec![]; let mut files_to_delete = vec![];
// It is crucial to get the living files after acquiring the // It is crucial to get the living files after acquiring the
// read lock of meta information. That way, we // read lock of meta informations. That way, we
// avoid the following scenario. // avoid the following scenario.
// //
// 1) we get the list of living files. // 1) we get the list of living files.
@@ -242,13 +242,16 @@ impl ManagedDirectory {
/// Verify checksum of a managed file /// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> { pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?; let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader) let (footer, data) =
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?; Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IoError {
io_error,
filepath: path.to_path_buf(),
})?;
let bytes = data let bytes = data
.read_bytes() .read_bytes()
.map_err(|io_error| OpenReadError::IoError { .map_err(|io_error| OpenReadError::IoError {
io_error: Arc::new(io_error),
filepath: path.to_path_buf(), filepath: path.to_path_buf(),
io_error,
})?; })?;
let mut hasher = Hasher::new(); let mut hasher = Hasher::new();
hasher.update(bytes.as_slice()); hasher.update(bytes.as_slice());
@@ -269,9 +272,9 @@ impl ManagedDirectory {
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> { fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
let file_slice = self.open_read(path)?; let file_slice = self.open_read(path)?;
Ok(Arc::new(file_slice)) Ok(Box::new(file_slice))
} }
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
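The checksum path in `validate_checksum` above hashes the body with crc32fast and compares it against the CRC recorded in the footer; a reduced sketch of that comparison, with `expected_crc` standing in for the footer value:

// Returns true when the body bytes hash to the CRC recorded in the footer.
fn crc_matches(body: &[u8], expected_crc: u32) -> bool {
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(body);
    hasher.finalize() == expected_crc
}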


@@ -174,8 +174,7 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself. /// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RamDirectory. /// For your unit tests, prefer the RamDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> { pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new() let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
.map_err(|io_err| OpenDirectoryError::FailedToCreateTempDir(Arc::new(io_err)))?;
Ok(MmapDirectory::new( Ok(MmapDirectory::new(
tempdir.path().to_path_buf(), tempdir.path().to_path_buf(),
Some(tempdir), Some(tempdir),
@@ -310,7 +309,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
} }
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn get_file_handle(&self, path: &Path) -> result::Result<Arc<dyn FileHandle>, OpenReadError> { fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
debug!("Open Read {:?}", path); debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -331,7 +330,7 @@ impl Directory for MmapDirectory {
}) })
.unwrap_or_else(OwnedBytes::empty); .unwrap_or_else(OwnedBytes::empty);
Ok(Arc::new(owned_bytes)) Ok(Box::new(owned_bytes))
} }
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
@@ -343,7 +342,7 @@ impl Directory for MmapDirectory {
DeleteError::FileDoesNotExist(path.to_owned()) DeleteError::FileDoesNotExist(path.to_owned())
} else { } else {
DeleteError::IoError { DeleteError::IoError {
io_error: Arc::new(e), io_error: e,
filepath: path.to_path_buf(), filepath: path.to_path_buf(),
} }
} }
@@ -423,9 +422,9 @@ impl Directory for MmapDirectory {
.write(true) .write(true)
.create(true) //< if the file does not exist yet, create it. .create(true) //< if the file does not exist yet, create it.
.open(&full_path) .open(&full_path)
.map_err(LockError::wrap_io_error)?; .map_err(LockError::IoError)?;
if lock.is_blocking { if lock.is_blocking {
file.lock_exclusive().map_err(LockError::wrap_io_error)?; file.lock_exclusive().map_err(LockError::IoError)?;
} else { } else {
file.try_lock_exclusive().map_err(|_| LockError::LockBusy)? file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
} }
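The lock acquisition above goes through `fs2`'s `FileExt`; a stripped-down sketch of the blocking versus non-blocking branch, with a stand-in error type:

use std::fs::File;

use fs2::FileExt;

// Blocking callers wait for the exclusive lock; non-blocking callers map a
// failed try_lock into a "busy" signal instead of waiting.
fn acquire_exclusive(file: &File, blocking: bool) -> Result<(), String> {
    if blocking {
        file.lock_exclusive().map_err(|io_err| io_err.to_string())
    } else {
        file.try_lock_exclusive()
            .map_err(|_| "lock busy".to_string())
    }
}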


@@ -40,7 +40,7 @@ impl Drop for VecWriter {
fn drop(&mut self) { fn drop(&mut self) {
if !self.is_flushed { if !self.is_flushed {
warn!( warn!(
"You forgot to flush {:?} before its writer got Drop. Do not rely on drop. This \ "You forgot to flush {:?} before its writter got Drop. Do not rely on drop. This \
also occurs when the indexer crashed, so you may want to check the logs for the \ also occurs when the indexer crashed, so you may want to check the logs for the \
root cause.", root cause.",
self.path self.path
@@ -160,9 +160,9 @@ impl RamDirectory {
} }
impl Directory for RamDirectory { impl Directory for RamDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> { fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
let file_slice = self.open_read(path)?; let file_slice = self.open_read(path)?;
Ok(Arc::new(file_slice)) Ok(Box::new(file_slice))
} }
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
@@ -172,7 +172,7 @@ impl Directory for RamDirectory {
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RamDirectory::delete", |_| { fail_point!("RamDirectory::delete", |_| {
Err(DeleteError::IoError { Err(DeleteError::IoError {
io_error: Arc::new(io::Error::from(io::ErrorKind::Other)), io_error: io::Error::from(io::ErrorKind::Other),
filepath: path.to_path_buf(), filepath: path.to_path_buf(),
}) })
}); });
@@ -184,7 +184,7 @@ impl Directory for RamDirectory {
.fs .fs
.read() .read()
.map_err(|e| OpenReadError::IoError { .map_err(|e| OpenReadError::IoError {
io_error: Arc::new(io::Error::new(io::ErrorKind::Other, e.to_string())), io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
filepath: path.to_path_buf(), filepath: path.to_path_buf(),
})? })?
.exists(path)) .exists(path))
@@ -208,7 +208,7 @@ impl Directory for RamDirectory {
self.open_read(path)? self.open_read(path)?
.read_bytes() .read_bytes()
.map_err(|io_error| OpenReadError::IoError { .map_err(|io_error| OpenReadError::IoError {
io_error: Arc::new(io_error), io_error,
filepath: path.to_path_buf(), filepath: path.to_path_buf(),
})?; })?;
Ok(bytes.as_slice().to_owned()) Ok(bytes.as_slice().to_owned())


@@ -181,7 +181,7 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
fn test_watch(directory: &dyn Directory) { fn test_watch(directory: &dyn Directory) {
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam_channel::unbounded(); let (tx, rx) = crossbeam::channel::unbounded();
let timeout = Duration::from_millis(500); let timeout = Duration::from_millis(500);
let handle = directory let handle = directory
@@ -247,7 +247,7 @@ fn test_lock_blocking(directory: &dyn Directory) {
//< lock_a_res is sent to the thread. //< lock_a_res is sent to the thread.
in_thread_clone.store(true, SeqCst); in_thread_clone.store(true, SeqCst);
let _just_sync = receiver.recv(); let _just_sync = receiver.recv();
// explicitly dropping lock_a_res. It would have been sufficient to just force it // explicitely dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way. // to be part of the move, but the intent seems clearer that way.
drop(lock_a_res); drop(lock_a_res);
}); });


@@ -7,7 +7,7 @@ use crate::DocId;
/// ///
/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions /// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
/// to compare [u32; 4]. /// to compare [u32; 4].
pub const TERMINATED: DocId = i32::MAX as u32; pub const TERMINATED: DocId = std::i32::MAX as u32;
/// Represents an iterable set of sorted doc ids. /// Represents an iterable set of sorted doc ids.
pub trait DocSet: Send { pub trait DocSet: Send {
@@ -24,6 +24,7 @@ pub trait DocSet: Send {
/// ///
/// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should /// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
/// be returned. /// be returned.
/// TODO Test existing docsets.
fn advance(&mut self) -> DocId; fn advance(&mut self) -> DocId;
/// Advances the DocSet forward until reaching the target, or going to the /// Advances the DocSet forward until reaching the target, or going to the
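The `TERMINATED` sentinel and the `advance` contract described above translate into the usual consumption loop; a small generic sketch (counting is just an arbitrary aggregate):

use tantivy::{DocId, DocSet, TERMINATED};

// Walk a DocSet from its current position until the sentinel comes back.
fn count_remaining<D: DocSet>(docset: &mut D) -> u32 {
    let mut count = 0u32;
    let mut doc: DocId = docset.doc();
    while doc != TERMINATED {
        count += 1;
        doc = docset.advance();
    }
    count
}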


@@ -1,7 +1,7 @@
//! Definition of Tantivy's errors and results. //! Definition of Tantivy's errors and results.
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Arc, PoisonError}; use std::sync::PoisonError;
use std::{fmt, io}; use std::{fmt, io};
use thiserror::Error; use thiserror::Error;
@@ -15,7 +15,6 @@ use crate::{query, schema};
/// Represents a `DataCorruption` error. /// Represents a `DataCorruption` error.
/// ///
/// When facing data corruption, tantivy actually panics or returns this error. /// When facing data corruption, tantivy actually panics or returns this error.
#[derive(Clone)]
pub struct DataCorruption { pub struct DataCorruption {
filepath: Option<PathBuf>, filepath: Option<PathBuf>,
comment: String, comment: String,
@@ -51,7 +50,7 @@ impl fmt::Debug for DataCorruption {
} }
/// The library's error enum /// The library's error enum
#[derive(Debug, Clone, Error)] #[derive(Debug, Error)]
pub enum TantivyError { pub enum TantivyError {
/// Failed to open the directory. /// Failed to open the directory.
#[error("Failed to open the directory: '{0:?}'")] #[error("Failed to open the directory: '{0:?}'")]
@@ -70,7 +69,7 @@ pub enum TantivyError {
LockFailure(LockError, Option<String>), LockFailure(LockError, Option<String>),
/// IO Error. /// IO Error.
#[error("An IO error occurred: '{0}'")] #[error("An IO error occurred: '{0}'")]
IoError(Arc<io::Error>), IoError(#[from] io::Error),
/// Data corruption. /// Data corruption.
#[error("Data corrupted: '{0:?}'")] #[error("Data corrupted: '{0:?}'")]
DataCorruption(DataCorruption), DataCorruption(DataCorruption),
@@ -98,10 +97,6 @@ pub enum TantivyError {
/// Index incompatible with current version of Tantivy. /// Index incompatible with current version of Tantivy.
#[error("{0:?}")] #[error("{0:?}")]
IncompatibleIndex(Incompatibility), IncompatibleIndex(Incompatibility),
/// An internal error occurred. This represents an internal state that should not be reached,
/// e.g. a data structure is incorrectly initialized.
#[error("Internal error: '{0}'")]
InternalError(String),
} }
#[cfg(feature = "quickwit")] #[cfg(feature = "quickwit")]
@@ -126,11 +121,6 @@ impl From<AsyncIoError> for TantivyError {
} }
} }
impl From<io::Error> for TantivyError {
fn from(io_err: io::Error) -> TantivyError {
TantivyError::IoError(Arc::new(io_err))
}
}
impl From<DataCorruption> for TantivyError { impl From<DataCorruption> for TantivyError {
fn from(data_corruption: DataCorruption) -> TantivyError { fn from(data_corruption: DataCorruption) -> TantivyError {
TantivyError::DataCorruption(data_corruption) TantivyError::DataCorruption(data_corruption)
@@ -185,7 +175,7 @@ impl From<schema::DocParsingError> for TantivyError {
impl From<serde_json::Error> for TantivyError { impl From<serde_json::Error> for TantivyError {
fn from(error: serde_json::Error) -> TantivyError { fn from(error: serde_json::Error) -> TantivyError {
TantivyError::IoError(Arc::new(error.into())) TantivyError::IoError(error.into())
} }
} }
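The two columns above show the trade-off for the IO variant: `#[from] io::Error` lets `?` convert automatically but keeps the enum non-`Clone`, while the `Arc` wrapper needs a hand-written `From` impl. A toy `thiserror` enum illustrating the `#[from]` side:

use thiserror::Error;

#[derive(Debug, Error)]
enum ToyError {
    // The derived From<io::Error> impl is what makes the `?` below compile.
    #[error("An IO error occurred: '{0}'")]
    Io(#[from] std::io::Error),
}

fn read_meta() -> Result<String, ToyError> {
    Ok(std::fs::read_to_string("meta.json")?)
}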


@@ -188,14 +188,14 @@ mod bench {
} }
#[bench] #[bench]
fn bench_alive_bitset_iter_deser_on_fly(bench: &mut Bencher) { fn bench_deletebitset_iter_deser_on_fly(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000); let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);
bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>()); bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>());
} }
#[bench] #[bench]
fn bench_alive_bitset_access(bench: &mut Bencher) { fn bench_deletebitset_access(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000); let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);
bench.iter(|| { bench.iter(|| {
@@ -206,14 +206,14 @@ mod bench {
} }
#[bench] #[bench]
fn bench_alive_bitset_iter_deser_on_fly_1_8_alive(bench: &mut Bencher) { fn bench_deletebitset_iter_deser_on_fly_1_8_alive(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000); let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000);
bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>()); bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>());
} }
#[bench] #[bench]
fn bench_alive_bitset_access_1_8_alive(bench: &mut Bencher) { fn bench_deletebitset_access_1_8_alive(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000); let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000);
bench.iter(|| { bench.iter(|| {
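For reference, the helper driving these benches builds a bitset over one million documents with a handful of deletions; the same calls outside a bench harness (`for_test_from_deleted_docs` may only be exposed in tantivy's own test and bench builds):

use tantivy::fastfield::AliveBitSet;

// Mark four doc ids as deleted out of 1_000_000 and peek at the survivors.
fn first_alive_docs() -> Vec<u32> {
    let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);
    alive_bitset.iter_alive().take(5).collect()
}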


@@ -6,6 +6,8 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::ops::Deref;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value, FAST, INDEXED, STORED}; use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value, FAST, INDEXED, STORED};
use crate::{DocAddress, DocSet, Index, Searcher, Term}; use crate::{DocAddress, DocSet, Index, Searcher, Term};
@@ -35,7 +37,9 @@ mod tests {
Ok(()) Ok(())
} }
fn create_index_for_test<T: Into<BytesOptions>>(byte_options: T) -> crate::Result<Searcher> { fn create_index_for_test<T: Into<BytesOptions>>(
byte_options: T,
) -> crate::Result<impl Deref<Target = Searcher>> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into()); let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -82,7 +86,7 @@ mod tests {
let field = searcher.schema().get_field("string_bytes").unwrap(); let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref()); let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic); let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight = term_query.specialized_weight(&searcher, true)?; let term_weight = term_query.specialized_weight(&*searcher, true)?;
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?; let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
assert_eq!(term_scorer.doc(), 0u32); assert_eq!(term_scorer.doc(), 0u32);
Ok(()) Ok(())
@@ -95,7 +99,7 @@ mod tests {
let field = searcher.schema().get_field("string_bytes").unwrap(); let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref()); let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic); let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight_err = term_query.specialized_weight(&searcher, false); let term_weight_err = term_query.specialized_weight(&*searcher, false);
assert!(matches!( assert!(matches!(
term_weight_err, term_weight_err,
Err(crate::TantivyError::SchemaError(_)) Err(crate::TantivyError::SchemaError(_))


@@ -1,7 +1,5 @@
use fastfield_codecs::Column;
use crate::directory::{FileSlice, OwnedBytes}; use crate::directory::{FileSlice, OwnedBytes};
use crate::fastfield::{DynamicFastFieldReader, MultiValueLength}; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, MultiValueLength};
use crate::DocId; use crate::DocId;
/// Reader for byte array fast fields /// Reader for byte array fast fields
@@ -30,9 +28,8 @@ impl BytesFastFieldReader {
} }
fn range(&self, doc: DocId) -> (usize, usize) { fn range(&self, doc: DocId) -> (usize, usize) {
let idx = doc as u64; let start = self.idx_reader.get(doc) as usize;
let start = self.idx_reader.get_val(idx) as usize; let stop = self.idx_reader.get(doc + 1) as usize;
let stop = self.idx_reader.get_val(idx + 1) as usize;
(start, stop) (start, stop)
} }
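The `range` method above is the classic offsets-array layout for variable-length per-document data: one flat byte buffer plus `num_docs + 1` offsets. A self-contained toy version, independent of the fast-field reader types:

// Bytes of document `doc` live in data[offsets[doc]..offsets[doc + 1]].
struct FlatBytes {
    offsets: Vec<usize>, // len == num_docs + 1, offsets[0] == 0
    data: Vec<u8>,
}

impl FlatBytes {
    fn get_bytes(&self, doc: usize) -> &[u8] {
        let start = self.offsets[doc];
        let stop = self.offsets[doc + 1];
        &self.data[start..stop]
    }
}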


@@ -1,361 +0,0 @@
use std::io::{self, Write};
use std::num::NonZeroU64;
use common::BinarySerializable;
use fastdivide::DividerU64;
use fastfield_codecs::{Column, FastFieldCodec};
use ownedbytes::OwnedBytes;
pub const GCD_DEFAULT: u64 = 1;
/// Wrapper for accessing a fastfield.
///
/// Holds the data and the codec to the read the data.
#[derive(Clone)]
pub struct GCDReader<CodecReader: Column> {
gcd_params: GCDParams,
reader: CodecReader,
}
#[derive(Debug, Clone, Copy)]
struct GCDParams {
gcd: u64,
min_value: u64,
num_vals: u64,
}
impl GCDParams {
pub fn eval(&self, val: u64) -> u64 {
self.min_value + self.gcd * val
}
}
impl BinarySerializable for GCDParams {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.gcd.serialize(writer)?;
self.min_value.serialize(writer)?;
self.num_vals.serialize(writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let gcd: u64 = u64::deserialize(reader)?;
let min_value: u64 = u64::deserialize(reader)?;
let num_vals: u64 = u64::deserialize(reader)?;
Ok(Self {
gcd,
min_value,
num_vals,
})
}
}
pub fn open_gcd_from_bytes<WrappedCodec: FastFieldCodec>(
bytes: OwnedBytes,
) -> io::Result<GCDReader<WrappedCodec::Reader>> {
let footer_offset = bytes.len() - 24;
let (body, mut footer) = bytes.split(footer_offset);
let gcd_params = GCDParams::deserialize(&mut footer)?;
let reader: WrappedCodec::Reader = WrappedCodec::open_from_bytes(body)?;
Ok(GCDReader { gcd_params, reader })
}
impl<C: Column + Clone> Column for GCDReader<C> {
#[inline]
fn get_val(&self, doc: u64) -> u64 {
let val = self.reader.get_val(doc);
self.gcd_params.eval(val)
}
fn min_value(&self) -> u64 {
self.gcd_params.eval(self.reader.min_value())
}
fn max_value(&self) -> u64 {
self.gcd_params.eval(self.reader.max_value())
}
fn num_vals(&self) -> u64 {
self.gcd_params.num_vals
}
}
pub fn write_gcd_header<W: Write>(
field_write: &mut W,
min_value: u64,
gcd: u64,
num_vals: u64,
) -> io::Result<()> {
gcd.serialize(field_write)?;
min_value.serialize(field_write)?;
num_vals.serialize(field_write)?;
Ok(())
}
/// Compute the gcd of two non null numbers.
///
/// It is recommended, but not required, to feed values such that `large >= small`.
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
loop {
let rem: u64 = large.get() % small;
if let Some(new_small) = NonZeroU64::new(rem) {
(large, small) = (small, new_small);
} else {
return small;
}
}
}
// Find GCD for iterator of numbers
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
let mut numbers = numbers.flat_map(NonZeroU64::new);
let mut gcd: NonZeroU64 = numbers.next()?;
if gcd.get() == 1 {
return Some(gcd);
}
let mut gcd_divider = DividerU64::divide_by(gcd.get());
for val in numbers {
let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
if remainder == 0 {
continue;
}
gcd = compute_gcd(val, gcd);
if gcd.get() == 1 {
return Some(gcd);
}
gcd_divider = DividerU64::divide_by(gcd.get());
}
Some(gcd)
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::{Duration, SystemTime};
use common::HasLen;
use fastfield_codecs::Column;
use crate::directory::{CompositeFile, RamDirectory, WritePtr};
use crate::fastfield::gcd::compute_gcd;
use crate::fastfield::serializer::FastFieldCodecEnableCheck;
use crate::fastfield::tests::{FIELD, FIELDI64, SCHEMA, SCHEMAI64};
use crate::fastfield::{
find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecType,
FastFieldsWriter, ALL_CODECS,
};
use crate::schema::{Cardinality, Schema};
use crate::{DateOptions, DatePrecision, DateTime, Directory};
fn get_index(
docs: &[crate::Document],
schema: &Schema,
codec_enable_checker: FastFieldCodecEnableCheck,
) -> crate::Result<RamDirectory> {
let directory: RamDirectory = RamDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer =
CompositeFastFieldSerializer::from_write_with_codec(write, codec_enable_checker)
.unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(schema);
for doc in docs {
fast_field_writers.add_document(doc);
}
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
Ok(directory)
}
fn test_fastfield_gcd_i64_with_codec(
code_type: FastFieldCodecType,
num_vals: usize,
) -> crate::Result<()> {
let path = Path::new("test");
let mut docs = vec![];
for i in 1..=num_vals {
let val = (i as i64 - 5) * 1000i64;
docs.push(doc!(*FIELDI64=>val));
}
let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<i64>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), -4000i64);
assert_eq!(fast_field_reader.get_val(1), -3000i64);
assert_eq!(fast_field_reader.get_val(2), -2000i64);
assert_eq!(fast_field_reader.max_value(), (num_vals as i64 - 5) * 1000);
assert_eq!(fast_field_reader.min_value(), -4000i64);
let file = directory.open_read(path).unwrap();
// Can't apply gcd
let path = Path::new("test");
docs.pop();
docs.push(doc!(*FIELDI64=>2001i64));
let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
let file2 = directory.open_read(path).unwrap();
assert!(file2.len() > file.len());
Ok(())
}
#[test]
fn test_fastfield_gcd_i64() -> crate::Result<()> {
for &code_type in ALL_CODECS {
test_fastfield_gcd_i64_with_codec(code_type, 5500)?;
}
Ok(())
}
fn test_fastfield_gcd_u64_with_codec(
code_type: FastFieldCodecType,
num_vals: usize,
) -> crate::Result<()> {
let path = Path::new("test");
let mut docs = vec![];
for i in 1..=num_vals {
let val = i as u64 * 1000u64;
docs.push(doc!(*FIELD=>val));
}
let directory = get_index(&docs, &SCHEMA, code_type.into())?;
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), 1000u64);
assert_eq!(fast_field_reader.get_val(1), 2000u64);
assert_eq!(fast_field_reader.get_val(2), 3000u64);
assert_eq!(fast_field_reader.max_value(), num_vals as u64 * 1000);
assert_eq!(fast_field_reader.min_value(), 1000u64);
let file = directory.open_read(path).unwrap();
// Can't apply gcd
let path = Path::new("test");
docs.pop();
docs.push(doc!(*FIELDI64=>2001u64));
let directory = get_index(&docs, &SCHEMA, code_type.into())?;
let file2 = directory.open_read(path).unwrap();
assert!(file2.len() > file.len());
Ok(())
}
#[test]
fn test_fastfield_gcd_u64() -> crate::Result<()> {
for &code_type in ALL_CODECS {
test_fastfield_gcd_u64_with_codec(code_type, 5500)?;
}
Ok(())
}
#[test]
pub fn test_fastfield2() {
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get_val(0), 100);
assert_eq!(test_fastfield.get_val(1), 200);
assert_eq!(test_fastfield.get_val(2), 300);
}
#[test]
pub fn test_gcd_date() -> crate::Result<()> {
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
assert!(size_prec_sec < size_prec_micro);
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Seconds)?;
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Microseconds)?;
assert!(size_prec_sec < size_prec_micro);
Ok(())
}
fn test_gcd_date_with_codec(
codec_type: FastFieldCodecType,
precision: DatePrecision,
) -> crate::Result<usize> {
let time1 = DateTime::from_timestamp_micros(
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let time2 = DateTime::from_timestamp_micros(
SystemTime::now()
.checked_sub(Duration::from_micros(4111))
.unwrap()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let time3 = DateTime::from_timestamp_micros(
SystemTime::now()
.checked_sub(Duration::from_millis(2000))
.unwrap()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let mut schema_builder = Schema::builder();
let date_options = DateOptions::default()
.set_fast(Cardinality::SingleValue)
.set_precision(precision);
let field = schema_builder.add_date_field("field", date_options);
let schema = schema_builder.build();
let docs = vec![doc!(field=>time1), doc!(field=>time2), doc!(field=>time3)];
let directory = get_index(&docs, &schema, codec_type.into())?;
let path = Path::new("test");
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let len = file.len();
let test_fastfield = DynamicFastFieldReader::<DateTime>::open(file)?;
assert_eq!(test_fastfield.get_val(0), time1.truncate(precision));
assert_eq!(test_fastfield.get_val(1), time2.truncate(precision));
assert_eq!(test_fastfield.get_val(2), time3.truncate(precision));
Ok(len)
}
#[test]
fn test_compute_gcd() {
let test_compute_gcd_aux = |large, small, expected| {
let large = NonZeroU64::new(large).unwrap();
let small = NonZeroU64::new(small).unwrap();
let expected = NonZeroU64::new(expected).unwrap();
assert_eq!(compute_gcd(small, large), expected);
assert_eq!(compute_gcd(large, small), expected);
};
test_compute_gcd_aux(1, 4, 1);
test_compute_gcd_aux(2, 4, 2);
test_compute_gcd_aux(10, 25, 5);
test_compute_gcd_aux(25, 25, 25);
}
#[test]
fn find_gcd_test() {
assert_eq!(find_gcd([0].into_iter()), None);
assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
assert_eq!(find_gcd([].into_iter()), None);
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
assert_eq!(find_gcd([0, 0].into_iter()), None);
}
}
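The core of the removed module above is the Euclidean loop in `compute_gcd` plus the early-exit fold in `find_gcd`. A self-contained toy gcd on plain `u64` (the real helpers use `NonZeroU64` to rule out zero up front):

// Euclidean algorithm: repeatedly replace (a, b) with (b, a % b).
fn gcd_u64(mut a: u64, mut b: u64) -> u64 {
    while b != 0 {
        let rem = a % b;
        a = b;
        b = rem;
    }
    a
}

// gcd_u64(3000, 5000) == 1000, matching what find_gcd would report for the
// column of values [3000, 5000, 12_000].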


@@ -20,18 +20,15 @@
//! //!
//! Read access performance is comparable to that of an array lookup. //! Read access performance is comparable to that of an array lookup.
use fastfield_codecs::FastFieldCodecType;
pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet}; pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter}; pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
pub use self::error::{FastFieldNotAvailableError, Result}; pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader; pub use self::facet_reader::FacetReader;
pub(crate) use self::gcd::{find_gcd, GCDReader, GCD_DEFAULT};
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter}; pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
pub use self::reader::DynamicFastFieldReader; pub use self::reader::{DynamicFastFieldReader, FastFieldReader};
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub(crate) use self::readers::{type_and_cardinality, FastType}; pub(crate) use self::readers::{type_and_cardinality, FastType};
pub use self::serializer::{Column, CompositeFastFieldSerializer, FastFieldStats}; pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats};
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::schema::{Cardinality, FieldType, Type, Value}; use crate::schema::{Cardinality, FieldType, Type, Value};
use crate::{DateTime, DocId}; use crate::{DateTime, DocId};
@@ -40,19 +37,12 @@ mod alive_bitset;
mod bytes; mod bytes;
mod error; mod error;
mod facet_reader; mod facet_reader;
mod gcd;
mod multivalued; mod multivalued;
mod reader; mod reader;
mod readers; mod readers;
mod serializer; mod serializer;
mod writer; mod writer;
pub(crate) const ALL_CODECS: &[FastFieldCodecType; 3] = &[
FastFieldCodecType::Bitpacked,
FastFieldCodecType::Linear,
FastFieldCodecType::BlockwiseLinear,
];
/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data /// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
/// for a doc_id /// for a doc_id
pub trait MultiValueLength { pub trait MultiValueLength {
@@ -62,13 +52,11 @@ pub trait MultiValueLength {
fn get_total_len(&self) -> u64; fn get_total_len(&self) -> u64;
} }
/// Trait for types that are allowed for fast fields: /// Trait for types that are allowed for fast fields: (u64, i64 and f64).
/// (u64, i64 and f64, bool, DateTime).
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static { pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
/// Converts a value from u64 /// Converts a value from u64
/// ///
/// Internally all fast field values are encoded as u64. /// Internally all fast field values are encoded as u64.
/// **Note: To be used for converting encoded Term, Posting values.**
fn from_u64(val: u64) -> Self; fn from_u64(val: u64) -> Self;
/// Converts a value to u64. /// Converts a value to u64.
@@ -172,56 +160,25 @@ impl FastValue for f64 {
} }
} }
impl FastValue for bool {
fn from_u64(val: u64) -> Self {
val != 0u64
}
fn to_u64(&self) -> u64 {
match self {
false => 0,
true => 1,
}
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Bool(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self as u64
}
fn to_type() -> Type {
Type::Bool
}
}
impl FastValue for DateTime { impl FastValue for DateTime {
/// Converts a timestamp in microseconds into a DateTime. fn from_u64(timestamp_u64: u64) -> Self {
/// let unix_timestamp = i64::from_u64(timestamp_u64);
/// **Note the timestamps is expected to be in microseconds.** Self::from_unix_timestamp(unix_timestamp)
fn from_u64(timestamp_micros_u64: u64) -> Self {
let timestamp_micros = i64::from_u64(timestamp_micros_u64);
Self::from_timestamp_micros(timestamp_micros)
} }
fn to_u64(&self) -> u64 { fn to_u64(&self) -> u64 {
common::i64_to_u64(self.into_timestamp_micros()) self.to_unix_timestamp().to_u64()
} }
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> { fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type { match *field_type {
FieldType::Date(ref options) => options.get_fastfield_cardinality(), FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None, _ => None,
} }
} }
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
self.into_timestamp_micros().as_u64() self.to_unix_timestamp().as_u64()
} }
fn to_type() -> Type { fn to_type() -> Type {
@@ -234,29 +191,8 @@ fn value_to_u64(value: &Value) -> u64 {
Value::U64(val) => val.to_u64(), Value::U64(val) => val.to_u64(),
Value::I64(val) => val.to_u64(), Value::I64(val) => val.to_u64(),
Value::F64(val) => val.to_u64(), Value::F64(val) => val.to_u64(),
Value::Bool(val) => val.to_u64(),
Value::Date(val) => val.to_u64(), Value::Date(val) => val.to_u64(),
_ => panic!("Expected a u64/i64/f64/bool/date field, got {:?} ", value), _ => panic!("Expected a u64/i64/f64/date field, got {:?} ", value),
}
}
/// The fast field type
pub enum FastFieldType {
/// Numeric type, e.g. f64.
Numeric,
/// Fast field stores string ids.
String,
/// Fast field stores string ids for facets.
Facet,
}
impl FastFieldType {
fn is_storing_term_ids(&self) -> bool {
matches!(self, FastFieldType::String | FastFieldType::Facet)
}
fn is_facet(&self) -> bool {
matches!(self, FastFieldType::Facet)
} }
} }
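Every `FastValue` impl in this hunk hinges on a lossless round-trip through `u64`, with `value_to_u64` above as the dispatch point. A toy illustration of the convention for `bool`, mirroring the removed impl:

// 0 <-> false, 1 <-> true; any non-zero u64 decodes to true.
fn bool_to_u64(value: bool) -> u64 {
    value as u64
}

fn bool_from_u64(encoded: u64) -> bool {
    encoded != 0
}

// bool_from_u64(bool_to_u64(true)) == true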
@@ -264,7 +200,6 @@ impl FastFieldType {
mod tests { mod tests {
use std::collections::HashMap; use std::collections::HashMap;
use std::ops::Range;
use std::path::Path; use std::path::Path;
use common::HasLen; use common::HasLen;
@@ -276,9 +211,9 @@ mod tests {
use super::*; use super::*;
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr}; use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::merge_policy::NoMergePolicy; use crate::merge_policy::NoMergePolicy;
use crate::schema::{Document, Field, Schema, FAST, STRING, TEXT}; use crate::schema::{Document, Field, NumericOptions, Schema, FAST};
use crate::time::OffsetDateTime; use crate::time::OffsetDateTime;
use crate::{DateOptions, DatePrecision, Index, SegmentId, SegmentReader}; use crate::{Index, SegmentId, SegmentReader};
    pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
        let mut schema_builder = Schema::builder();
@@ -286,26 +221,19 @@ mod tests {
        schema_builder.build()
    });

-    pub static SCHEMAI64: Lazy<Schema> = Lazy::new(|| {
-        let mut schema_builder = Schema::builder();
-        schema_builder.add_i64_field("field", FAST);
-        schema_builder.build()
-    });

    pub static FIELD: Lazy<Field> = Lazy::new(|| SCHEMA.get_field("field").unwrap());
-    pub static FIELDI64: Lazy<Field> = Lazy::new(|| SCHEMAI64.get_field("field").unwrap());

    #[test]
    pub fn test_fastfield() {
        let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
-        assert_eq!(test_fastfield.get_val(0u64), 100);
-        assert_eq!(test_fastfield.get_val(1u64), 200);
-        assert_eq!(test_fastfield.get_val(2u64), 300);
+        assert_eq!(test_fastfield.get(0), 100);
+        assert_eq!(test_fastfield.get(1), 200);
+        assert_eq!(test_fastfield.get(2), 300);
    }

    #[test]
    pub fn test_fastfield_i64_u64() {
-        let datetime = DateTime::from_utc(OffsetDateTime::UNIX_EPOCH);
+        let datetime = DateTime::new_utc(OffsetDateTime::UNIX_EPOCH);
        assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
    }
@@ -326,13 +254,13 @@ mod tests {
            serializer.close().unwrap();
        }
        let file = directory.open_read(path).unwrap();
-        assert_eq!(file.len(), 45);
+        assert_eq!(file.len(), 37);
        let composite_file = CompositeFile::open(&file)?;
        let file = composite_file.open_read(*FIELD).unwrap();
        let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
-        assert_eq!(fast_field_reader.get_val(0), 13u64);
-        assert_eq!(fast_field_reader.get_val(1), 14u64);
-        assert_eq!(fast_field_reader.get_val(2), 2u64);
+        assert_eq!(fast_field_reader.get(0), 13u64);
+        assert_eq!(fast_field_reader.get(1), 14u64);
+        assert_eq!(fast_field_reader.get(2), 2u64);
        Ok(())
    }
@@ -357,20 +285,20 @@ mod tests {
            serializer.close()?;
        }
        let file = directory.open_read(path)?;
-        assert_eq!(file.len(), 70);
+        assert_eq!(file.len(), 62);
        {
            let fast_fields_composite = CompositeFile::open(&file)?;
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
-            assert_eq!(fast_field_reader.get_val(0), 4u64);
-            assert_eq!(fast_field_reader.get_val(1), 14_082_001u64);
-            assert_eq!(fast_field_reader.get_val(2), 3_052u64);
-            assert_eq!(fast_field_reader.get_val(3), 9002u64);
-            assert_eq!(fast_field_reader.get_val(4), 15_001u64);
-            assert_eq!(fast_field_reader.get_val(5), 777u64);
-            assert_eq!(fast_field_reader.get_val(6), 1_002u64);
-            assert_eq!(fast_field_reader.get_val(7), 1_501u64);
-            assert_eq!(fast_field_reader.get_val(8), 215u64);
+            assert_eq!(fast_field_reader.get(0), 4u64);
+            assert_eq!(fast_field_reader.get(1), 14_082_001u64);
+            assert_eq!(fast_field_reader.get(2), 3_052u64);
+            assert_eq!(fast_field_reader.get(3), 9002u64);
+            assert_eq!(fast_field_reader.get(4), 15_001u64);
+            assert_eq!(fast_field_reader.get(5), 777u64);
+            assert_eq!(fast_field_reader.get(6), 1_002u64);
+            assert_eq!(fast_field_reader.get(7), 1_501u64);
+            assert_eq!(fast_field_reader.get(8), 215u64);
        }
        Ok(())
    }
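The expected file lengths asserted in these tests differ between the two sides because the default codec changed. For a plain bitpacked codec, the payload can be estimated from the value amplitude alone; the sketch below illustrates that estimate with the nine values used above. This is an assumption about the general bitpacking scheme, not the serializer's exact byte layout, which also adds per-field and per-file overhead.

// Editor's sketch: bit width needed to bitpack values once they are offset by
// their minimum, and the resulting payload size for the test's nine values.
fn bits_per_value(min: u64, max: u64) -> u32 {
    let amplitude = max - min;
    64 - amplitude.leading_zeros()
}

fn main() {
    let vals = [4u64, 14_082_001, 3_052, 9002, 15_001, 777, 1_002, 1_501, 215];
    let min = *vals.iter().min().unwrap();
    let max = *vals.iter().max().unwrap();
    let bits = bits_per_value(min, max);
    let payload_bytes = (vals.len() as u32 * bits + 7) / 8;
    // 9 values * 24 bits is roughly 27 bytes of payload; the rest of the
    // asserted file length is fixed header/footer overhead.
    println!("{} values x {} bits ~= {} bytes", vals.len(), bits, payload_bytes);
}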
@@ -393,13 +321,13 @@ mod tests {
            serializer.close().unwrap();
        }
        let file = directory.open_read(path).unwrap();
-        assert_eq!(file.len(), 43);
+        assert_eq!(file.len(), 35);
        {
            let fast_fields_composite = CompositeFile::open(&file).unwrap();
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
            for doc in 0..10_000 {
-                assert_eq!(fast_field_reader.get_val(doc), 100_000u64);
+                assert_eq!(fast_field_reader.get(doc), 100_000u64);
            }
        }
        Ok(())
@@ -425,15 +353,15 @@ mod tests {
            serializer.close().unwrap();
        }
        let file = directory.open_read(path).unwrap();
-        assert_eq!(file.len(), 80051);
+        assert_eq!(file.len(), 80043);
        {
            let fast_fields_composite = CompositeFile::open(&file)?;
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
-            assert_eq!(fast_field_reader.get_val(0), 0u64);
+            assert_eq!(fast_field_reader.get(0), 0u64);
            for doc in 1..10_001 {
                assert_eq!(
-                    fast_field_reader.get_val(doc),
+                    fast_field_reader.get(doc),
                    5_000_000_000_000_000_000u64 + doc as u64 - 1u64
                );
            }
@@ -442,7 +370,7 @@ mod tests {
    }
    #[test]
-    fn test_signed_intfastfield_normal() -> crate::Result<()> {
+    fn test_signed_intfastfield() -> crate::Result<()> {
        let path = Path::new("test");
        let directory: RamDirectory = RamDirectory::create();
        let mut schema_builder = Schema::builder();
@@ -464,9 +392,7 @@ mod tests {
            serializer.close().unwrap();
        }
        let file = directory.open_read(path).unwrap();
-        // assert_eq!(file.len(), 17710 as usize); //bitpacked size
-        // assert_eq!(file.len(), 10175_usize); // linear interpol size
-        assert_eq!(file.len(), 75_usize); // linear interpol size after calc improvement
+        assert_eq!(file.len(), 12471_usize); // Piecewise linear codec size
        {
            let fast_fields_composite = CompositeFile::open(&file)?;
            let data = fast_fields_composite.open_read(i64_field).unwrap();
@@ -475,7 +401,7 @@ mod tests {
            assert_eq!(fast_field_reader.min_value(), -100i64);
            assert_eq!(fast_field_reader.max_value(), 9_999i64);
            for (doc, i) in (-100i64..10_000i64).enumerate() {
-                assert_eq!(fast_field_reader.get_val(doc as u64), i);
+                assert_eq!(fast_field_reader.get(doc as u32), i);
            }
            let mut buffer = vec![0i64; 100];
            fast_field_reader.get_range(53, &mut buffer[..]);
@@ -511,7 +437,7 @@ mod tests {
            let fast_fields_composite = CompositeFile::open(&file).unwrap();
            let data = fast_fields_composite.open_read(i64_field).unwrap();
            let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
-            assert_eq!(fast_field_reader.get_val(0), 0i64);
+            assert_eq!(fast_field_reader.get(0u32), 0i64);
        }
        Ok(())
    }
@@ -523,15 +449,10 @@ mod tests {
        permutation
    }

-    // Warning: this generates the same permutation at each call
-    pub fn generate_permutation_gcd() -> Vec<u64> {
-        let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
-        permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
-        permutation
-    }

-    fn test_intfastfield_permutation_with_data(permutation: Vec<u64>) -> crate::Result<()> {
+    #[test]
+    fn test_intfastfield_permutation() -> crate::Result<()> {
        let path = Path::new("test");
+        let permutation = generate_permutation();
        let n = permutation.len();
        let directory = RamDirectory::create();
        {
@@ -550,27 +471,15 @@ mod tests {
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
-            for a in 0..n {
-                assert_eq!(fast_field_reader.get_val(a as u64), permutation[a as usize]);
+            let mut a = 0u64;
+            for _ in 0..n {
+                assert_eq!(fast_field_reader.get(a as u32), permutation[a as usize]);
+                a = fast_field_reader.get(a as u32);
            }
        }
        Ok(())
    }

-    #[test]
-    fn test_intfastfield_permutation_gcd() -> crate::Result<()> {
-        let permutation = generate_permutation_gcd();
-        test_intfastfield_permutation_with_data(permutation)?;
-        Ok(())
-    }

-    #[test]
-    fn test_intfastfield_permutation() -> crate::Result<()> {
-        let permutation = generate_permutation();
-        test_intfastfield_permutation_with_data(permutation)?;
-        Ok(())
-    }
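generate_permutation_gcd on the base side builds a dataset whose values all share the divisor 1000, which is exactly the case a GCD-aware fast field codec can exploit: dividing every value by the common divisor shrinks the amplitude, so fewer bits per value are needed, and the divisor is stored once. The sketch below only illustrates that idea; it is an editor's illustration, not the codec's actual code.

// Editor's illustration of the GCD trick on the same dataset shape.
fn gcd(mut a: u64, mut b: u64) -> u64 {
    while b != 0 {
        let t = a % b;
        a = b;
        b = t;
    }
    a
}

fn main() {
    let vals: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
    let common = vals.iter().copied().fold(0u64, gcd);
    assert_eq!(common, 1000);
    let max_before = *vals.iter().max().unwrap();
    let max_after = max_before / common;
    // 64 - leading_zeros = bits required to bitpack the largest value.
    println!(
        "bits/value: {} before, {} after dividing by the gcd",
        64 - max_before.leading_zeros(),
        64 - max_after.leading_zeros()
    );
}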
    #[test]
    fn test_merge_missing_date_fast_field() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
@@ -580,7 +489,7 @@ mod tests {
        let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer
-            .add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc())))?;
+            .add_document(doc!(date_field =>DateTime::new_utc(OffsetDateTime::now_utc())))?;
        index_writer.commit()?;
        index_writer.add_document(doc!())?;
        index_writer.commit()?;
@@ -599,222 +508,18 @@ mod tests {
    }

    #[test]
-    fn test_default_date() {
-        assert_eq!(0, DateTime::make_zero().into_timestamp_secs());
+    fn test_default_datetime() {
+        assert_eq!(0, DateTime::make_zero().to_unix_timestamp());
    }
fn get_vals_for_docs(ff: &MultiValuedFastFieldReader<u64>, docs: Range<u32>) -> Vec<u64> {
let mut all = vec![];
for doc in docs {
let mut out = vec![];
ff.get_vals(doc, &mut out);
all.extend(out);
}
all
}
#[test]
fn test_text_fastfield() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT | FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// first segment
let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
text_field => "BBBBB AAAAA", // term_ord 1,2
))?;
index_writer.add_document(doc!())?;
index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0
))?;
index_writer.add_document(doc!(
text_field => "AAAAA BBBBB", // term_ord 0
))?;
index_writer.add_document(doc!(
text_field => "zumberthree", // term_ord 2, after merge term_ord 3
))?;
index_writer.add_document(doc!())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(
get_vals_for_docs(&text_fast_field, 0..5),
vec![1, 0, 0, 0, 1, 2]
);
let mut out = vec![];
text_fast_field.get_vals(3, &mut out);
assert_eq!(out, vec![0, 1]);
let inverted_index = segment_reader.inverted_index(text_field)?;
assert_eq!(inverted_index.terms().num_terms(), 3);
let mut bytes = vec![];
assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
// default tokenizer applies lower case
assert_eq!(bytes, "aaaaa".as_bytes());
}
{
// second segment
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0
))?;
index_writer.add_document(doc!(
text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
))?;
index_writer.add_document(doc!())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
let segment_reader = searcher.segment_reader(1);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(get_vals_for_docs(&text_fast_field, 0..3), vec![0, 1, 0]);
}
// Merging the segments
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
let reader = index.reader()?;
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(
get_vals_for_docs(&text_fast_field, 0..8),
vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2, 0]
);
Ok(())
}
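The text fast field above stores term ordinals, not strings; they only become readable again through the segment's term dictionary, which is what the ord_to_term call in the test does. A hypothetical helper assembled from the same calls, shown here only to make that resolution step explicit (editor's sketch, minimal error handling):

fn ords_to_terms(
    segment_reader: &SegmentReader,
    field: Field,
    ords: &[u64],
) -> crate::Result<Vec<String>> {
    // Resolve each stored ordinal back to its term bytes via the term dictionary.
    let inverted_index = segment_reader.inverted_index(field)?;
    let mut terms = Vec::new();
    for &ord in ords {
        let mut bytes = vec![];
        if inverted_index.terms().ord_to_term(ord, &mut bytes)? {
            terms.push(String::from_utf8_lossy(&bytes).into_owned());
        }
    }
    Ok(terms)
}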
#[test]
fn test_string_fastfield() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING | FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// first segment
let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
text_field => "BBBBB", // term_ord 1
))?;
index_writer.add_document(doc!())?;
index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0
))?;
index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0
))?;
index_writer.add_document(doc!(
text_field => "zumberthree", // term_ord 2, after merge term_ord 3
))?;
index_writer.add_document(doc!())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(get_vals_for_docs(&text_fast_field, 0..6), vec![1, 0, 0, 2]);
let inverted_index = segment_reader.inverted_index(text_field)?;
assert_eq!(inverted_index.terms().num_terms(), 3);
let mut bytes = vec![];
assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
assert_eq!(bytes, "AAAAA".as_bytes());
}
{
// second segment
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0
))?;
index_writer.add_document(doc!(
text_field => "CCCCC", // term_ord 1, after merge 2
))?;
index_writer.add_document(doc!())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
let segment_reader = searcher.segment_reader(1);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(get_vals_for_docs(&text_fast_field, 0..2), vec![0, 1]);
}
// Merging the segments
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
let reader = index.reader()?;
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(
get_vals_for_docs(&text_fast_field, 0..9),
vec![1, 0, 0, 3 /* next segment */, 0, 2]
);
Ok(())
    }

    #[test]
    fn test_datefastfield() -> crate::Result<()> {
        use crate::fastfield::FastValue;
        let mut schema_builder = Schema::builder();
-        let date_field = schema_builder.add_date_field(
-            "date",
-            DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
-        );
+        let date_field = schema_builder.add_date_field("date", FAST);
        let multi_date_field = schema_builder.add_date_field(
            "multi_date",
-            DateOptions::default()
-                .set_precision(DatePrecision::Microseconds)
-                .set_fast(Cardinality::MultiValues),
+            NumericOptions::default().set_fast(Cardinality::MultiValues),
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
@@ -842,156 +547,60 @@ mod tests {
        let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
        let mut dates = vec![];
        {
-            assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
+            assert_eq!(date_fast_field.get(0u32).to_unix_timestamp(), 1i64);
            dates_fast_field.get_vals(0u32, &mut dates);
            assert_eq!(dates.len(), 2);
-            assert_eq!(dates[0].into_timestamp_micros(), 2i64);
-            assert_eq!(dates[1].into_timestamp_micros(), 3i64);
+            assert_eq!(dates[0].to_unix_timestamp(), 2i64);
+            assert_eq!(dates[1].to_unix_timestamp(), 3i64);
        }
        {
-            assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
+            assert_eq!(date_fast_field.get(1u32).to_unix_timestamp(), 4i64);
            dates_fast_field.get_vals(1u32, &mut dates);
            assert!(dates.is_empty());
        }
        {
-            assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
+            assert_eq!(date_fast_field.get(2u32).to_unix_timestamp(), 0i64);
            dates_fast_field.get_vals(2u32, &mut dates);
            assert_eq!(dates.len(), 2);
-            assert_eq!(dates[0].into_timestamp_micros(), 5i64);
-            assert_eq!(dates[1].into_timestamp_micros(), 6i64);
+            assert_eq!(dates[0].to_unix_timestamp(), 5i64);
+            assert_eq!(dates[1].to_unix_timestamp(), 6i64);
        }
        Ok(())
    }
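On the base side the date fields are declared with DateOptions and a DatePrecision, i.e. the stored integer is truncated to the requested unit before it is encoded. The sketch below shows what such a truncation amounts to when timestamps are held as microseconds since the UNIX epoch; it is a hypothetical helper, not tantivy's DatePrecision implementation.

// Editor's sketch of precision truncation on microsecond timestamps.
enum Precision {
    Seconds,
    Milliseconds,
    Microseconds,
}

fn truncate_micros(timestamp_micros: i64, precision: Precision) -> i64 {
    let unit = match precision {
        Precision::Seconds => 1_000_000,
        Precision::Milliseconds => 1_000,
        Precision::Microseconds => 1,
    };
    // Rounds toward zero; pre-epoch timestamps would need floor division.
    (timestamp_micros / unit) * unit
}

fn main() {
    let t = 1_648_000_123_456_789i64;
    assert_eq!(truncate_micros(t, Precision::Seconds), 1_648_000_123_000_000);
    assert_eq!(truncate_micros(t, Precision::Milliseconds), 1_648_000_123_456_000);
    assert_eq!(truncate_micros(t, Precision::Microseconds), t);
}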
#[test]
pub fn test_fastfield_bool() {
let test_fastfield = DynamicFastFieldReader::<bool>::from(vec![true, false, true, false]);
assert_eq!(test_fastfield.get_val(0), true);
assert_eq!(test_fastfield.get_val(1), false);
assert_eq!(test_fastfield.get_val(2), true);
assert_eq!(test_fastfield.get_val(3), false);
}
#[test]
pub fn test_fastfield_bool_small() -> crate::Result<()> {
let path = Path::new("test_bool");
let directory: RamDirectory = RamDirectory::create();
let mut schema_builder = Schema::builder();
schema_builder.add_bool_field("field_bool", FAST);
let schema = schema_builder.build();
let field = schema.get_field("field_bool").unwrap();
{
let write: WritePtr = directory.open_write(path).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
fast_field_writers.add_document(&doc!(field=>true));
fast_field_writers.add_document(&doc!(field=>false));
fast_field_writers.add_document(&doc!(field=>true));
fast_field_writers.add_document(&doc!(field=>false));
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 44);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), true);
assert_eq!(fast_field_reader.get_val(1), false);
assert_eq!(fast_field_reader.get_val(2), true);
assert_eq!(fast_field_reader.get_val(3), false);
Ok(())
}
#[test]
pub fn test_fastfield_bool_large() -> crate::Result<()> {
let path = Path::new("test_bool");
let directory: RamDirectory = RamDirectory::create();
let mut schema_builder = Schema::builder();
schema_builder.add_bool_field("field_bool", FAST);
let schema = schema_builder.build();
let field = schema.get_field("field_bool").unwrap();
{
let write: WritePtr = directory.open_write(path).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
for _ in 0..50 {
fast_field_writers.add_document(&doc!(field=>true));
fast_field_writers.add_document(&doc!(field=>false));
}
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 56);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
for i in 0..25 {
assert_eq!(fast_field_reader.get_val(i * 2), true);
assert_eq!(fast_field_reader.get_val(i * 2 + 1), false);
}
Ok(())
}
#[test]
pub fn test_fastfield_bool_default_value() -> crate::Result<()> {
let path = Path::new("test_bool");
let directory: RamDirectory = RamDirectory::create();
let mut schema_builder = Schema::builder();
schema_builder.add_bool_field("field_bool", FAST);
let schema = schema_builder.build();
let field = schema.get_field("field_bool").unwrap();
{
let write: WritePtr = directory.open_write(path).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
let doc = Document::default();
fast_field_writers.add_document(&doc);
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 43);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), false);
Ok(())
}
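The bool fast-field tests on the base side reuse the existing u64 pipeline; the mapping itself is the obvious one, and with an amplitude of 1 a bit packer only needs a single bit per document, which is consistent with the small, slowly-growing file sizes asserted above. A minimal sketch of the assumed mapping, not the actual FastValue impl:

// Editor's sketch: a bool fast value rides on the u64 pipeline.
fn bool_to_u64(val: bool) -> u64 {
    u64::from(val)
}

fn u64_to_bool(val: u64) -> bool {
    val != 0
}

fn main() {
    assert_eq!(bool_to_u64(false), 0);
    assert_eq!(bool_to_u64(true), 1);
    assert!(u64_to_bool(bool_to_u64(true)));
    // At one bit per document, going from 4 to 100 documents adds 96 bits,
    // i.e. 12 bytes, matching the 44 -> 56 byte growth asserted above.
}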
}

#[cfg(all(test, feature = "unstable"))]
mod bench {
-    use fastfield_codecs::Column;
+    use std::collections::HashMap;
+    use std::path::Path;

    use test::{self, Bencher};

-    use super::tests::generate_permutation;
+    use super::tests::{generate_permutation, FIELD, SCHEMA};
    use super::*;
-    use crate::fastfield::tests::generate_permutation_gcd;
+    use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
+    use crate::fastfield::FastFieldReader;

    #[bench]
-    fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        let n = permutation.len();
-        b.iter(|| {
-            let mut a = 0u64;
-            for _ in 0..n {
-                a = permutation[a as usize];
-            }
-            a
-        });
-    }
+    fn bench_intfastfield_linear_veclookup(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        b.iter(|| {
+            let n = test::black_box(7000u32);
+            let mut a = 0u64;
+            for i in (0u32..n / 7).map(|v| v * 7) {
+                a ^= permutation[i as usize];
+            }
+            a
+        });
+    }
+
+    #[bench]
+    fn bench_intfastfield_veclookup(b: &mut Bencher) {
+        let permutation = generate_permutation();
+        b.iter(|| {
+            let n = test::black_box(1000u32);
+            let mut a = 0u64;
+            for _ in 0u32..n {
+                a = permutation[a as usize];
+            }
+            a
+        });
+    }
@@ -999,83 +608,70 @@ mod bench {

    #[bench]
-    fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        let n = permutation.len();
-        let column = DynamicFastFieldReader::from(permutation);
-        b.iter(|| {
-            let mut a = 0u64;
-            for _ in 0..n {
-                a = column.get_val(a as u64);
-            }
-            a
-        });
-    }
+    fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
+        let path = Path::new("test");
+        let permutation = generate_permutation();
+        let directory: RamDirectory = RamDirectory::create();
+        {
+            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
+            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
+            let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
+            for &x in &permutation {
+                fast_field_writers.add_document(&doc!(*FIELD=>x));
+            }
+            fast_field_writers
+                .serialize(&mut serializer, &HashMap::new(), None)
+                .unwrap();
+            serializer.close().unwrap();
+        }
+        let file = directory.open_read(&path).unwrap();
+        {
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            let data = fast_fields_composite.open_read(*FIELD).unwrap();
+            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
+            b.iter(|| {
+                let n = test::black_box(7000u32);
+                let mut a = 0u64;
+                for i in (0u32..n / 7).map(|val| val * 7) {
+                    a ^= fast_field_reader.get(i);
+                }
+                a
+            });
+        }
+    }

    #[bench]
-    fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        let n = permutation.len();
-        b.iter(|| {
-            let mut a = 0u64;
-            for i in (0..n / 7).map(|val| val * 7) {
-                a += permutation[i as usize];
-            }
-            a
-        });
-    }
-
-    #[bench]
-    fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        let n = permutation.len();
-        let column = DynamicFastFieldReader::from(permutation);
-        b.iter(|| {
-            let mut a = 0u64;
-            for i in (0..n / 7).map(|val| val * 7) {
-                a += column.get_val(i as u64);
-            }
-            a
-        });
-    }
-
-    #[bench]
-    fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        let n = permutation.len();
-        let column = DynamicFastFieldReader::from(permutation);
-        b.iter(|| {
-            let mut a = 0u64;
-            for i in 0u64..n as u64 {
-                a += column.get_val(i);
-            }
-            a
-        });
-    }
-
-    #[bench]
-    fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
-        let permutation = generate_permutation_gcd();
-        let n = permutation.len();
-        let column = DynamicFastFieldReader::from(permutation);
-        b.iter(|| {
-            let mut a = 0u64;
-            for i in 0..n as u64 {
-                a += column.get_val(i);
-            }
-            a
-        });
-    }
-
-    #[bench]
-    fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
-        let permutation = generate_permutation();
-        b.iter(|| {
-            let mut a = 0u64;
-            for i in 0..permutation.len() {
-                a += permutation[i as usize] as u64;
-            }
-            a
-        });
-    }
+    fn bench_intfastfield_fflookup(b: &mut Bencher) {
+        let path = Path::new("test");
+        let permutation = generate_permutation();
+        let directory: RamDirectory = RamDirectory::create();
+        {
+            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
+            let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
+            let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
+            for &x in &permutation {
+                fast_field_writers.add_document(&doc!(*FIELD=>x));
+            }
+            fast_field_writers
+                .serialize(&mut serializer, &HashMap::new(), None)
+                .unwrap();
+            serializer.close().unwrap();
+        }
+        let file = directory.open_read(&path).unwrap();
+        {
+            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            let data = fast_fields_composite.open_read(*FIELD).unwrap();
+            let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
+            b.iter(|| {
+                let n = test::black_box(1000u32);
+                let mut a = 0u32;
+                for _ in 0u32..n {
+                    a = fast_field_reader.get(a) as u32;
+                }
+                a
+            });
+        }
+    }
}
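The two benchmark families differ mostly in access pattern: the chained lookups (a = reader.get(a)) form a data dependency, so they measure lookup latency, while the stride-7 and scan-all loops issue independent loads and measure throughput. Below is a self-contained illustration over a plain Vec, with function names that are the editor's own rather than the benchmark file's.

// Illustrative only: dependent ("jumpy") vs. independent (strided) access.
fn chased_lookups(values: &[u64], iterations: usize) -> u64 {
    // Each index depends on the previously loaded value: measures latency.
    let mut a = 0u64;
    for _ in 0..iterations {
        a = values[a as usize % values.len()];
    }
    a
}

fn strided_sum(values: &[u64], stride: usize) -> u64 {
    // Independent loads the CPU can overlap: measures throughput.
    values.iter().step_by(stride).sum()
}

fn main() {
    let values: Vec<u64> = (0..100_000u64).map(|v| (v * 7919 + 13) % 100_000).collect();
    println!("{} {}", chased_lookups(&values, 1_000), strided_sum(&values, 7));
}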
