Compare commits

..

12 Commits

Author SHA1 Message Date
Pascal Seitz
c62ddb61b7 rename, add position to docid function 2022-09-06 12:47:49 +08:00
Pascal Seitz
ed85ba62b3 make measure_time optional
move u128 vint to common, add u128 tests
2022-08-22 11:11:53 +02:00
Pascal Seitz
4b7ed27595 add multivalued ip fast field
fix null value handling in value range search
2022-08-18 12:54:20 +02:00
Pascal Seitz
66ccba2878 fix composite file issue, add proptest
Fix composite file issue. The composite file had an issue with the last written fast field, where the wrong field was set as the last range in the composite file due to sorting.
Fix handling of empty fastfields for ip codec.
2022-08-16 12:24:06 +02:00
Pascal Seitz
c56f4572f4 add merge code for u128 2022-08-12 08:48:02 +02:00
Pascal Seitz
399b137617 clippy 2022-08-11 18:50:59 +02:00
Pascal Seitz
f3efb41d4e fmt 2022-08-11 18:50:59 +02:00
Pascal Seitz
20a09282a1 plug u128 field writer 2022-08-11 18:50:59 +02:00
Pascal Seitz
1107400ae0 add null value detection for ip codec 2022-08-11 18:50:58 +02:00
Pascal Seitz
391f881fa1 fix upperrange outside compact space 2022-08-11 18:50:00 +02:00
Pascal Seitz
eec908e962 add ip codec 2022-08-11 18:50:00 +02:00
Pascal Seitz
4a1b251a08 add ip field 2022-08-11 18:50:00 +02:00
123 changed files with 4051 additions and 2822 deletions

View File

@@ -12,14 +12,12 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
- uses: Swatinem/rust-cache@v2
run: rustup toolchain install nightly --component llvm-tools-preview
- uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
files: lcov.info

View File

@@ -19,10 +19,11 @@ jobs:
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
components: rustfmt, clippy
- name: Run indexing_unsorted
run: cargo test indexing_unsorted -- --ignored
- name: Run indexing_sorted
run: cargo test indexing_sorted -- --ignored

View File

@@ -10,27 +10,34 @@ env:
CARGO_TERM_COLOR: always
jobs:
check:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install nightly
- name: Install latest nightly to test also against unstable feature flag
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
profile: minimal
override: true
components: rustfmt
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
components: rustfmt, clippy
- uses: Swatinem/rust-cache@v2
- name: Build
run: cargo build --verbose --workspace
- name: Run tests
run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints --verbose --workspace
- name: Run tests quickwit feature
run: cargo +stable test --features mmap,quickwit,failpoints --verbose --workspace
- name: Check Formatting
run: cargo +nightly fmt --all -- --check
@@ -41,34 +48,3 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
args: --tests
test:
runs-on: ubuntu-latest
strategy:
matrix:
features: [
{ label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
{ label: "quickwit", flags: "mmap,quickwit,failpoints" }
]
name: test-${{ matrix.features.label}}
steps:
- uses: actions/checkout@v3
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
- name: Run tests
run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace
- name: Run doctests
run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace

View File

@@ -10,7 +10,6 @@ Tantivy's bread and butter is to address the problem of full-text search :
Given a large set of textual documents and a text query, return the K most relevant documents in a very efficient way. To execute these queries rapidly, tantivy needs to build an index beforehand. The relevance score implemented in tantivy is not configurable. Tantivy uses the same score as the default similarity used in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).
But tantivy's scope does not stop there. Numerous features are required to power rich-search applications. For instance, one may want to:
- compute the count of documents matching a query in the different sections of an e-commerce website,
- display an average price per square meter for a real estate search engine,
- take into account historical user data to rank documents in a specific way,
@@ -23,28 +22,27 @@ rapidly select all documents matching a given predicate (also known as a query)
collect some information about them ([See collector](#collector-define-what-to-do-with-matched-documents)).
Roughly speaking, the design follows these guiding principles:
- Search should be O(1) in memory.
- Indexing should be O(1) in memory. (In practice it is just sublinear)
- Search should be as fast as possible
This comes at the cost of the dynamicity of the index: while it is possible to add and delete documents from our corpus, tantivy is designed to handle these updates in large batches.
## [core/](src/core): Index, segments, searchers
## [core/](src/core): Index, segments, searchers.
Core contains all of the high-level code to make it possible to create an index, add documents, delete documents and commit.
This is at once the most high-level part of tantivy, the least performance-sensitive one, and the seemingly most mundane code... and, paradoxically, the most complicated part.
### Index and Segments
### Index and Segments...
A tantivy index is a collection of smaller independent immutable segments.
Each segment contains its own independent set of data structures.
A segment is identified by a segment id that is in fact a UUID.
Each file of a segment has the format
```segment-id . ext```
```segment-id . ext ```
The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.
@@ -54,15 +52,17 @@ On commit, one segment per indexing thread is written to disk, and the `meta.jso
For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).
### Deletes
Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it was a primary id, and delete the associated term if they want to delete only one specific document.
On commit, tantivy will find all of the segments with documents matching this existing term and remove them from the [alive bitset file](src/fastfield/alive_bitset.rs), which represents the bitset of the alive document ids.
Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ```segment_id . commit_opstamp . del```.
Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ``` segment_id . commit_opstamp . del```.
An opstamp is simply an incremental id that identifies any operation applied to the index, such as performing a commit or adding a document.
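To make this flow concrete, here is a minimal sketch of delete-by-term, assuming a 0.18-era tantivy API and a hypothetical `id` field that the user treats as a primary key:

```rust
use tantivy::schema::{Schema, STORED, STRING, TEXT};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // Hypothetical `id` field: tantivy has no notion of a primary key,
    // so the user treats this raw-indexed field as one.
    let id = schema_builder.add_text_field("id", STRING | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(id => "doc-1", body => "hello, happy tax payer"))?;
    index_writer.commit()?;

    // Delete by term: every document containing this term is tombstoned.
    index_writer.delete_term(Term::from_field_text(id, "doc-1"));
    // On commit, the alive bitset file is (re)written for affected segments.
    index_writer.commit()?;
    Ok(())
}
```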
### DocId
Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`.
@@ -74,7 +74,6 @@ The DocIds are simply allocated in the order documents are added to the index.
In separate threads, tantivy's index writer searches for opportunities to merge segments.
The point of segment merge is to:
- eventually get rid of tombstoned documents
- reduce the otherwise ever-growing number of segments.
@@ -105,7 +104,6 @@ Tantivy's document follows a very strict schema, decided before building any ind
The schema defines all of the fields that the index's [`Document`](src/schema/document.rs) may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...) as well as how they should be indexed / represented in tantivy.
Depending on the type of the field, you can decide to
- put it in the docstore
- store it as a fast field
- index it
@@ -119,10 +117,9 @@ As of today, tantivy's schema imposes a 1:1 relationship between a field that is
This is not something tantivy supports, and it is up to the user to duplicate or concatenate fields before feeding them to tantivy.
## General information about these data structures
## General information about these data structures.
All data structures in tantivy have:
- a writer
- a serializer
- a reader
@@ -135,7 +132,7 @@ This conversion is done by the serializer.
Finally, the reader is in charge of offering an API to read this on-disk, read-only representation.
In tantivy, readers are designed to require very little anonymous memory. The data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.
## [store/](src/store): Here is my DocId, Gimme my document
## [store/](src/store): Here is my DocId, Gimme my document!
The docstore is a row-oriented storage that, for each document, stores a subset of the fields
that are marked as stored in the schema. The docstore is compressed using a general-purpose algorithm
@@ -149,7 +146,6 @@ Once the top 10 documents have been identified, we fetch them from the store, an
**Not useful for**
Fetching a document from the store is typically a "slow" operation. It usually consists of
- searching into a compact tree-like data structure to find the position of the right block.
- decompressing a small block
- returning the document from this block.
@@ -158,7 +154,8 @@ It is NOT meant to be called for every document matching a query.
As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.
## [fastfield/](src/fastfield): Here is my DocId, Gimme my value
## [fastfield/](src/fastfield): Here is my DocId, Gimme my value!
Fast fields are stored in a column-oriented storage that allows for random access.
The only compression applied is bitpacking. The column comes with two pieces of metadata:
@@ -166,7 +163,7 @@ The minimum value in the column and the number of bits per doc.
Fetching a value for a `DocId` is then as simple as computing
```rust
```
min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
```
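To make the formula concrete, here is a self-contained sketch of the read path. The helper is hypothetical (tantivy's actual implementation is the `BitUnpacker::get` that appears later in this diff) and assumes `num_bits <= 56` and a column padded so that 8 bytes can always be loaded:

```rust
/// Returns value number `idx` from a little-endian bitpacked column.
/// Assumes `num_bits <= 56` and at least 7 bytes of padding after the
/// data, so a single u64 load always covers the value.
fn fetch_value(data: &[u8], idx: u64, num_bits: u8, min_value: u64) -> u64 {
    if num_bits == 0 {
        return min_value; // all values are equal to min_value
    }
    let addr_in_bits = idx * num_bits as u64;
    let addr = (addr_in_bits >> 3) as usize; // byte offset of the value
    let bit_shift = addr_in_bits & 7; // bit offset within that byte
    let mut bytes = [0u8; 8];
    bytes.copy_from_slice(&data[addr..addr + 8]);
    let word = u64::from_le_bytes(bytes);
    let mask = (1u64 << num_bits) - 1;
    min_value + ((word >> bit_shift) & mask)
}
```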
@@ -193,7 +190,7 @@ For advanced search engine, it is possible to store all of the features required
Finally facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).
# The inverted search index
# The inverted search index.
The inverted index is the core part of full-text search.
When presented with a new document with the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting these strings into tokens, it might also perform different kinds of operations, like dropping the punctuation, converting characters to lowercase, applying stemming, etc. Tantivy makes it possible to configure the operations to be applied in the schema (tokenizer/ is the place where these operations are implemented).
@@ -218,18 +215,19 @@ The inverted index actually consists of two data structures chained together.
Where [TermInfo](src/postings/term_info.rs) is an object containing some meta data about a term.
## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)
## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)!
Tantivy's term dictionary is mainly in charge of supplying the function
[Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)
It is itself broken into two parts (see the sketch below).
- [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
- [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.
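Here is a toy sketch of that two-step lookup using the `fst` crate. The `TermInfo` struct and the `Vec`-backed term info store are simplifying assumptions, not tantivy's actual layout:

```rust
use fst::Map; // the crate tantivy uses for the Term -> TermOrdinal step

/// Simplified stand-in for tantivy's TermInfo.
#[derive(Clone, Debug)]
struct TermInfo {
    doc_freq: u32,
    postings_offset: u64,
}

/// Step 1: Term -> TermOrdinal via the finite state transducer.
/// Step 2: TermOrdinal -> TermInfo via the term info store (here a plain Vec).
fn term_info(term: &[u8], dict: &Map<Vec<u8>>, store: &[TermInfo]) -> Option<TermInfo> {
    let ordinal = dict.get(term)?;
    store.get(ordinal as usize).cloned()
}
```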
## [postings/](src/postings): Iterate over documents... very fast
## [postings/](src/postings): Iterate over documents... very fast!
A posting list makes it possible to store a sorted list of doc ids and, for each doc, a term frequency as well.
@@ -259,6 +257,7 @@ we advance the position reader by the number of term frequencies of the current
The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires knowing the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm.
The fieldnorm is therefore compressed. Values up to 40 are encoded unchanged.
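As a rough illustration of this one-byte encoding, here is a simplified, hypothetical stand-in (the exact quantization scheme below is an assumption; tantivy's real table lives in [fieldnorm/](src/fieldnorm)):

```rust
/// Values 0..=40 are stored exactly; larger token counts fall into coarse,
/// exponentially growing buckets so that any u32 count fits in one byte.
/// Decoding a bucket returns its lower bound, so precision degrades gracefully.
fn fieldnorm_to_byte(num_tokens: u32) -> u8 {
    if num_tokens <= 40 {
        num_tokens as u8
    } else {
        // The bucket index grows with log2 of the excess over 40 (at most 40 + 32 = 72).
        let log2 = 32 - (num_tokens - 40).leading_zeros() as u8;
        40 + log2
    }
}
```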
## [tokenizer/](src/tokenizer): How should we process text?
Text processing is key to a good search experience.
@@ -269,6 +268,7 @@ Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./
Tantivy comes with a few tokenizers, but external crates offer advanced tokenizers, such as [Lindera](https://crates.io/crates/lindera) for Japanese.
## [query/](src/query): Define and compose queries
The [Query](src/query/query.rs) trait defines what a query is.

View File

@@ -1,6 +1,5 @@
Tantivy 0.19
================================
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
The `DateTime` type has been updated to hold timestamps with microseconds precision.
`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used as a hint for fast value compression; seconds precision is used everywhere else (i.e. terms, indexing).
@@ -8,7 +7,6 @@ Tantivy 0.19
Tantivy 0.18
================================
- For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
- The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
- The type alias `tantivy::DateTime` has been removed.
@@ -24,7 +22,6 @@ Tantivy 0.18
Tantivy 0.17
================================
- LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
- Adds a searcher Warmer API (@shikhar @fulmicoton)
- Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
@@ -39,39 +36,33 @@ Tantivy 0.17
Tantivy 0.16.2
================================
- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)
Tantivy 0.16.1
========================
- Major Bugfix on multivalued fastfield. #1151
- Demux operation (@PSeitz)
Tantivy 0.16.0
=========================
- Bugfix in the filesum check. (@evanxg852000) #1127
- Bugfix in positions when the index is sorted by a field. (@appaquet) #1125
Tantivy 0.15.3
=========================
- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
Tantivy 0.15.2
========================
- Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088
Tantivy 0.15.1
=========================
- Major bugfix. DocStore panics when first block is deleted. (@appaquet) #1077
Tantivy 0.15.0
=========================
- API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
This change is breaking but migration is trivial.
- Added an Histogram collector. (@fulmicoton) #994
@@ -93,9 +84,9 @@ Tantivy 0.15.0
- Updated TermMerger implementation to rely on the union feature of the FST (@scampi) #469
- Add boolean marking whether position is required in the query_terms API call (@fulmicoton). #1070
Tantivy 0.14.0
=========================
- Remove dependency on atomicwrites #833. (Implemented by @fulmicoton upon suggestion and research from @asafigan.)
- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
- API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
@@ -114,19 +105,16 @@ This version breaks compatibility and requires users to reindex everything.
Tantivy 0.13.2
===================
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`. (#896)
Tantivy 0.13.1
===================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.
Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (BlockWAND information is added in the skiplist).
The index size increase is minor as this information is only added for
@@ -141,7 +129,6 @@ so that we can discuss possible solutions.
A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
As a result, iterating through a DocSet now looks as follows
```rust
let mut doc = docset.doc();
while doc != TERMINATED {
@@ -149,9 +136,7 @@ while doc != TERMINATED {
doc = docset.advance();
}
```
The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
- Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
@@ -159,7 +144,6 @@ to the PISA team for answering all my questions!)
Tantivy 0.12.0
======================
- Removing static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
@@ -170,32 +154,30 @@ Tantivy 0.12.0
## How to update?
Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
minor changes. Check <https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs>
minor changes. Check https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs
for some code samples.
Tantivy 0.11.3
=======================
- Fixed DateTime as a fast field (#735)
Tantivy 0.11.2
=======================
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
- Exposing a constructor for `WatchHandle` (#731)
Tantivy 0.11.1
=====================
- Bug fix #729
Tantivy 0.11.0
=====================
- Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
- Various bugfixes in the query parser.
- Better handling of hyphens in query parser. (#609)
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
@@ -226,6 +208,7 @@ Tantivy 0.10.1
Avoid watching the mmap directory until someone effectively creates a reader that uses
this functionality.
Tantivy 0.10.0
=====================
@@ -241,7 +224,6 @@ Tantivy 0.10.0
Minor
---------
- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
Calling .freq() or .doc() when .advance() has never been called
@@ -249,7 +231,8 @@ on segment postings should panic from now on.
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
- Fast fields are now preloaded when the `SegmentReader` is created.
- `IndexMeta` is now public. (@hntd187)
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
only require a read lock. (@fulmicoton)
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
@@ -265,17 +248,16 @@ Your program should be usable as is.
Fast fields used to be accessed directly from the `SegmentReader`.
The API changed, you are now required to acquire your fast field reader via the
`segment_reader.fast_fields()`, and use one of the typed methods:
- `.u64()`, `.i64()` if your field is single-valued ;
- `.u64s()`, `.i64s()` if your field is multi-valued ;
- `.bytes()` if your field is bytes fast field.
Tantivy 0.9.0
=====================
*0.9.0 index format is not compatible with the
previous index format.*
- MAJOR BUGFIX :
Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
- Removed most unsafe (@fulmicoton)
@@ -319,40 +301,37 @@ To update from tantivy 0.8, you will need to go through the following steps.
```
Tantivy 0.8.2
=====================
Fixing build for x86_64 platforms. (#496)
No need to update from 0.8.1 if tantivy
is building on your platform.
Tantivy 0.8.1
=====================
Hotfix of #476.
Merge was reflecting deletes before commit was passed.
Thanks @barrotsteindev for reporting the bug.
Tantivy 0.8.0
=====================
*No change in the index format*
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
- Multithreaded search (@jwolfe, @fulmicoton)
Tantivy 0.7.1
=====================
*No change in the index format*
- Bugfix: NGramTokenizer panics on non ascii chars
- Added a space usage API
Tantivy 0.7
=====================
- Skip data for doc ids and positions (@fulmicoton),
greatly improving performance
- Tantivy error now rely on the failure crate (@drusellers)
@@ -362,15 +341,15 @@ Tantivy 0.7
Tantivy 0.6.1
=========================
- Bugfix #324. GC was removing files that were still in use
- Added support for parsing AllQuery and RangeQuery via QueryParser
- AllQuery: `*`
- RangeQuery:
- Inclusive `field:[startIncl to endIncl]`
- Exclusive `field:{startExcl to endExcl}`
- Mixed `field:[startIncl to endExcl}` and vice versa
- Unbounded `field:[start to *]`, `field:[* to end]`
Tantivy 0.6
==========================
@@ -383,53 +362,58 @@ to this release!
- Approximate field norms encoded over 1 byte. (@fulmicoton)
- Compiles on stable rust (@fulmicoton)
- Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
- Completely uncompressed
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
- Add NGram token support (@drusellers)
- Add Stopword Filter support (@drusellers)
- Add a FuzzyTermQuery (@drusellers)
- Add a RegexQuery (@drusellers)
- Various performance improvements (@fulmicoton)
Tantivy 0.5.2
===========================
- bugfix #274
- bugfix #280
- bugfix #289
Tantivy 0.5.1
==========================
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
Tantivy 0.5
==========================
- Faceting
- RangeQuery
- Configurable tokenization pipeline
- Bugfix in PhraseQuery
- Various query optimisation
- Allowing very large indexes
- 64 bits file address
- Smarter encoding of the `TermInfo` objects
Tantivy 0.4.3
==========================
- Bugfix race condition when deleting files. (#198)
Tantivy 0.4.2
==========================
- Prevent usage of AVX2 instructions (#201)
Tantivy 0.4.1
==========================
- Bugfix for non-indexed fields. (#199)
Tantivy 0.4.0
==========================
@@ -444,31 +428,37 @@ Tantivy 0.4.0
- Searching for a non-indexed field returns an explicit Error
- Phrase queries for non-tokenized fields are not tokenized by the query parser.
- Faster/Better indexing (@fulmicoton)
- using murmurhash2
- faster merging
- more memory efficient fast field writer (@lnicola )
- better handling of collisions
- lesser memory usage
- Added API, most notably to iterate over ranges of terms (@fulmicoton)
- Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
- Made the doc! macro public (@fulmicoton)
- Added an alternative implementation of the streaming dictionary (@fulmicoton)
Tantivy 0.3.1
==========================
- Expose a method to trigger files garbage collection
Tantivy 0.3
==========================
Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
for their contribution to this release.
Thanks also to everyone in tantivy gitter chat
for their advice and company :)
<https://gitter.im/tantivy-search/tantivy>
https://gitter.im/tantivy-search/tantivy
Warning:
@@ -477,16 +467,19 @@ code and index format.
You should not expect backward compatibility before
tantivy 1.0.
New Features
------------
- Delete. You can now delete documents from an index.
- Support for windows (Thanks to @lnicola)
Various Bugfixes & small improvements
----------------------------------------
- Added CI for Windows (<https://ci.appveyor.com/project/fulmicoton/tantivy>)
- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
Thanks to @KodrAus ! (#108)
- Various dependency version updates (Thanks to @Ameobea) #76
- Fixed several race conditions in `Index.wait_merge_threads`
@@ -498,3 +491,7 @@ Thanks to @KodrAus ! (#108)
- Building binary targets for tantivy-cli (Thanks to @KodrAus)
- Misc invisible bug fixes, and code cleanup.
- Use

View File

@@ -60,6 +60,8 @@ pretty_assertions = "1.2.1"
serde_cbor = { version = "0.11.2", optional = true }
async-trait = "0.1.53"
arc-swap = "1.5.0"
gcd = "2.1.0"
roaring = "0.9.0"
[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"

View File

@@ -5,6 +5,7 @@
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
**Tantivy** is a **full-text search engine library** written in Rust.
@@ -15,7 +16,7 @@ to build such a search engine.
Tantivy is, in fact, strongly inspired by Lucene's design.
If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
# Benchmark
@@ -56,6 +57,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
# Getting started
Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
@@ -123,8 +125,7 @@ By default, `rustc` compiles everything in the `examples/` directory in debug mo
rust-gdb target/debug/examples/$EXAMPLE_NAME
$ gdb run
```
# Companies Using Tantivy
<p align="left">
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" /> &nbsp;
@@ -133,12 +134,11 @@ $ gdb run
<img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />&nbsp; &nbsp;
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
</p>
# FAQ
### Can I use Tantivy in other languages?
- Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
- Ruby → [tantiny](https://github.com/baygeldin/tantiny)
@@ -152,17 +152,13 @@ You can also find other bindings on [GitHub](https://github.com/search?q=tantivy
- and [more](https://github.com/search?q=tantivy)!
### On average, how much faster is Tantivy compared to Lucene?
- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
### Does tantivy support incremental indexing?
- Yes.
### How can I edit documents?
- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.
### When will my documents be searchable during indexing?
- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`.
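A short sketch of that reload behavior, assuming a 0.18-era API and a hypothetical `title` field:

```rust
use tantivy::collector::Count;
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Schema, TEXT};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let reader = index.reader()?; // acquired before the commit below
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(title => "hello"))?;
    index_writer.commit()?; // the document is now durable...
    reader.reload()?; // ...but the reader must reload to observe it

    let searcher = reader.searcher(); // a newly acquired searcher sees it
    let query = TermQuery::new(
        Term::from_field_text(title, "hello"),
        IndexRecordOption::Basic,
    );
    assert_eq!(searcher.search(&query, &Count)?, 1);
    Ok(())
}
```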

View File

@@ -82,16 +82,14 @@ impl BitUnpacker {
}
}
pub fn bit_width(&self) -> u8 {
self.num_bits as u8
}
#[inline]
pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
if self.num_bits == 0 {
return 0u64;
}
let addr_in_bits = idx * self.num_bits;
let num_bits = self.num_bits;
let mask = self.mask;
let addr_in_bits = idx * num_bits;
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
debug_assert!(
@@ -103,7 +101,7 @@ impl BitUnpacker {
.unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
val_shifted & self.mask
val_shifted & mask
}
}

View File

@@ -58,10 +58,6 @@ fn metadata_test() {
assert_eq!(meta.num_bits(), 6);
}
fn mem_usage<T>(items: &Vec<T>) -> usize {
items.capacity() * std::mem::size_of::<T>()
}
impl BlockedBitpacker {
pub fn new() -> Self {
let mut compressed_blocks = vec![];
@@ -77,8 +73,10 @@ impl BlockedBitpacker {
pub fn mem_usage(&self) -> usize {
std::mem::size_of::<BlockedBitpacker>()
+ self.compressed_blocks.capacity()
+ mem_usage(&self.offset_and_bits)
+ mem_usage(&self.buffer)
+ self.offset_and_bits.capacity()
* std::mem::size_of_val(&self.offset_and_bits.get(0).cloned().unwrap_or_default())
+ self.buffer.capacity()
* std::mem::size_of_val(&self.buffer.get(0).cloned().unwrap_or_default())
}
#[inline]

View File

@@ -11,7 +11,10 @@ mod writer;
pub use bitset::*;
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt};
pub use vint::{
deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,
serialize_vint_u32, write_u32_vint, VInt,
};
pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};
/// Has length trait

View File

@@ -19,7 +19,7 @@ pub trait DeserializeFrom<T: BinarySerializable> {
/// Implement deserialize from &[u8] for all types which implement BinarySerializable.
///
/// TryFrom would actually be preferable, but not possible because of the orphan
/// TryFrom would actually be preferrable, but not possible because of the orphan
/// rules (not completely sure if this could be resolved)
impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] {
fn deserialize(&mut self) -> io::Result<T> {

View File

@@ -5,6 +5,40 @@ use byteorder::{ByteOrder, LittleEndian};
use super::BinarySerializable;
/// Variable int serializes a u128 number
pub fn serialize_vint_u128(mut val: u128, output: &mut Vec<u8>) {
loop {
let next_byte: u8 = (val % 128u128) as u8;
val /= 128u128;
if val == 0 {
output.push(next_byte | STOP_BIT);
return;
} else {
output.push(next_byte);
}
}
}
/// Deserializes a u128 number
///
/// Returns the number and the slice after the vint
pub fn deserialize_vint_u128(data: &[u8]) -> io::Result<(u128, &[u8])> {
let mut result = 0u128;
let mut shift = 0u64;
for i in 0..19 {
let b = data[i];
result |= u128::from(b % 128u8) << shift;
if b >= STOP_BIT {
return Ok((result, &data[i + 1..]));
}
shift += 7;
}
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Failed to deserialize u128 vint",
))
}
/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
@@ -176,6 +210,7 @@ impl BinarySerializable for VInt {
mod tests {
use super::{serialize_vint_u32, BinarySerializable, VInt};
use crate::vint::{deserialize_vint_u128, serialize_vint_u128};
fn aux_test_vint(val: u64) {
let mut v = [14u8; 10];
@@ -217,6 +252,21 @@ mod tests {
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
}
fn aux_test_vint_u128(val: u128) {
let mut data = vec![];
serialize_vint_u128(val, &mut data);
let (deser_val, _data) = deserialize_vint_u128(&data).unwrap();
assert_eq!(val, deser_val);
}
#[test]
fn test_vint_u128() {
aux_test_vint_u128(0);
aux_test_vint_u128(1);
aux_test_vint_u128(u128::MAX / 3);
aux_test_vint_u128(u128::MAX);
}
#[test]
fn test_vint_u32() {
aux_test_serialize_vint_u32(0);

View File

@@ -62,7 +62,7 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write + Send + Sync {
pub trait TerminatingWrite: Write + Send {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where Self: Sized {

View File

@@ -1,5 +1,7 @@
# Summary
[Avant Propos](./avant-propos.md)
- [Segments](./basis.md)

View File

@@ -3,7 +3,7 @@
> Tantivy is a **search** engine **library** for Rust.
If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for Rust. tantivy is heavily inspired by Lucene's design and
they both have the same scope and targeted use cases.
they both have the same scope and targetted use cases.
If you are not familiar with Lucene, let's break down our little tagline.
@@ -31,4 +31,4 @@ relevancy, collapsing, highlighting, spatial search.
index from a different format.
Tantivy exposes a lot of low level API to do all of these things.

View File

@@ -11,7 +11,7 @@ directory shipped with tantivy is the `MmapDirectory`.
While this design has some downsides, this greatly simplifies the source code of
tantivy. Caching is also entirely delegated to the OS.
`tantivy` works entirely (or almost) by directly reading the datastructures as they are laid on disk. As a result, the act of opening an index does not involve loading different datastructures from the disk into random access memory: starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
`tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk. As a result, the act of opening an index does not involve loading different datastructures from the disk into random access memory: starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
This is an interesting property for a command line search engine, or for some multi-tenant log search engine: spawning a new process for each new query can be a perfectly sensible solution in some use cases.
@@ -22,6 +22,7 @@ Of course this is crucial to reduce IO, and ensure that as much of our index can
Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also
critical for performance if your data is read from an `SSD` or is even already in your pagecache.
## Segments, and the log method
That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic.
@@ -52,7 +53,11 @@ to get tantivy to fit your use case:
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
## Merging
# Merging
As you index more and more data, your index will accumulate more and more segments.
Having a lot of small segments is not really optimal. There is a bit of redundancy in having
@@ -61,7 +66,11 @@ all these term dictionary. Also when searching, we will need to do term lookups
That's where merging or compacting comes into play. Tantivy will continuously consider merge
opportunities and start merging segments in the background.
## Indexing throughput, number of indexing threads
# Indexing throughput, number of indexing threads
[^1]: This may eventually change.

View File

@@ -1,3 +1,3 @@
# Examples
- [Basic search](/examples/basic_search.html)

View File

@@ -1,11 +1,11 @@
- [Index Sorting](#index-sorting)
- [Why Sorting](#why-sorting)
- [Compression](#compression)
- [Top-N Optimization](#top-n-optimization)
- [Pruning](#pruning)
- [Other](#other)
- [Usage](#usage)
+ [Why Sorting](#why-sorting)
* [Compression](#compression)
* [Top-N Optimization](#top-n-optimization)
* [Pruning](#pruning)
* [Other](#other)
+ [Usage](#usage)
# Index Sorting
@@ -15,34 +15,32 @@ Tantivy allows you to sort the index according to a property.
Presorting an index has several advantages:
### Compression
###### Compression
When data is sorted it is easier to compress. E.g. the number sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
With delta encoding, the unsorted list becomes [5, -3, 1, -2, 3], versus [1, 1, 1, 1, 1] for the sorted one (see the sketch below).
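A minimal sketch of the delta encoding used in this example (illustrative only; tantivy's fast-field codecs work on bitpacked blocks rather than this naive form):

```rust
/// [5, 2, 3, 1, 4] -> [5, -3, 1, -2, 3], while the sorted
/// [1, 2, 3, 4, 5] -> [1, 1, 1, 1, 1], which compresses far better.
fn delta_encode(values: &[i64]) -> Vec<i64> {
    let mut prev = 0;
    values
        .iter()
        .map(|&v| {
            let delta = v - prev;
            prev = v;
            delta
        })
        .collect()
}
```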
The compression ratio mainly improves for the fast field of the sorted property; everything else is likely unaffected.
###### Top-N Optimization
### Top-N Optimization
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
E.g. if the data is sorted by timestamp and we want the top-n newest docs containing a term, we can simply leverage the order of the docids.
Note: Tantivy 0.16 does not do this optimization yet.
### Pruning
###### Pruning
Let's say we want all documents matching the filter `>= 2010-08-11`. When the data is sorted, we could do a lookup in the fast field to find the docid range and use it as the filter.
Note: Tantivy 0.16 does not do this optimization yet.
### Other?
###### Other?
In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?)
## Usage
Index sorting can be configured by setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to an `IndexBuilder`. As of Tantivy 0.16 only fast fields are allowed to be used.
```rust
```
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
@@ -60,3 +58,4 @@ let index = index_builder.create_in_ram().unwrap();
Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073).
In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets).
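A minimal sketch of what applying such a docid mapping means for a single fast-field column (hypothetical helper, assuming `mapping[new_doc_id] = old_doc_id`):

```rust
/// Reorders a column's values according to the sort-induced docid mapping,
/// where mapping[new_doc_id] = old_doc_id.
fn remap_column(old_values: &[u64], mapping: &[u32]) -> Vec<u64> {
    mapping
        .iter()
        .map(|&old_doc_id| old_values[old_doc_id as usize])
        .collect()
}
```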

View File

@@ -21,17 +21,16 @@ For instance, if user is a json field, the following document:
```
emits the following tokens:
- ("name", Text, "Paul")
- ("name", Text, "Masurel")
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("created_at", Date, 15420648505)
- ("name", Text, "Paul")
- ("name", Text, "Masurel")
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("created_at", Date, 15420648505)
## Bytes-encoding and lexicographical sort
# Bytes-encoding and lexicographical sort.
Like all other terms, these triplets are encoded into a binary format as follows.
- `json_path`: the json path is a sequence of "segments". In the example above, `address.city`
is just a debug representation of the json path `["address", "city"]`.
Its representation separates segments with the unicode char `\x01` and terminates the path with `\x00`.
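A small sketch of the path encoding rule just described (illustrative only; the field type code and value bytes that follow the path are omitted):

```rust
/// Encodes ["address", "city"] as b"address\x01city\x00".
fn encode_json_path(segments: &[&str]) -> Vec<u8> {
    let mut out = Vec::new();
    for (i, segment) in segments.iter().enumerate() {
        if i > 0 {
            out.push(0x01); // \x01 separates path segments
        }
        out.extend_from_slice(segment.as_bytes());
    }
    out.push(0x00); // \x00 terminates the path
    out
}
```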
@@ -42,16 +41,16 @@ This representation is designed to align the natural sort of Terms with the lexi
of their binary representation (Tantivy's dictionary (whether fst or sstable) is sorted and does prefix encoding).
In the example above, the terms will be sorted as
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("name", Text, "Masurel")
- ("name", Text, "Paul")
- ("created_at", Date, 15420648505)
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("name", Text, "Masurel")
- ("name", Text, "Paul")
- ("created_at", Date, 15420648505)
As seen in "pitfalls", we may end up having to search for a value for a same path in several different fields. Putting the field code after the path makes it maximizes compression opportunities but also increases the chances for the two terms to end up in the actual same term dictionary block.
## Pitfalls, limitation and corner cases
# Pitfalls, limitation and corner cases.
Json gives very little information about the type of the literals it stores.
All numeric types end up mapped as a "Number" and there are no types for dates.
@@ -71,21 +70,19 @@ For instance, we do not even know if the type is a number or string based.
So the query
```rust
```
my_path.my_segment:233
```
Will be interpreted as
```rust
(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)
```
`(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)`
Likewise, we need to emit two tokens if the query contains an rfc3339 date.
Indeed, the date could actually have been a single token inside the text of a document at ingestion time. Generally speaking, we will always emit at least a string token in query parsing, and sometimes more.
If more than one json field is defined, things get even more complicated.
## Default json field
If the schema contains a text field called "text" and a json field that is set as a default field:
@@ -99,11 +96,11 @@ This is a product decision.
The user can still target the JSON field by specifying its name explicitly:
`json_dynamic.text:hello`.
## Range queries are not supported
## Range queries are not supported.
Json field do not support range queries.
## Arrays do not work like nested object
## Arrays do not work like nested object.
If a json object contains an array, a search query might return more documents
than expected.
@@ -123,8 +120,9 @@ Let's take an example.
Despite the array structure, a document in tantivy is a bag of terms.
The query:
```rust
```
cart.product_type:sneakers AND cart.attributes.color:red
```
actually matches the document above.

View File

@@ -7,11 +7,10 @@
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.
use fastfield_codecs::Column;
// ---
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::DynamicFastFieldReader;
use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader};
@@ -104,7 +103,7 @@ impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: Score) {
let value = self.fast_field_reader.get_val(doc as u64) as f64;
let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;
self.stats.squared_sum += value * value;

View File

@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// for your unit tests... Or this example.
let index = Index::create_in_ram(schema.clone());
// here we are registering our custom tokenizer
// here we are registering our custome tokenizer
// this will store tokens of 3 characters each
index
.tokenizers()

View File

@@ -2,8 +2,8 @@ use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock, Weak};
use fastfield_codecs::Column;
use tantivy::collector::TopDocs;
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, TEXT};
use tantivy::{
@@ -52,7 +52,7 @@ impl Warmer for DynamicPriceColumn {
let product_id_reader = segment.fast_fields().u64(self.field)?;
let product_ids: Vec<ProductId> = segment
.doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc as u64))
.map(|doc| product_id_reader.get(doc))
.collect();
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
let mut price_vals: Vec<Price> = Vec::new();

View File

@@ -11,9 +11,10 @@ description = "Fast field codecs used by tantivy"
[dependencies]
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
prettytable-rs = {version="0.9.0", optional= true}
rand = {version="0.8.3", optional= true}
prettytable-rs = {version="0.8.0", optional= true}
rand = { version="0.8.3", optional= true}
itertools = { version="0.10.3", optional=true}
measure_time = { version="0.8.2", optional=true}
[dev-dependencies]
more-asserts = "0.3.0"
@@ -21,6 +22,6 @@ proptest = "1.0.0"
rand = "0.8.3"
[features]
bin = ["prettytable-rs", "rand"]
bin = ["prettytable-rs", "rand", "itertools", "measure_time"]
default = ["bin"]

View File

@@ -4,9 +4,13 @@ extern crate test;
#[cfg(test)]
mod tests {
use fastfield_codecs::bitpacked::BitpackedCodec;
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
use fastfield_codecs::linear::LinearCodec;
use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
use fastfield_codecs::linearinterpol::{
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
};
use fastfield_codecs::multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
};
use fastfield_codecs::*;
fn get_data() -> Vec<u64> {
@@ -25,59 +29,72 @@ mod tests {
fn value_iter() -> impl Iterator<Item = u64> {
0..20_000
}
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
fn bench_get<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
b: &mut Bencher,
data: &[u64],
) {
let mut bytes = vec![];
Codec::serialize(&mut bytes, &data).unwrap();
let reader = Codec::open_from_bytes(OwnedBytes::new(bytes)).unwrap();
S::serialize(
&mut bytes,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let reader = R::open_from_bytes(&bytes).unwrap();
b.iter(|| {
let mut sum = 0u64;
for pos in value_iter() {
let val = reader.get_val(pos as u64);
debug_assert_eq!(data[pos as usize], val);
sum = sum.wrapping_add(val);
reader.get_u64(pos as u64, &bytes);
}
sum
});
}
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
let mut bytes = Vec::new();
fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
let mut bytes = vec![];
b.iter(|| {
bytes.clear();
Codec::serialize(&mut bytes, &data).unwrap();
S::serialize(
&mut bytes,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
});
}
use ownedbytes::OwnedBytes;
use test::Bencher;
#[bench]
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<BitpackedCodec>(b, &data);
bench_create::<BitpackedFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<LinearCodec>(b, &data);
bench_create::<LinearInterpolFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<BlockwiseLinearCodec>(b, &data);
bench_create::<MultiLinearInterpolFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<BitpackedCodec>(b, &data);
bench_get::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(b, &data);
}
#[bench]
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<LinearCodec>(b, &data);
bench_get::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<BlockwiseLinearCodec>(b, &data);
bench_get::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>(
b, &data,
);
}
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);

View File

@@ -1,26 +1,37 @@
use std::io::{self, Write};
use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{Column, FastFieldCodec, FastFieldCodecType};
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct BitpackedReader {
data: OwnedBytes,
pub struct BitpackedFastFieldReader {
bit_unpacker: BitUnpacker,
min_value_u64: u64,
max_value_u64: u64,
num_vals: u64,
pub min_value_u64: u64,
pub max_value_u64: u64,
}
impl Column for BitpackedReader {
impl FastFieldCodecReader for BitpackedFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
let min_value = u64::deserialize(&mut footer)?;
let amplitude = u64::deserialize(&mut footer)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedFastFieldReader {
min_value_u64: min_value,
max_value_u64: max_value,
bit_unpacker,
})
}
#[inline]
fn get_val(&self, doc: u64) -> u64 {
self.min_value_u64 + self.bit_unpacker.get(doc, &self.data)
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
self.min_value_u64 + self.bit_unpacker.get(doc, data)
}
#[inline]
fn min_value(&self) -> u64 {
@@ -30,21 +41,16 @@ impl Column for BitpackedReader {
fn max_value(&self) -> u64 {
self.max_value_u64
}
#[inline]
fn num_vals(&self) -> u64 {
self.num_vals
}
}
pub struct BitpackedSerializerLegacy<'a, W: 'a + Write> {
pub struct BitpackedFastFieldSerializerLegacy<'a, W: 'a + Write> {
bit_packer: BitPacker,
write: &'a mut W,
min_value: u64,
num_vals: u64,
amplitude: u64,
num_bits: u8,
}
impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
/// Creates a new fast field serializer.
///
/// The serializer in fact encode the values by bitpacking
@@ -57,16 +63,15 @@ impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
write: &'a mut W,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedSerializerLegacy<'a, W>> {
) -> io::Result<BitpackedFastFieldSerializerLegacy<'a, W>> {
assert!(min_value <= max_value);
let amplitude = max_value - min_value;
let num_bits = compute_num_bits(amplitude);
let bit_packer = BitPacker::new();
Ok(BitpackedSerializerLegacy {
Ok(BitpackedFastFieldSerializerLegacy {
bit_packer,
write,
min_value,
num_vals: 0,
amplitude,
num_bits,
})
@@ -77,45 +82,21 @@ impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
let val_to_write: u64 = val - self.min_value;
self.bit_packer
.write(val_to_write, self.num_bits, &mut self.write)?;
self.num_vals += 1;
Ok(())
}
pub fn close_field(mut self) -> io::Result<()> {
self.bit_packer.close(&mut self.write)?;
self.min_value.serialize(&mut self.write)?;
self.amplitude.serialize(&mut self.write)?;
self.num_vals.serialize(&mut self.write)?;
Ok(())
}
}
pub struct BitpackedCodec;
impl FastFieldCodec for BitpackedCodec {
/// The CODEC_TYPE is an enum value used for serialization.
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
type Reader = BitpackedReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - 24;
let (data, mut footer) = bytes.split(footer_offset);
let min_value = u64::deserialize(&mut footer)?;
let amplitude = u64::deserialize(&mut footer)?;
let num_vals = u64::deserialize(&mut footer)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedReader {
data,
bit_unpacker,
min_value_u64: min_value,
max_value_u64: max_value,
num_vals,
})
}
pub struct BitpackedFastFieldSerializer {}
impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
const NAME: &'static str = "Bitpacked";
const ID: u8 = 1;
/// Serializes data with the BitpackedFastFieldSerializer.
///
/// The serializer in fact encode the values by bitpacking
@@ -124,41 +105,51 @@ impl FastFieldCodec for BitpackedCodec {
/// It requires a `min_value` and a `max_value` to compute
/// compute the minimum number of bits required to encode
/// values.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
let mut serializer = BitpackedSerializerLegacy::open(
write,
fastfield_accessor.min_value(),
fastfield_accessor.max_value(),
)?;
fn serialize(
write: &mut impl Write,
_fastfield_accessor: &dyn FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
let mut serializer =
BitpackedFastFieldSerializerLegacy::open(write, stats.min_value, stats.max_value)?;
for val in fastfield_accessor.iter() {
for val in data_iter {
serializer.add_val(val)?;
}
serializer.close_field()?;
Ok(())
}
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
let amplitude = fastfield_accessor.max_value() - fastfield_accessor.min_value();
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
_stats: FastFieldStats,
) -> bool {
true
}
fn estimate(_fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let amplitude = stats.max_value - stats.min_value;
let num_bits = compute_num_bits(amplitude);
let num_bits_uncompressed = 64;
Some(num_bits as f32 / num_bits_uncompressed as f32)
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_datasets;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) {
crate::tests::create_and_validate::<BitpackedCodec>(data, name);
crate::tests::create_and_validate::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(
data, name,
);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_datasets();
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();

View File

@@ -1,49 +0,0 @@
pub trait Column<T = u64> {
/// Return the value associated to the given idx.
///
/// This accessor should return as fast as possible.
///
/// # Panics
///
/// May panic if `idx` is greater than the column length.
fn get_val(&self, idx: u64) -> T;
/// Fills an output buffer with the fast field values
/// associated with the `DocId` going from
/// `start` to `start + output.len()`.
///
/// Regardless of the type of `Item`, this method works
/// - transmuting the output array
/// - extracting the `Item`s as if they were `u64`
/// - possibly converting the `u64` value to the right type.
///
/// # Panics
///
/// May panic if `start + output.len()` is greater than
/// the segment's `maxdoc`.
fn get_range(&self, start: u64, output: &mut [T]) {
for (out, idx) in output.iter_mut().zip(start..) {
*out = self.get_val(idx);
}
}
/// Returns the minimum value for this fast field.
///
/// The min value does not take in account of possible
/// deleted document, and should be considered as a lower bound
/// of the actual minimum value.
fn min_value(&self) -> T;
/// Returns the maximum value for this fast field.
///
/// The max value does not take in account of possible
/// deleted document, and should be considered as an upper bound
/// of the actual maximum value
fn max_value(&self) -> T;
fn num_vals(&self) -> u64;
/// Returns a iterator over the data
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
}
}
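
For illustration, a minimal implementation of this trait over a slice could look as follows (a sketch; it mirrors the VecColum test helper that appears later in this diff):

struct SliceColumn<'a>(&'a [u64]);

impl<'a> Column for SliceColumn<'a> {
    fn get_val(&self, idx: u64) -> u64 {
        self.0[idx as usize]
    }
    fn min_value(&self) -> u64 {
        self.0.iter().copied().min().unwrap_or(0)
    }
    fn max_value(&self) -> u64 {
        self.0.iter().copied().max().unwrap_or(0)
    }
    fn num_vals(&self) -> u64 {
        self.0.len() as u64
    }
    // `get_range` and `iter` fall back to the default implementations above.
}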

View File

@@ -0,0 +1,729 @@
/// This codec takes a large number space (u128) and reduces it to a compact number space.
///
/// It will find spaces in the number range. For example:
///
/// 100, 101, 102, 103, 104, 50000, 50001
/// could be mapped to
/// 100..104 -> 0..4
/// 50000..50001 -> 5..6
///
/// Compact space 0..6 requires much less bits than 100..50001
///
/// The codec is created to compress ip addresses, but may be employed in other use cases.
use std::{
cmp::Ordering,
collections::BinaryHeap,
io::{self, Write},
net::{IpAddr, Ipv6Addr},
ops::RangeInclusive,
};
use common::{deserialize_vint_u128, serialize_vint_u128};
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
use crate::FastFieldCodecReaderU128;
pub fn ip_to_u128(ip_addr: IpAddr) -> u128 {
let ip_addr_v6: Ipv6Addr = match ip_addr {
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
IpAddr::V6(v6) => v6,
};
u128::from_be_bytes(ip_addr_v6.octets())
}
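
Usage sketch (assuming the ip_to_u128 function above is in scope): an IPv4 address lands in the IPv6-mapped range ::ffff:0:0/96, so 127.0.0.1 becomes a small, dense u128.

fn ip_mapping_sketch() {
    use std::net::Ipv4Addr;
    let localhost = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    // ::ffff:127.0.0.1 == 0x0000_0000_0000_0000_0000_ffff_7f00_0001
    assert_eq!(ip_to_u128(localhost), 0xffff_7f00_0001u128);
}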
const INTERVAL_COST_IN_BITS: usize = 64;
#[derive(Default, Debug)]
pub struct IntervalEncoding();
pub struct IntervalCompressor {
pub null_value: u128,
min_value: u128,
max_value: u128,
compact_space: CompactSpace,
pub num_bits: u8,
}
#[derive(Debug, Eq, PartialEq)]
struct DeltaAndPos {
delta: u128,
pos: usize,
}
impl DeltaAndPos {
fn new(ip: u128, pos: usize) -> Self {
DeltaAndPos { delta: ip, pos }
}
}
impl Ord for DeltaAndPos {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.delta.cmp(&other.delta)
}
}
impl PartialOrd for DeltaAndPos {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.delta.partial_cmp(&other.delta)
}
}
#[test]
fn test_delta_and_pos_sort() {
let mut deltas: BinaryHeap<DeltaAndPos> = BinaryHeap::new();
deltas.push(DeltaAndPos::new(10, 1));
deltas.push(DeltaAndPos::new(100, 10));
deltas.push(DeltaAndPos::new(1, 10));
assert_eq!(deltas.pop().unwrap().delta, 100);
assert_eq!(deltas.pop().unwrap().delta, 10);
}
/// Put the deltas for the sorted ip addresses into a binary heap
fn get_deltas(ip_addrs_sorted: &[u128]) -> BinaryHeap<DeltaAndPos> {
let mut prev_opt = None;
let mut deltas: BinaryHeap<DeltaAndPos> = BinaryHeap::new();
for (pos, ip_addr) in ip_addrs_sorted.iter().cloned().enumerate() {
let delta = if let Some(prev) = prev_opt {
ip_addr - prev
} else {
ip_addr + 1
};
// skip too small deltas
if delta > 2 {
deltas.push(DeltaAndPos::new(delta, pos));
}
prev_opt = Some(ip_addr);
}
deltas
}
/// Collects blanks and adds them to the compact space whenever doing so reduces the number of
/// bits needed to represent the compact space.
fn get_compact_space(ip_addrs_sorted: &[u128], cost_per_interval: usize) -> CompactSpace {
let max_val = *ip_addrs_sorted.last().unwrap_or(&0u128) + 1;
let mut deltas = get_deltas(ip_addrs_sorted);
let mut amplitude_compact_space = max_val;
let mut amplitude_bits: u8 = (amplitude_compact_space as f64).log2().ceil() as u8;
let mut staged_blanks = vec![];
let mut compact_space = CompactSpaceBuilder::new();
// We will stage blanks until they reduce the compact space by 1 bit.
// Binary heap to process the gaps by their size
while let Some(ip_addr_and_pos) = deltas.pop() {
let delta = ip_addr_and_pos.delta;
let pos = ip_addr_and_pos.pos;
staged_blanks.push((delta, pos));
let staged_spaces_sum: u128 = staged_blanks.iter().map(|(delta, _)| delta - 1).sum();
// +1 for the null value that is added later
let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum + 1;
let amplitude_new_bits = (amplitude_new_compact_space as f64).log2().ceil() as u8;
if amplitude_bits == amplitude_new_bits {
continue;
}
let saved_bits = (amplitude_bits - amplitude_new_bits) as usize * ip_addrs_sorted.len();
let cost = staged_blanks.len() * cost_per_interval;
if cost >= saved_bits {
// Continue here: although we walk over the deltas from largest to
// smallest, the later, smaller deltas can still save a lot of bits.
//
// E.g. if the first range reduces the compact space by 1000 from 2000 to 1000, which
// saves 11-10=1 bit and the next range reduces the compact space by 950 to
// 50, which saves 10-6=4 bit
continue;
}
amplitude_compact_space = amplitude_new_compact_space;
amplitude_bits = amplitude_new_bits;
for (_, pos) in staged_blanks.drain(..) {
let ip_addr = ip_addrs_sorted[pos];
if pos == 0 {
compact_space.add_hole(0..=ip_addr - 1);
} else {
compact_space.add_hole(ip_addrs_sorted[pos - 1] + 1..=ip_addr - 1);
}
}
}
compact_space.add_hole(max_val..=u128::MAX);
compact_space.finish()
}
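
A back-of-the-envelope check of the cost model above (INTERVAL_COST_IN_BITS is 64, as defined earlier; the numbers are invented for this sketch):

fn cost_model_sketch() {
    // 10_000 values; accepting one blank shrinks the amplitude enough to
    // drop from 20 to 19 bits per value.
    let num_vals = 10_000usize;
    let saved_bits = (20 - 19) * num_vals; // 10_000 bits saved overall
    let cost = 64;                         // one staged interval costs 64 bits
    assert!(cost < saved_bits);            // so the blank is worth keeping
}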
#[test]
fn compact_space_test() {
// small ranges are ignored here
let ips = vec![
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
];
let ranges_and_compact_start = get_compact_space(&ips, 11);
let null_value = ranges_and_compact_start.null_value;
let amplitude = ranges_and_compact_start.amplitude_compact_space();
assert_eq!(null_value, 5);
assert_eq!(amplitude, 20);
assert_eq!(2, ranges_and_compact_start.to_compact(2).unwrap());
assert_eq!(ranges_and_compact_start.to_compact(100).unwrap_err(), 0);
}
#[derive(Debug, Clone, Eq, PartialEq)]
struct CompactSpaceBuilder {
covered_space: Vec<std::ops::RangeInclusive<u128>>,
}
impl CompactSpaceBuilder {
fn new() -> Self {
Self {
covered_space: vec![0..=u128::MAX],
}
}
// Extends the first range by one and uses the new end as the null value.
fn assign_and_return_null(&mut self) -> u128 {
self.covered_space[0] = *self.covered_space[0].start()..=*self.covered_space[0].end() + 1;
*self.covered_space[0].end()
}
// Assumes that repeated add_hole calls don't overlap.
fn add_hole(&mut self, hole: std::ops::RangeInclusive<u128>) {
let position = self
.covered_space
.iter()
.position(|range| range.start() <= hole.start() && range.end() >= hole.end());
if let Some(position) = position {
let old_range = self.covered_space.remove(position);
if old_range == hole {
return;
}
let new_range_end = hole.end().saturating_add(1)..=*old_range.end();
if old_range.start() == hole.start() {
self.covered_space.insert(position, new_range_end);
return;
}
let new_range_start = *old_range.start()..=hole.start().saturating_sub(1);
if old_range.end() == hole.end() {
self.covered_space.insert(position, new_range_start);
return;
}
self.covered_space.insert(position, new_range_end);
self.covered_space.insert(position, new_range_start);
}
}
fn finish(mut self) -> CompactSpace {
let null_value = self.assign_and_return_null();
let mut compact_start: u64 = 0;
let mut ranges_and_compact_start = vec![];
for cov in self.covered_space {
let covered_range_len = cov.end() - cov.start();
ranges_and_compact_start.push((cov, compact_start));
compact_start += covered_range_len as u64 + 1;
}
CompactSpace {
ranges_and_compact_start,
null_value,
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
struct CompactSpace {
ranges_and_compact_start: Vec<(std::ops::RangeInclusive<u128>, u64)>,
pub null_value: u128,
}
impl CompactSpace {
fn amplitude_compact_space(&self) -> u128 {
let last_range = &self.ranges_and_compact_start[self.ranges_and_compact_start.len() - 1];
last_range.1 as u128 + (last_range.0.end() - last_range.0.start()) + 1
}
fn get_range_and_compact_start(&self, pos: usize) -> &(std::ops::RangeInclusive<u128>, u64) {
&self.ranges_and_compact_start[pos]
}
fn serialize(&self, output: &mut Vec<u8>) {
serialize_vint_u128(self.null_value as u128, output);
serialize_vint_u128(self.ranges_and_compact_start.len() as u128, output);
let mut prev_ip = 0;
for (ip_range, _compact) in &self.ranges_and_compact_start {
let delta_ip = ip_range.start() - prev_ip;
serialize_vint_u128(delta_ip as u128, output);
prev_ip = *ip_range.start();
let delta_ip = ip_range.end() - prev_ip;
serialize_vint_u128(delta_ip as u128, output);
prev_ip = *ip_range.end();
}
}
fn deserialize(data: &[u8]) -> io::Result<(&[u8], Self)> {
let (null_value, data) = deserialize_vint_u128(data)?;
let (num_ip_addrs, mut data) = deserialize_vint_u128(data)?;
let mut ip_addr = 0u128;
let mut compact = 0u64;
let mut ranges_and_compact_start: Vec<(std::ops::RangeInclusive<u128>, u64)> = vec![];
for _ in 0..num_ip_addrs {
let (ip_addr_delta, new_data) = deserialize_vint_u128(data)?;
data = new_data;
ip_addr += ip_addr_delta;
let ip_addr_start = ip_addr;
let (ip_addr_delta, new_data) = deserialize_vint_u128(data)?;
data = new_data;
ip_addr += ip_addr_delta;
let ip_addr_end = ip_addr;
let compact_delta = ip_addr_end - ip_addr_start + 1;
ranges_and_compact_start.push((ip_addr_start..=ip_addr_end, compact));
compact += compact_delta as u64;
}
Ok((
data,
Self {
null_value,
ranges_and_compact_start,
},
))
}
/// Returns Ok(the value in the compact space), or, if the value lies outside the compact
/// space, Err(the index of the covered range just below the value)
fn to_compact(&self, ip: u128) -> Result<u64, usize> {
self.ranges_and_compact_start
.binary_search_by(|probe| {
let ip_range = &probe.0;
if *ip_range.start() <= ip && *ip_range.end() >= ip {
return Ordering::Equal;
} else if ip < *ip_range.start() {
return Ordering::Greater;
} else if ip > *ip_range.end() {
return Ordering::Less;
}
panic!("not covered all ranges in check");
})
.map(|pos| {
let (range, compact_start) = &self.ranges_and_compact_start[pos];
compact_start + (ip - range.start()) as u64
})
.map_err(|pos| pos - 1)
}
/// Unpacks a ip from compact space to u128 space
fn unpack_ip(&self, compact: u64) -> u128 {
let pos = self
.ranges_and_compact_start
.binary_search_by_key(&compact, |probe| probe.1)
.map_or_else(|e| e - 1, |v| v);
let range_and_compact_start = &self.ranges_and_compact_start[pos];
let diff = compact - self.ranges_and_compact_start[pos].1;
range_and_compact_start.0.start() + diff as u128
}
}
#[test]
fn ranges_and_compact_start_test() {
let ips = vec![
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
];
let ranges_and_compact_start = get_compact_space(&ips, 11);
assert_eq!(ranges_and_compact_start.null_value, 5);
let mut output = vec![];
ranges_and_compact_start.serialize(&mut output);
assert_eq!(
ranges_and_compact_start,
CompactSpace::deserialize(&output).unwrap().1
);
for ip in &ips {
let compact = ranges_and_compact_start.to_compact(*ip).unwrap();
assert_eq!(ranges_and_compact_start.unpack_ip(compact), *ip);
}
}
pub fn train(ip_addrs_sorted: &[u128]) -> IntervalCompressor {
let ranges_and_compact_start = get_compact_space(ip_addrs_sorted, INTERVAL_COST_IN_BITS);
let null_value = ranges_and_compact_start.null_value;
let amplitude_compact_space = ranges_and_compact_start.amplitude_compact_space();
assert!(
amplitude_compact_space <= u64::MAX as u128,
"case unsupported."
);
let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);
let min_value = *ip_addrs_sorted.first().unwrap_or(&0);
let max_value = *ip_addrs_sorted.last().unwrap_or(&0);
let compressor = IntervalCompressor {
null_value,
min_value,
max_value,
compact_space: ranges_and_compact_start,
num_bits,
};
let max_value = *ip_addrs_sorted.last().unwrap_or(&0u128).max(&null_value);
assert_eq!(
compressor.to_compact(max_value) + 1,
amplitude_compact_space as u64
);
compressor
}
impl IntervalCompressor {
/// Taking the vals as a Vec may cost a lot of memory. The Vec is needed here to sort the
/// vals.
///
/// A lower-memory alternative would be to store only the indices (u32) and sort those.
pub fn from_vals(mut vals: Vec<u128>) -> Self {
vals.sort();
train(&vals)
}
fn to_compact(&self, ip_addr: u128) -> u64 {
self.compact_space.to_compact(ip_addr).unwrap()
}
fn write_footer(&self, write: &mut impl Write, num_vals: u128) -> io::Result<()> {
let mut footer = vec![];
// header flags for future optional dictionary encoding
let header_flags = 0u64;
footer.extend_from_slice(&header_flags.to_le_bytes());
let null_value = self
.compact_space
.to_compact(self.null_value)
.expect("could not convert null to compact space");
serialize_vint_u128(null_value as u128, &mut footer);
serialize_vint_u128(self.min_value, &mut footer);
serialize_vint_u128(self.max_value, &mut footer);
self.compact_space.serialize(&mut footer);
footer.push(self.num_bits);
serialize_vint_u128(num_vals as u128, &mut footer);
write.write_all(&footer)?;
let footer_len = footer.len() as u32;
write.write_all(&footer_len.to_le_bytes())?;
Ok(())
}
pub fn compress(&self, vals: &[u128]) -> io::Result<Vec<u8>> {
let mut output = vec![];
self.compress_into(vals.iter().cloned(), &mut output)?;
Ok(output)
}
pub fn compress_into(
&self,
vals: impl Iterator<Item = u128>,
write: &mut impl Write,
) -> io::Result<()> {
let mut bitpacker = BitPacker::default();
let mut num_vals = 0;
for ip_addr in vals {
let compact = self.to_compact(ip_addr);
bitpacker.write(compact, self.num_bits, write).unwrap();
num_vals += 1;
}
bitpacker.close(write).unwrap();
self.write_footer(write, num_vals as u128)?;
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct IntervallDecompressor {
compact_space: CompactSpace,
bit_unpacker: BitUnpacker,
null_compact_space: u64,
min_value: u128,
max_value: u128,
num_vals: usize,
}
impl FastFieldCodecReaderU128 for IntervallDecompressor {
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self> {
Self::open(bytes)
}
fn get(&self, doc: u64, data: &[u8]) -> Option<u128> {
self.get(doc, data)
}
fn get_between_vals(&self, range: RangeInclusive<u128>, data: &[u8]) -> Vec<usize> {
self.get_range(range, data)
}
fn min_value(&self) -> u128 {
self.min_value()
}
fn max_value(&self) -> u128 {
self.max_value()
}
/// The computed and assigned number for null values
fn null_value(&self) -> u128 {
self.compact_space.null_value
}
fn iter<'a>(&'a self, data: &'a [u8]) -> Box<dyn Iterator<Item = Option<u128>> + 'a> {
Box::new(self.iter(data))
}
}
impl IntervallDecompressor {
pub fn open(data: &[u8]) -> io::Result<IntervallDecompressor> {
let (data, footer_len_bytes) = data.split_at(data.len() - 4);
let footer_len = u32::from_le_bytes(footer_len_bytes.try_into().unwrap());
let data = &data[data.len() - footer_len as usize..];
let (_header_flags, data) = data.split_at(8);
let (null_compact_space, data) = deserialize_vint_u128(data)?;
let (min_value, data) = deserialize_vint_u128(data)?;
let (max_value, data) = deserialize_vint_u128(data)?;
let (mut data, compact_space) = CompactSpace::deserialize(data).unwrap();
let num_bits = data[0];
data = &data[1..];
let (num_vals, _data) = deserialize_vint_u128(data)?;
let decompressor = IntervallDecompressor {
null_compact_space: null_compact_space as u64,
min_value,
max_value,
compact_space,
num_vals: num_vals as usize,
bit_unpacker: BitUnpacker::new(num_bits),
};
Ok(decompressor)
}
/// Converting to compact space for the decompressor is more complex, since we may get values
/// which are outside the compact space. E.g. if we map
/// 1000 => 5
/// 2000 => 6
///
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
/// error with the index of the covered range just below the value.
fn to_compact(&self, ip_addr: u128) -> Result<u64, usize> {
self.compact_space.to_compact(ip_addr)
}
fn compact_to_ip_addr(&self, compact: u64) -> u128 {
self.compact_space.unpack_ip(compact)
}
/// Comparing on compact space: 1.2 GElements/s
///
/// Comparing on original space: 0.06 GElements/s (not completely optimized)
pub fn get_range(&self, range: RangeInclusive<u128>, data: &[u8]) -> Vec<usize> {
let from_ip_addr = *range.start();
let to_ip_addr = *range.end();
assert!(to_ip_addr >= from_ip_addr);
let compact_from = self.to_compact(from_ip_addr);
let compact_to = self.to_compact(to_ip_addr);
// Quick return: if both bounds fall into the same non-mapped space, the range can't cover
// any values, so we can exit early
match (compact_to, compact_from) {
(Err(pos1), Err(pos2)) if pos1 == pos2 => return vec![],
_ => {}
}
let compact_from = compact_from.unwrap_or_else(|pos| {
let range_and_compact_start = self.compact_space.get_range_and_compact_start(pos);
let compact_end = range_and_compact_start.1
+ (range_and_compact_start.0.end() - range_and_compact_start.0.start()) as u64;
compact_end + 1
});
// If the upper bound has no compact mapping, use the closest compact value below it
let compact_to = compact_to.unwrap_or_else(|pos| {
let range_and_compact_start = self.compact_space.get_range_and_compact_start(pos);
let compact_end = range_and_compact_start.1
+ (range_and_compact_start.0.end() - range_and_compact_start.0.start()) as u64;
compact_end
});
let range = compact_from..=compact_to;
let mut positions = vec![];
for (pos, compact_ip) in self
.iter_compact(data)
.enumerate()
.filter(|(_pos, val)| *val != self.null_compact_space)
{
if range.contains(&compact_ip) {
positions.push(pos);
}
}
positions
}
#[inline]
pub fn iter_compact<'a>(&'a self, data: &'a [u8]) -> impl Iterator<Item = u64> + 'a {
(0..self.num_vals).map(move |idx| self.bit_unpacker.get(idx as u64, data) as u64)
}
#[inline]
fn iter<'a>(&'a self, data: &'a [u8]) -> impl Iterator<Item = Option<u128>> + 'a {
// TODO: Performance. It would be better to iterate on the ranges and check existence via
// the bit_unpacker.
self.iter_compact(data).map(|compact| {
if compact == self.null_compact_space {
None
} else {
Some(self.compact_to_ip_addr(compact))
}
})
}
pub fn get(&self, idx: u64, data: &[u8]) -> Option<u128> {
let compact = self.bit_unpacker.get(idx, data);
if compact == self.null_compact_space {
None
} else {
Some(self.compact_to_ip_addr(compact))
}
}
pub fn min_value(&self) -> u128 {
self.min_value
}
pub fn max_value(&self) -> u128 {
self.max_value
}
}
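
To make the Err(index) convention concrete, here is a toy standalone lookup over two covered ranges (names and numbers invented for this sketch; the real implementation uses the binary search shown above):

fn to_compact_sketch(
    ranges: &[(std::ops::RangeInclusive<u128>, u64)],
    val: u128,
) -> Result<u64, usize> {
    for (idx, (range, compact_start)) in ranges.iter().enumerate() {
        if range.contains(&val) {
            return Ok(compact_start + (val - range.start()) as u64);
        }
        if val < *range.start() {
            // val falls into the hole below this range: report the range below it.
            return Err(idx.saturating_sub(1));
        }
    }
    Err(ranges.len().saturating_sub(1))
}

fn to_compact_sketch_demo() {
    // 1000..=1004 -> compact 0..=4, 2000..=2001 -> compact 5..=6
    let ranges = vec![(1000u128..=1004, 0u64), (2000u128..=2001, 5u64)];
    assert_eq!(to_compact_sketch(&ranges, 1002), Ok(2));
    assert_eq!(to_compact_sketch(&ranges, 1500), Err(0)); // hole between the ranges
}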
impl IntervalEncoding {
pub fn train(&self, mut vals: Vec<u128>) -> IntervalCompressor {
vals.sort();
train(&vals)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn decode_all(data: &[u8]) -> Vec<u128> {
let decompressor = IntervallDecompressor::open(data).unwrap();
let mut u128_vals = Vec::new();
for idx in 0..decompressor.num_vals as usize {
let val = decompressor.get(idx as u64, data);
if let Some(val) = val {
u128_vals.push(val);
}
}
u128_vals
}
fn test_aux_vals(encoder: &IntervalEncoding, u128_vals: &[u128]) -> Vec<u8> {
let compressor = encoder.train(u128_vals.to_vec());
let data = compressor.compress(u128_vals).unwrap();
let decoded_val = decode_all(&data);
assert_eq!(&decoded_val, u128_vals);
data
}
#[test]
fn test_range_1() {
let vals = &[
1u128,
100u128,
3u128,
99999u128,
100000u128,
100001u128,
4_000_211_221u128,
4_000_211_222u128,
333u128,
];
let interval_encoding = IntervalEncoding::default();
let data = test_aux_vals(&interval_encoding, vals);
let decomp = IntervallDecompressor::open(&data).unwrap();
let positions = decomp.get_range(0..=1, &data);
assert_eq!(positions, vec![0]);
let positions = decomp.get_range(0..=2, &data);
assert_eq!(positions, vec![0]);
let positions = decomp.get_range(0..=3, &data);
assert_eq!(positions, vec![0, 2]);
assert_eq!(decomp.get_range(99999u128..=99999u128, &data), vec![3]);
assert_eq!(decomp.get_range(99998u128..=100000u128, &data), vec![3, 4]);
assert_eq!(decomp.get_range(99998u128..=99999u128, &data), vec![3]);
assert_eq!(decomp.get_range(99998u128..=99998u128, &data), vec![]);
assert_eq!(decomp.get_range(333u128..=333u128, &data), vec![8]);
assert_eq!(decomp.get_range(332u128..=333u128, &data), vec![8]);
assert_eq!(decomp.get_range(332u128..=334u128, &data), vec![8]);
assert_eq!(decomp.get_range(333u128..=334u128, &data), vec![8]);
assert_eq!(
decomp.get_range(4_000_211_221u128..=5_000_000_000u128, &data),
vec![6, 7]
);
}
#[test]
fn test_empty() {
let vals = &[];
let interval_encoding = IntervalEncoding::default();
let data = test_aux_vals(&interval_encoding, vals);
let _decomp = IntervallDecompressor::open(&data).unwrap();
}
#[test]
fn test_range_2() {
let vals = &[
100u128,
99999u128,
100000u128,
100001u128,
4_000_211_221u128,
4_000_211_222u128,
333u128,
];
let interval_encoding = IntervalEncoding::default();
let data = test_aux_vals(&interval_encoding, vals);
let decomp = IntervallDecompressor::open(&data).unwrap();
let positions = decomp.get_range(0..=5, &data);
assert_eq!(positions, vec![]);
let positions = decomp.get_range(0..=100, &data);
assert_eq!(positions, vec![0]);
let positions = decomp.get_range(0..=105, &data);
assert_eq!(positions, vec![0]);
}
#[test]
fn test_null() {
let vals = &[2u128];
let interval_encoding = IntervalEncoding::default().train(vals.to_vec());
let vals = vec![interval_encoding.null_value, 2u128];
let data = interval_encoding.compress(&vals).unwrap();
let decomp = IntervallDecompressor::open(&data).unwrap();
let positions = decomp.get_range(0..=1, &data);
assert_eq!(positions, vec![]);
let positions = decomp.get_range(2..=2, &data);
assert_eq!(positions, vec![1]);
}
#[test]
fn test_first_large_gaps() {
let vals = &[1_000_000_000u128; 100];
let interval_encoding = IntervalEncoding::default();
let _data = test_aux_vals(&interval_encoding, vals);
}
use proptest::prelude::*;
proptest! {
#[test]
fn compress_decompress_random(vals in proptest::collection::vec(any::<u128>()
, 1..1000)) {
let interval_encoding = IntervalEncoding::default();
let _data = test_aux_vals(&interval_encoding, &vals);
}
}
}
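
Putting the pieces together, a minimal end-to-end sketch of this codec (mirroring what test_aux_vals does above):

fn end_to_end_sketch() -> io::Result<()> {
    let vals = vec![2u128, 4, 1000, 1001];
    let compressor = IntervalEncoding::default().train(vals.clone());
    let data = compressor.compress(&vals)?;
    let decompressor = IntervallDecompressor::open(&data)?;
    assert_eq!(decompressor.get(2, &data), Some(1000));
    assert_eq!(decompressor.get_range(1000..=1001, &data), vec![2, 3]);
    Ok(())
}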

View File

@@ -4,82 +4,86 @@ extern crate more_asserts;
use std::io;
use std::io::Write;
use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use std::ops::RangeInclusive;
pub mod bitpacked;
pub mod blockwise_linear;
pub mod linear;
pub mod ip_codec;
pub mod linearinterpol;
pub mod multilinearinterpol;
mod column;
pub trait FastFieldCodecReader: Sized {
/// reads the metadata and returns the CodecReader
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
pub use self::column::Column;
fn get_u64(&self, doc: u64, data: &[u8]) -> u64;
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
pub enum FastFieldCodecType {
Bitpacked = 1,
Linear = 2,
BlockwiseLinear = 3,
Gcd = 4,
fn min_value(&self) -> u64;
fn max_value(&self) -> u64;
}
impl BinarySerializable for FastFieldCodecType {
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
self.to_code().serialize(wrt)
}
pub trait FastFieldCodecReaderU128: Sized {
/// reads the metadata and returns the CodecReader
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let code = u8::deserialize(reader)?;
let codec_type: Self = Self::from_code(code)
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`.")))?;
Ok(codec_type)
}
}
/// Get value for doc
fn get(&self, doc: u64, data: &[u8]) -> Option<u128>;
impl FastFieldCodecType {
pub fn to_code(self) -> u8 {
self as u8
}
/// Iterator
///
/// Replace with opaque type after: https://github.com/rust-lang/rust/issues/63063
fn iter<'a>(&'a self, data: &'a [u8]) -> Box<dyn Iterator<Item = Option<u128>> + 'a>;
pub fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::Bitpacked),
2 => Some(Self::Linear),
3 => Some(Self::BlockwiseLinear),
4 => Some(Self::Gcd),
_ => None,
}
}
/// Get positions (=docs in single value) for provided value range
fn get_between_vals(&self, range: RangeInclusive<u128>, data: &[u8]) -> Vec<usize>;
/// The computed and assigned numeric value for null values
fn null_value(&self) -> u128;
fn min_value(&self) -> u128;
fn max_value(&self) -> u128;
}
/// The FastFieldSerializerEstimate trait is required on all variants
/// of fast field compressions, to decide which one to choose.
pub trait FastFieldCodec {
/// A codex needs to provide a unique name and id, which is
pub trait FastFieldCodecSerializer {
/// A codec needs to provide a unique name and id, which is
/// used for debugging and de/serialization.
const CODEC_TYPE: FastFieldCodecType;
const NAME: &'static str;
const ID: u8;
type Reader: Column<u64>;
/// Reads the metadata and returns the CodecReader
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader>;
/// Serializes the data using the serializer into write.
///
/// The fastfield_accessor iterator should be preferred over using fastfield_accessor for
/// performance reasons.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column<u64>) -> io::Result<()>;
/// Check if the Codec is able to compress the data
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> bool;
/// Returns an estimate of the compression ratio.
/// If the codec is not applicable, returns `None`.
///
/// The baseline is uncompressed 64bit data.
///
/// It could make sense to also return a value representing
/// computational complexity.
fn estimate(fastfield_accessor: &impl Column) -> Option<f32>;
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32;
/// Serializes the data using the serializer into write.
/// There are multiple iterators, in case the codec needs to read the data multiple times.
/// The iterators should be preferred over using fastfield_accessor for performance reasons.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()>;
}
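
As a minimal illustration of this trait, here is a hypothetical pass-through codec (not part of the crate): it stores every value as a raw little-endian u64, so both the estimate and the actual ratio are exactly 1.0.

pub struct RawFastFieldSerializer;

impl FastFieldCodecSerializer for RawFastFieldSerializer {
    const NAME: &'static str = "Raw";
    const ID: u8 = 255; // hypothetical id, chosen to not collide with real codecs
    fn is_applicable(_accessor: &impl FastFieldDataAccess, _stats: FastFieldStats) -> bool {
        true // raw storage can hold any u64 data
    }
    fn estimate(_accessor: &impl FastFieldDataAccess, _stats: FastFieldStats) -> f32 {
        1.0 // 64 bits out for every 64 bits in
    }
    fn serialize(
        write: &mut impl Write,
        _accessor: &dyn FastFieldDataAccess,
        _stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        _data_iter1: impl Iterator<Item = u64>,
    ) -> io::Result<()> {
        for val in data_iter {
            write.write_all(&val.to_le_bytes())?;
        }
        Ok(())
    }
}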
/// FastFieldDataAccess is the trait to access fast field data during serialization and estimation.
pub trait FastFieldDataAccess {
/// Return the value associated to the given position.
///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
/// reasons.
///
/// # Panics
///
/// May panic if `position` is greater than the index.
fn get_val(&self, position: u64) -> u64;
}
#[derive(Debug, Clone)]
@@ -90,102 +94,61 @@ pub struct FastFieldStats {
pub num_vals: u64,
}
struct VecColum<'a>(&'a [u64]);
impl<'a> Column for VecColum<'a> {
impl<'a> FastFieldDataAccess for &'a [u64] {
fn get_val(&self, position: u64) -> u64 {
self.0[position as usize]
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(self.0.iter().cloned())
}
fn min_value(&self) -> u64 {
self.0.iter().min().cloned().unwrap_or(0)
}
fn max_value(&self) -> u64 {
self.0.iter().max().cloned().unwrap_or(0)
}
fn num_vals(&self) -> u64 {
self.0.len() as u64
self[position as usize]
}
}
impl<'a> From<&'a [u64]> for VecColum<'a> {
fn from(data: &'a [u64]) -> Self {
Self(data)
impl FastFieldDataAccess for Vec<u64> {
fn get_val(&self, position: u64) -> u64 {
self[position as usize]
}
}
#[cfg(test)]
mod tests {
use proptest::prelude::*;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
use crate::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
use crate::linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer};
use crate::multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
};
use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::linear::LinearCodec;
pub fn create_and_validate<Codec: FastFieldCodec>(
pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
data: &[u64],
name: &str,
) -> Option<(f32, f32)> {
let estimation = Codec::estimate(&VecColum::from(data))?;
let mut out: Vec<u8> = Vec::new();
Codec::serialize(&mut out, &VecColum::from(data)).unwrap();
) -> (f32, f32) {
if !S::is_applicable(&data, crate::tests::stats_from_vec(data)) {
return (f32::MAX, 0.0);
}
let estimation = S::estimate(&data, crate::tests::stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
crate::tests::stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let reader = R::open_from_bytes(&out).unwrap();
for (doc, orig_val) in data.iter().enumerate() {
let val = reader.get_u64(doc as u64, &out);
if val != *orig_val {
panic!(
"val {:?} does not match orig_val {:?}, in data set {}, data {:?}",
val, orig_val, name, data
);
}
}
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
let reader = Codec::open_from_bytes(OwnedBytes::new(out)).unwrap();
assert_eq!(reader.num_vals(), data.len() as u64);
for (doc, orig_val) in data.iter().copied().enumerate() {
let val = reader.get_val(doc as u64);
assert_eq!(
val, orig_val,
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
`{data:?}`",
);
}
Some((estimation, actual_compression))
(estimation, actual_compression)
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(100))]
#[test]
fn test_proptest_small(data in proptest::collection::vec(num_strategy(), 1..10)) {
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]
#[test]
fn test_proptest_large(data in proptest::collection::vec(num_strategy(), 1..6000)) {
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
}
}
fn num_strategy() -> impl Strategy<Value = u64> {
prop_oneof![
1 => prop::num::u64::ANY.prop_map(|num| u64::MAX - (num % 10) ),
1 => prop::num::u64::ANY.prop_map(|num| num % 10 ),
20 => prop::num::u64::ANY,
]
}
pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];
let data = (10..=10_000_u64).collect::<Vec<_>>();
let data = (10..=20_u64).collect::<Vec<_>>();
data_and_names.push((data, "simple monotonically increasing"));
data_and_names.push((
@@ -195,93 +158,92 @@ mod tests {
data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
data_and_names.push((vec![10], "single value"));
data_and_names.push((
vec![1572656989877777, 1170935903116329, 720575940379279, 0],
"overflow error",
));
data_and_names
}
fn test_codec<C: FastFieldCodec>() {
let codec_name = format!("{:?}", C::CODEC_TYPE);
for (data, dataset_name) in get_codec_test_datasets() {
let estimate_actual_opt: Option<(f32, f32)> =
crate::tests::create_and_validate::<C>(&data, dataset_name);
let result = if let Some((estimate, actual)) = estimate_actual_opt {
format!("Estimate `{estimate}` Actual `{actual}`")
} else {
fn test_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>() {
let codec_name = S::NAME;
for (data, data_set_name) in get_codec_test_data_sets() {
let (estimate, actual) =
crate::tests::create_and_validate::<S, R>(&data, data_set_name);
let result = if estimate == f32::MAX {
"Disabled".to_string()
} else {
format!("Estimate {:?} Actual {:?} ", estimate, actual)
};
println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
println!(
"Codec {}, DataSet {}, {}",
codec_name, data_set_name, result
);
}
}
#[test]
fn test_codec_bitpacking() {
test_codec::<BitpackedCodec>();
test_codec::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>();
}
#[test]
fn test_codec_interpolation() {
test_codec::<LinearCodec>();
test_codec::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>();
}
#[test]
fn test_codec_multi_interpolation() {
test_codec::<BlockwiseLinearCodec>();
test_codec::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>();
}
use super::*;
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}
#[test]
fn estimation_good_interpolation_case() {
let data = (10..=20000_u64).collect::<Vec<_>>();
let data: VecColum = data.as_slice().into();
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
let linear_interpol_estimation =
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, 0.01);
let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap();
let multi_linear_interpol_estimation =
MultiLinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(multi_linear_interpol_estimation, 0.2);
assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, bitpacked_estimation);
}
#[test]
fn estimation_test_bad_interpolation_case() {
let data: &[u64] = &[200, 10, 10, 10, 10, 1000, 20];
let data = vec![200, 10, 10, 10, 10, 1000, 20];
let data: VecColum = data.into();
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
let linear_interpol_estimation =
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, 0.32);
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(bitpacked_estimation, linear_interpol_estimation);
}
#[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
let mut data: Vec<u64> = (200..=20000_u64).collect();
let mut data = (200..=20000_u64).collect::<Vec<_>>();
data.push(1_000_000);
let data: VecColum = data.as_slice().into();
// in this case the linear interpolation can in fact not be worse than bitpacking,
// but the estimator adds some threshold, which leads to a worse estimate
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
let linear_interpol_estimation =
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, 0.35);
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(bitpacked_estimation, 0.32);
assert_le!(bitpacked_estimation, linear_interpol_estimation);
}
#[test]
fn test_fast_field_codec_type_to_code() {
let mut count_codec = 0;
for code in 0..=255 {
if let Some(codec_type) = FastFieldCodecType::from_code(code) {
assert_eq!(codec_type.to_code(), code);
count_codec += 1;
}
}
assert_eq!(count_codec, 4);
}
}

View File

@@ -2,23 +2,21 @@ use std::io::{self, Read, Write};
use std::ops::Sub;
use common::{BinarySerializable, FixedSize};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{Column, FastFieldCodec, FastFieldCodecType};
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearReader {
data: OwnedBytes,
pub struct LinearInterpolFastFieldReader {
bit_unpacker: BitUnpacker,
pub footer: LinearFooter,
pub footer: LinearInterpolFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearFooter {
pub struct LinearInterpolFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
@@ -28,7 +26,7 @@ pub struct LinearFooter {
pub max_value: u64,
}
impl BinarySerializable for LinearFooter {
impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
@@ -40,8 +38,8 @@ impl BinarySerializable for LinearFooter {
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearFooter> {
Ok(LinearFooter {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
@@ -53,15 +51,29 @@ impl BinarySerializable for LinearFooter {
}
}
impl FixedSize for LinearFooter {
impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl Column for LinearReader {
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline]
fn get_val(&self, doc: u64) -> u64 {
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, &self.data)) - self.footer.offset
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
}
#[inline]
@@ -72,87 +84,47 @@ impl Column for LinearReader {
fn max_value(&self) -> u64 {
self.footer.max_value
}
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
}
/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearCodec;
pub struct LinearInterpolFastFieldSerializer {}
#[inline]
pub(crate) fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
let diff = diff(last_val, first_val);
(diff / (num_vals - 1) as f64) as f32
}
/// Delay the cast, to improve precision for very large u64 values.
///
/// Since i64 is mapped monotonically to u64 space, 0i64 is after the mapping i64::MAX.
/// So very large values are not uncommon.
///
/// ```rust
/// let val1 = i64::MAX;
/// let val2 = i64::MAX - 100;
/// assert_eq!(val1 - val2, 100);
/// assert_eq!(val1 as f64 - val2 as f64, 0.0);
/// ```
fn diff(val1: u64, val2: u64) -> f64 {
if val1 >= val2 {
(val1 - val2) as f64
} else {
(val2 - val1) as f64 * -1.0
}
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
pub fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
if slope < 0.0 {
first_val.saturating_sub((pos as f32 * -slope) as u64)
} else {
first_val.saturating_add((pos as f32 * slope) as u64)
}
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
}
impl FastFieldCodec for LinearCodec {
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;
type Reader = LinearReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - LinearFooter::SIZE_IN_BYTES;
let (data, mut footer) = bytes.split(footer_offset);
let footer = LinearFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearReader {
data,
bit_unpacker,
footer,
slope,
})
}
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
/// Creates a new fast field serializer.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in fastfield_accessor.iter().enumerate() {
for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value we need to apply an offset
@@ -170,55 +142,56 @@ impl FastFieldCodec for LinearCodec {
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in fastfield_accessor.iter().enumerate() {
for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearFooter {
let footer = LinearInterpolFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: fastfield_accessor.num_vals(),
min_value: fastfield_accessor.min_value(),
max_value: fastfield_accessor.max_value(),
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
};
footer.serialize(write)?;
Ok(())
}
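
A worked example of the interpolation and offset logic above (a sketch with invented numbers): for data = [10, 20, 15], the slope is (15 - 10) / 2 = 2.5, so the predicted values per position are 10, 12, 15. Every actual value is at or above its prediction, so the offset stays 0 and the largest positive deviation, 20 - 12 = 8, bitpacks into 4 bits per value.

fn linear_prediction_sketch() {
    let data = [10u64, 20, 15];
    let (first, last) = (data[0] as f32, data[2] as f32);
    let slope = (last - first) / (data.len() as f32 - 1.0);
    let predicted: Vec<u64> = (0..data.len())
        .map(|pos| (first + pos as f32 * slope) as u64)
        .collect();
    assert_eq!(predicted, vec![10, 12, 15]);
    // largest positive deviation: 20 - 12 = 8 -> compute_num_bits(8) == 4
}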
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the calculated value are, and
/// the offset to shift all values to >= 0 is also unknown.
#[allow(clippy::question_mark)]
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
if fastfield_accessor.num_vals() < 3 {
return None; // disable compressor for this case
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; // disable compressor for this case
}
// On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add this to the max value.
// If this doesn't overflow the algorithm should be fine
let theorethical_maximum_offset =
fastfield_accessor.max_value() - fastfield_accessor.min_value();
if fastfield_accessor
.max_value()
// If this doesn't overflow the algorithm should be fine
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
{
return None;
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the calculated value are, and
/// the offset to shift all values to >= 0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = fastfield_accessor.num_vals() as f32 / 100.0;
let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
@@ -240,11 +213,10 @@ impl FastFieldCodec for LinearCodec {
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64
* fastfield_accessor.num_vals()
+ LinearFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
Some(num_bits as f32 / num_bits_uncompressed as f32)
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
@@ -259,48 +231,29 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
#[cfg(test)]
mod tests {
use rand::RngCore;
use super::*;
use crate::tests::get_codec_test_datasets;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
crate::tests::create_and_validate::<LinearCodec>(data, name)
}
#[test]
fn get_calculated_value_test() {
// pos slope
assert_eq!(get_calculated_value(100, 10, 5.0), 150);
// neg slope
assert_eq!(get_calculated_value(100, 10, -5.0), 50);
// pos slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, 5.0),
i64::MAX as u64 + 50
);
// neg slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, -5.0),
i64::MAX as u64 - 50
);
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large").unwrap();
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}
#[test]
fn test_with_codec_datasets() {
let data_sets = get_codec_test_datasets();
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
@@ -317,13 +270,6 @@ mod tests {
create_and_validate(&data, "large amplitude");
}
#[test]
fn overflow_error_test() {
let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
create_and_validate(&data, "overflow test");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
@@ -343,10 +289,10 @@ mod tests {
#[test]
fn linear_interpol_fast_field_rand() {
let mut rng = rand::thread_rng();
for _ in 0..50 {
let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}

View File

@@ -1,68 +1,158 @@
#[macro_use]
extern crate prettytable;
use fastfield_codecs::bitpacked::BitpackedCodec;
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
use fastfield_codecs::linear::LinearCodec;
use fastfield_codecs::{Column, FastFieldCodec, FastFieldCodecType, FastFieldStats};
use std::collections::HashSet;
use std::env;
use std::io::BufRead;
use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;
use fastfield_codecs::ip_codec::{IntervalEncoding, IntervallDecompressor};
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats};
use itertools::Itertools;
use measure_time::print_time;
use prettytable::{Cell, Row, Table};
struct Data<'a>(&'a [u64]);
fn print_set_stats(ip_addrs: &[u128]) {
println!("NumIps\t{}", ip_addrs.len());
let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
println!("NumUniqueIps\t{}", ip_addr_set.len());
let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
println!("RatioUniqueOverTotal\t{ratio_unique:.4}");
impl<'a> Column for Data<'a> {
fn get_val(&self, position: u64) -> u64 {
self.0[position as usize]
// histogram
let mut ip_addrs = ip_addrs.to_vec();
ip_addrs.sort();
let mut cnts: Vec<usize> = ip_addrs
.into_iter()
.dedup_with_count()
.map(|(cnt, _)| cnt)
.collect();
cnts.sort();
let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
let total: usize = cnts.iter().sum();
println!("{}", total);
println!("{}", top_256_cnt);
println!("{}", top_128_cnt);
println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);
let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
cnts.sort_by(|a, b| {
if a.1 == b.1 {
a.0.cmp(&b.0)
} else {
b.1.cmp(&a.1)
}
});
println!("\n\n----\nIP Address histogram");
println!("IPAddrCount\tFrequency");
for (ip_addr_count, times) in cnts {
println!("{}\t{}", ip_addr_count, times);
}
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(self.0.iter().cloned())
}
fn ip_dataset() -> Vec<u128> {
let mut ip_addr_v4 = 0;
fn min_value(&self) -> u64 {
*self.0.iter().min().unwrap_or(&0)
}
let stdin = std::io::stdin();
let ip_addrs: Vec<u128> = stdin
.lock()
.lines()
.flat_map(|line| {
let line = line.unwrap();
let line = line.trim();
let ip_addr = IpAddr::from_str(line.trim()).ok()?;
if ip_addr.is_ipv4() {
ip_addr_v4 += 1;
}
let ip_addr_v6: Ipv6Addr = match ip_addr {
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
IpAddr::V6(v6) => v6,
};
Some(ip_addr_v6)
})
.map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
.collect();
fn max_value(&self) -> u64 {
*self.0.iter().max().unwrap_or(&0)
}
println!("IpAddrsAny\t{}", ip_addrs.len());
println!("IpAddrsV4\t{}", ip_addr_v4);
fn num_vals(&self) -> u64 {
self.0.len() as u64
ip_addrs
}
fn bench_ip() {
let encoding = IntervalEncoding();
let dataset = ip_dataset();
print_set_stats(&dataset);
let compressor = encoding.train(dataset.to_vec());
let data = compressor.compress(&dataset).unwrap();
let decompressor = IntervallDecompressor::open(&data).unwrap();
for i in 11100..11150 {
print_time!("get range");
let doc_values = decompressor.get_range(dataset[i]..=dataset[i], &data);
println!("{:?}", doc_values.len());
}
}
fn main() {
if env::args().nth(1).unwrap() == "bench" {
bench_ip();
return;
}
let mut table = Table::new();
// Add a row per time
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
for (data, data_set_name) in get_codec_test_data_sets() {
let results: Vec<(f32, f32, FastFieldCodecType)> = [
serialize_with_codec::<LinearCodec>(&data),
serialize_with_codec::<BlockwiseLinearCodec>(&data),
serialize_with_codec::<BlockwiseLinearCodec>(&data),
serialize_with_codec::<BitpackedCodec>(&data),
]
.into_iter()
.flatten()
.collect();
let mut results = vec![];
let res = serialize_with_codec::<LinearInterpolFastFieldSerializer>(&data);
results.push(res);
let res = serialize_with_codec::<MultiLinearInterpolFastFieldSerializer>(&data);
results.push(res);
let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedFastFieldSerializer>(
&data,
);
results.push(res);
// let best_estimation_codec = results
//.iter()
//.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
//.unwrap();
let best_compression_ratio_codec = results
.iter()
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
.min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
.cloned()
.unwrap();
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (est, comp, codec_type) in results {
let est_cell = est.to_string();
let ratio_cell = comp.to_string();
for (is_applicable, est, comp, name) in results {
let (est_cell, ratio_cell) = if !is_applicable {
("Codec Disabled".to_string(), "".to_string())
} else {
(est.to_string(), comp.to_string())
};
let style = if comp == best_compression_ratio_codec.1 {
"Fb"
} else {
""
};
table.add_row(Row::new(vec![
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
Cell::new(name).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""),
]));
@@ -107,15 +197,26 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
data_and_names
}
pub fn serialize_with_codec<C: FastFieldCodec>(
pub fn serialize_with_codec<S: FastFieldCodecSerializer>(
data: &[u64],
) -> Option<(f32, f32, FastFieldCodecType)> {
let data = Data(data);
let estimation = C::estimate(&data)?;
let mut out = Vec::new();
C::serialize(&mut out, &data).unwrap();
let actual_compression = out.len() as f32 / (data.num_vals() * 8) as f32;
Some((estimation, actual_compression, C::CODEC_TYPE))
) -> (bool, f32, f32, &'static str) {
let is_applicable = S::is_applicable(&data, stats_from_vec(data));
if !is_applicable {
return (false, 0.0, 0.0, S::NAME);
}
let estimation = S::estimate(&data, stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
(true, estimation, actual_compression, S::NAME)
}
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {

View File

@@ -1,4 +1,4 @@
//! The BlockwiseLinear codec uses linear interpolation to guess values and stores the
//! MultiLinearInterpol compressor uses linear interpolation to guess values and stores the
//! offset, but in blocks of 512.
//!
//! With a CHUNK_SIZE of 512 and 29-byte metadata per block, we get an overhead for metadata of 232 /
@@ -14,25 +14,22 @@ use std::io::{self, Read, Write};
use std::ops::Sub;
use common::{BinarySerializable, CountingWriter, DeserializeFrom};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::linear::{get_calculated_value, get_slope};
use crate::{Column, FastFieldCodec, FastFieldCodecType};
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
const CHUNK_SIZE: u64 = 512;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct BlockwiseLinearReader {
data: OwnedBytes,
pub footer: BlockwiseLinearFooter,
pub struct MultiLinearInterpolFastFieldReader {
pub footer: MultiLinearInterpolFooter,
}
#[derive(Clone, Debug, Default)]
struct Function {
// The offset in the data is required, because we have different bit_widths per block
// The offset in the data is required, because we have diffrent bit_widths per block
data_start_offset: u64,
// start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
start_pos: u64,
@@ -102,14 +99,14 @@ impl BinarySerializable for Function {
}
#[derive(Clone, Debug)]
pub struct BlockwiseLinearFooter {
pub struct MultiLinearInterpolFooter {
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
interpolations: Vec<Function>,
}
impl BinarySerializable for BlockwiseLinearFooter {
impl BinarySerializable for MultiLinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
let mut out = vec![];
self.num_vals.serialize(&mut out)?;
@@ -121,8 +118,8 @@ impl BinarySerializable for BlockwiseLinearFooter {
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<BlockwiseLinearFooter> {
let mut footer = BlockwiseLinearFooter {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<MultiLinearInterpolFooter> {
let mut footer = MultiLinearInterpolFooter {
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
@@ -146,20 +143,26 @@ fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Functio
&interpolations[get_interpolation_position(doc)]
}
impl Column for BlockwiseLinearReader {
impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let (_data, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
let footer = MultiLinearInterpolFooter::deserialize(&mut footer)?;
Ok(MultiLinearInterpolFastFieldReader { footer })
}
#[inline]
fn get_val(&self, idx: u64) -> u64 {
let interpolation = get_interpolation_function(idx, &self.footer.interpolations);
let in_block_idx = idx - interpolation.start_pos;
let calculated_value = get_calculated_value(
interpolation.value_start_pos,
in_block_idx,
interpolation.slope,
);
let diff = interpolation.bit_unpacker.get(
in_block_idx,
&self.data[interpolation.data_start_offset as usize..],
);
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let interpolation = get_interpolation_function(doc, &self.footer.interpolations);
let doc = doc - interpolation.start_pos;
let calculated_value =
get_calculated_value(interpolation.value_start_pos, doc, interpolation.slope);
let diff = interpolation
.bit_unpacker
.get(doc, &data[interpolation.data_start_offset as usize..]);
(calculated_value + diff) - interpolation.positive_val_offset
}
@@ -171,38 +174,39 @@ impl Column for BlockwiseLinearReader {
fn max_value(&self) -> u64 {
self.footer.max_value
}
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
}
/// Same as LinearSerializer, but working on chunks of CHUNK_SIZE elements.
pub struct BlockwiseLinearCodec;
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
impl FastFieldCodec for BlockwiseLinearCodec {
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
(first_val as i64 + (pos as f32 * slope) as i64) as u64
}
type Reader = BlockwiseLinearReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let footer_offset = bytes.len() - 4 - footer_len as usize;
let (data, mut footer) = bytes.split(footer_offset);
let footer = BlockwiseLinearFooter::deserialize(&mut footer)?;
Ok(BlockwiseLinearReader { data, footer })
}
/// Same as LinearInterpolFastFieldSerializer, but working on chunks of CHUNK_SIZE elements.
pub struct MultiLinearInterpolFastFieldSerializer {}
impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
const NAME: &'static str = "MultiLinearInterpol";
const ID: u8 = 3;
/// Creates a new fast field serializer.
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let mut first_function = Function {
end_pos: fastfield_accessor.num_vals(),
end_pos: stats.num_vals,
value_start_pos: first_val,
value_end_pos: last_val,
..Default::default()
@@ -213,7 +217,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
// Since we potentially apply multiple passes over the data, the data is cached.
// Multiple iterations can be expensive (merging with index sorting can add a lot of overhead per
// iteration).
let data = fastfield_accessor.iter().collect::<Vec<_>>();
let data = data_iter.collect::<Vec<_>>();
//// let's split this into chunks of CHUNK_SIZE
for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
@@ -276,47 +280,49 @@ impl FastFieldCodec for BlockwiseLinearCodec {
}
bit_packer.close(write)?;
let footer = BlockwiseLinearFooter {
num_vals: fastfield_accessor.num_vals(),
min_value: fastfield_accessor.min_value(),
max_value: fastfield_accessor.max_value(),
let footer = MultiLinearInterpolFooter {
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
interpolations,
};
footer.serialize(write)?;
Ok(())
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima are for the deviation of the calculated value, and
/// the offset is also unknown.
#[allow(clippy::question_mark)]
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
if fastfield_accessor.num_vals() < 10 * CHUNK_SIZE {
return None;
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 5_000 {
return false;
}
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset =
fastfield_accessor.max_value() - fastfield_accessor.min_value();
if fastfield_accessor
.max_value()
// If this doesn't overflow the algorithm should be fine
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
{
return None;
return false;
}
true
}
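A worked instance of the guard above, with hypothetical extreme stats:

```rust
#[test]
fn overflow_guard_rejects_extreme_ranges() {
    // Hypothetical stats covering the full u64 range.
    let (min_value, max_value) = (0u64, u64::MAX);
    let theoretical_maximum_offset = max_value - min_value;
    // checked_add returns None on overflow, which is_applicable maps to false.
    assert!(max_value.checked_add(theoretical_maximum_offset).is_none());
}
```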
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima are for the deviation of the calculated value, and
/// the offset is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val_in_first_block = fastfield_accessor.get_val(0);
let last_elem_in_first_chunk = CHUNK_SIZE.min(fastfield_accessor.num_vals());
let last_elem_in_first_chunk = CHUNK_SIZE.min(stats.num_vals);
let last_val_in_first_block =
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
let slope = get_slope(
first_val_in_first_block,
last_val_in_first_block,
fastfield_accessor.num_vals(),
stats.num_vals,
);
// let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
@@ -343,11 +349,11 @@ impl FastFieldCodec for BlockwiseLinearCodec {
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * fastfield_accessor.num_vals() as u64
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
// function metadata per block
+ 29 * (fastfield_accessor.num_vals() / CHUNK_SIZE);
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
Some(num_bits as f32 / num_bits_uncompressed as f32)
+ 29 * (stats.num_vals / CHUNK_SIZE);
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
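Plugging hypothetical numbers into this estimate: 512,000 values whose sampled deviations each fit into 8 bits yield a ratio of about 0.13, roughly an 8x reduction over raw u64s.

```rust
// Worked example of the estimate formula above (made-up figures).
fn worked_estimate() -> f32 {
    let num_vals: u64 = 512_000;
    let num_bits = 8 * num_vals          // 8 bits per bit-packed deviation
        + 29 * (num_vals / 512);         // per-block function metadata
    let num_bits_uncompressed = 64 * num_vals;
    num_bits as f32 / num_bits_uncompressed as f32 // ~0.126
}
```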
@@ -362,35 +368,20 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_datasets;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
crate::tests::create_and_validate::<BlockwiseLinearCodec>(data, name)
}
const HIGHEST_BIT: u64 = 1 << 63;
pub fn i64_to_u64(val: i64) -> u64 {
(val as u64) ^ HIGHEST_BIT
}
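The sign-bit flip above is what makes the mapping monotone; a quick property check (sketch):

```rust
#[test]
fn i64_to_u64_preserves_order() {
    assert_eq!(i64_to_u64(i64::MIN), 0u64);
    assert!(i64_to_u64(-1) < i64_to_u64(0));
    assert!(i64_to_u64(0) < i64_to_u64(1));
}
```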
#[test]
fn test_compression_i64() {
let data = (i64::MAX - 600_000..=i64::MAX - 550_000)
.map(i64_to_u64)
.collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large i64").unwrap();
assert!(actual_compression < 0.2);
assert!(estimate < 0.20);
assert!(estimate > 0.15);
assert!(actual_compression > 0.01);
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
MultiLinearInterpolFastFieldSerializer,
MultiLinearInterpolFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large").unwrap();
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.2);
assert!(estimate < 0.20);
assert!(estimate > 0.15);
@@ -399,7 +390,7 @@ mod tests {
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_datasets();
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();

View File

@@ -6,7 +6,7 @@ use std::{fmt, io, mem};
use stable_deref_trait::StableDeref;
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a slice.
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
@@ -21,7 +21,7 @@ impl OwnedBytes {
OwnedBytes::new(&[][..])
}
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T,
) -> OwnedBytes {
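Usage sketch, assuming the `Deref<Target = [u8]>` impl this crate provides: any `StableDeref` owner of bytes, such as a `Vec<u8>`, can back an `OwnedBytes`.

```rust
#[test]
fn owned_bytes_wraps_a_vec() {
    // Vec<u8> is StableDeref, so it can back an OwnedBytes.
    let bytes = OwnedBytes::new(vec![1u8, 2, 3]);
    assert_eq!(&*bytes, &[1u8, 2, 3][..]);
}
```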

View File

@@ -1,5 +1,3 @@
#![allow(clippy::derive_partial_eq_without_eq)]
mod occur;
mod query_grammar;
mod user_input_ast;

View File

@@ -67,7 +67,7 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
/// 2021-04-13T19:46:26.266051969+00:00
///
/// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
/// We delegate rejecting such invalid dates to the logical AST computation code
/// We delegate rejecting such invalid dates to the logical AST computation code
/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
/// it (instead of merely extracting the datetime value as string as done here).
fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
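The deferred validation mentioned above amounts to a call like the following; `validate` is a hypothetical helper, not part of this file:

```rust
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

// Returns true only for values that are real RFC 3339 datetimes.
fn validate(raw: &str) -> bool {
    OffsetDateTime::parse(raw, &Rfc3339).is_ok()
}

// validate("2021-04-13T19:46:26.266051969+00:00") -> true
// validate("999999-99-99T99:99:99.266051969+99:99") -> false
```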

View File

@@ -57,7 +57,8 @@ impl AggregationResult {
match self {
AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
"Tried to retrieve value from bucket aggregation. This is not supported and \
should not happen during collection phase, but should be caught during validation"
should not happen during collection phase, but should be caught during \
validation"
.to_string(),
)),
AggregationResult::MetricResult(metric) => metric.get_value(agg_property),

View File

@@ -1,7 +1,6 @@
use std::cmp::Ordering;
use std::fmt::Display;
use fastfield_codecs::Column;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
@@ -15,7 +14,7 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::{DocId, TantivyError};
@@ -71,7 +70,7 @@ pub struct HistogramAggregation {
/// The interval to chunk your data range. Each bucket spans a value range of [0..interval).
/// Must be a positive value.
pub interval: f64,
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
/// 1))`.
///
/// Offset makes it possible to shift this grid into
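A hedged sketch of the bucketing rule this grid implies (not necessarily the exact implementation): a value lands in the bucket whose key is the nearest lower point of the offset-shifted grid.

```rust
// bucket_key(7.5, 2.0, 0.5) == 6.5, i.e. the bucket [6.5, 8.5).
fn bucket_key(val: f64, interval: f64, offset: f64) -> f64 {
    ((val - offset) / interval).floor() * interval + offset
}
```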
@@ -332,10 +331,10 @@ impl SegmentHistogramCollector {
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0] as u64));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1] as u64));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2] as u64));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3] as u64));
let val0 = self.f64_from_fastfield_u64(accessor.get(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get(docs[3]));
let bucket_pos0 = get_bucket_num(val0);
let bucket_pos1 = get_bucket_num(val1);
@@ -371,8 +370,8 @@ impl SegmentHistogramCollector {
&bucket_with_accessor.sub_aggregation,
)?;
}
for &doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc as u64), &self.field_type);
for doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get(*doc), &self.field_type);
if !bounds.contains(val) {
continue;
}
@@ -383,7 +382,7 @@ impl SegmentHistogramCollector {
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset) as f64
);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
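The `chunks_exact(4)` pattern used here (and in the range, average, and stats collectors below) unrolls the fast-field lookups four at a time; a standalone sketch, with a hypothetical `get` closure standing in for the reader:

```rust
fn for_each_val(docs: &[u32], get: impl Fn(u32) -> u64, mut emit: impl FnMut(u64)) {
    let mut iter = docs.chunks_exact(4);
    for chunk in iter.by_ref() {
        // Four independent lookups per iteration give the compiler and the
        // memory subsystem room to overlap the accesses.
        emit(get(chunk[0]));
        emit(get(chunk[1]));
        emit(get(chunk[2]));
        emit(get(chunk[3]));
    }
    for &doc in iter.remainder() {
        emit(get(doc));
    }
}
```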

View File

@@ -1,7 +1,6 @@
use std::fmt::Debug;
use std::ops::Range;
use fastfield_codecs::Column;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};
@@ -13,6 +12,7 @@ use crate::aggregation::intermediate_agg_result::{
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::fastfield::FastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};
@@ -210,8 +210,8 @@ impl SegmentRangeCollector {
let key = range
.key
.clone()
.map(Key::Str)
.unwrap_or_else(|| range_to_key(&range.range, &field_type));
.map(|key| Key::Str(key))
.unwrap_or(range_to_key(&range.range, &field_type));
let to = if range.range.end == u64::MAX {
None
} else {
@@ -264,10 +264,10 @@ impl SegmentRangeCollector {
.as_single()
.expect("unexpected fast field cardinatility");
for docs in iter.by_ref() {
let val1 = accessor.get_val(docs[0] as u64);
let val2 = accessor.get_val(docs[1] as u64);
let val3 = accessor.get_val(docs[2] as u64);
let val4 = accessor.get_val(docs[3] as u64);
let val1 = accessor.get(docs[0]);
let val2 = accessor.get(docs[1]);
let val3 = accessor.get(docs[2]);
let val4 = accessor.get(docs[3]);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
@@ -278,10 +278,10 @@ impl SegmentRangeCollector {
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for &doc in iter.remainder() {
let val = accessor.get_val(doc as u64);
for doc in iter.remainder() {
let val = accessor.get(*doc);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
for bucket in &mut self.buckets {

View File

@@ -110,8 +110,8 @@ pub struct TermsAggregation {
/// Set the order. `String` here is a target, which is either "_count", "_key", or the name of
/// a metric sub_aggregation.
///
/// Single value metrics like average can be addressed by its name.
/// Multi value metrics like stats are required to address their field by name e.g.
/// Single value metrics like average can be addressed by its name.
/// Multi value metrics like stats are required to address their field by name e.g.
/// "stats.avg"
///
/// Examples in JSON format:

View File

@@ -39,7 +39,7 @@ impl AggregationCollector {
///
/// # Purpose
/// AggregationCollector returns `IntermediateAggregationResults` and not the final
/// `AggregationResults`, so that results from different indices can be merged and then converted
/// `AggregationResults`, so that results from different indices can be merged and then converted
/// into the final `AggregationResults` via the `into_final_result()` method.
pub struct DistributedAggregationCollector {
agg: Aggregations,

View File

@@ -43,7 +43,7 @@ impl IntermediateAggregationResults {
/// Convert intermediate result and its aggregation request to the final result.
///
/// Internal function: AggregationsInternal is used instead of Aggregations, which is optimized
/// for internal processing, by splitting metric and buckets into separate groups.
/// for internal processing, by splitting metric and buckets into separate groups.
pub(crate) fn into_final_bucket_result_internal(
self,
req: &AggregationsInternal,

View File

@@ -1,10 +1,9 @@
use std::fmt::Debug;
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};
use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::DocId;
@@ -61,10 +60,10 @@ impl SegmentAverageCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -74,8 +73,8 @@ impl SegmentAverageCollector {
self.data.collect(val3);
self.data.collect(val4);
}
for &doc in iter.remainder() {
let val = field.get_val(doc as u64);
for doc in iter.remainder() {
let val = field.get(*doc);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.data.collect(val);
}

View File

@@ -1,8 +1,7 @@
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};
use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::{DocId, TantivyError};
@@ -167,10 +166,10 @@ impl SegmentStatsCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -180,8 +179,8 @@ impl SegmentStatsCollector {
self.stats.collect(val3);
self.stats.collect(val4);
}
for &doc in iter.remainder() {
let val = field.get_val(doc as u64);
for doc in iter.remainder() {
let val = field.get(*doc);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val);
}

View File

@@ -11,10 +11,8 @@
// Importing tantivy...
use std::marker::PhantomData;
use fastfield_codecs::Column;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastValue};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};
@@ -176,7 +174,7 @@ where
type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get_val(doc as u64);
let value = self.fast_field_reader.get(doc);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
}

View File

@@ -1,8 +1,7 @@
use fastdivide::DividerU64;
use fastfield_codecs::Column;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastValue};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::{Field, Type};
use crate::{DocId, Score};
@@ -92,7 +91,7 @@ impl SegmentCollector for SegmentHistogramCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let value = self.ff_reader.get_val(doc as u64);
let value = self.ff_reader.get(doc);
self.histogram_computer.add_value(value);
}

View File

@@ -1,9 +1,7 @@
use fastfield_codecs::Column;
use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
use crate::core::SegmentReader;
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader};
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
use crate::query::{AllQuery, QueryParser};
use crate::schema::{Field, Schema, FAST, TEXT};
use crate::time::format_description::well_known::Rfc3339;
@@ -199,7 +197,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get_val(doc as u64);
let val = self.reader.get(doc);
self.vals.push(val);
}

View File

@@ -2,8 +2,6 @@ use std::collections::BinaryHeap;
use std::fmt;
use std::marker::PhantomData;
use fastfield_codecs::Column;
use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
use crate::collector::top_collector::{ComparableDoc, TopCollector, TopSegmentCollector};
@@ -11,7 +9,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
};
use crate::fastfield::{DynamicFastFieldReader, FastValue};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::query::Weight;
use crate::schema::Field;
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
@@ -136,7 +134,7 @@ struct ScorerByFastFieldReader {
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 {
self.ff_reader.get_val(doc as u64)
self.ff_reader.get(doc)
}
}
@@ -409,7 +407,7 @@ impl TopDocs {
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::fastfield::Column;
/// use tantivy::fastfield::FastFieldReader;
/// use tantivy::schema::Field;
///
/// fn create_schema() -> Schema {
@@ -458,7 +456,7 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -501,7 +499,7 @@ impl TopDocs {
///
/// This method only makes it possible to compute the score from a given
/// `DocId`, fastfield values for the doc and any information you could
/// have precomputed beforehand. It does not make it possible for instance
/// have precomputed beforehand. It does not make it possible for instance
/// to compute something like TfIdf as it does not have access to the list of query
/// terms present in the document, nor the term frequencies for the different terms.
///
@@ -517,7 +515,7 @@ impl TopDocs {
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
/// use fastfield_codecs::Column;
/// use tantivy::fastfield::FastFieldReader;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
@@ -569,8 +567,8 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// let boosted: u64 = boosted_reader.get_val(doc as u64);
/// let popularity: u64 = popularity_reader.get(doc);
/// let boosted: u64 = boosted_reader.get(doc);
/// // Scores do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order
/// // for free.

View File

@@ -7,7 +7,6 @@ use std::sync::Arc;
use super::segment::Segment;
use super::IndexSettings;
use crate::core::single_segment_index_writer::SingleSegmentIndexWriter;
use crate::core::{
Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
};
@@ -17,7 +16,7 @@ use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_metas;
use crate::indexer::segment_updater::save_new_metas;
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
@@ -48,34 +47,6 @@ fn load_metas(
.map_err(From::from)
}
/// Save the index meta file.
/// This operation is atomic:
/// Either
/// - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched,
/// - it succeeds, and `meta.json` is written
/// and flushed.
///
/// This method is not part of tantivy's public API
fn save_new_metas(
schema: Schema,
index_settings: IndexSettings,
directory: &dyn Directory,
) -> crate::Result<()> {
save_metas(
&IndexMeta {
index_settings,
segments: Vec::new(),
schema,
opstamp: 0u64,
payload: None,
},
directory,
)?;
directory.sync_directory()?;
Ok(())
}
/// IndexBuilder can be used to create an index.
///
/// Use in conjunction with `SchemaBuilder`. Global index settings
@@ -164,25 +135,6 @@ impl IndexBuilder {
self.create(mmap_directory)
}
/// Dragons ahead!!!
///
/// The point of this API is to let users create a simple index with a single segment
/// and without starting any thread.
///
/// Do not use this method if you are not sure what you are doing.
///
/// It expects an originally empty directory, and will not run any GC operation.
#[doc(hidden)]
pub fn single_segment_index_writer(
self,
dir: impl Into<Box<dyn Directory>>,
mem_budget: usize,
) -> crate::Result<SingleSegmentIndexWriter> {
let index = self.create(dir)?;
let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
Ok(index_simple_writer)
}
/// Creates a new index in a temp directory.
///
/// The index will use the `MMapDirectory` in a newly created directory.
@@ -628,12 +580,10 @@ impl fmt::Debug for Index {
#[cfg(test)]
mod tests {
use crate::collector::Count;
use crate::directory::{RamDirectory, WatchCallback};
use crate::query::TermQuery;
use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, TEXT};
use crate::schema::{Field, Schema, INDEXED, TEXT};
use crate::tokenizer::TokenizerManager;
use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy, Term};
use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy};
#[test]
fn test_indexer_for_field() {
@@ -899,28 +849,4 @@ mod tests {
);
Ok(())
}
#[test]
fn test_single_segment_index_writer() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let directory = RamDirectory::default();
let mut single_segment_index_writer = Index::builder()
.schema(schema)
.single_segment_index_writer(directory, 10_000_000)?;
for _ in 0..10 {
let doc = doc!(text_field=>"hello");
single_segment_index_writer.add_document(doc)?;
}
let index = single_segment_index_writer.finalize()?;
let searcher = index.reader()?.searcher();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "hello"),
IndexRecordOption::Basic,
);
let count = searcher.search(&term_query, &Count)?;
assert_eq!(count, 10);
Ok(())
}
}

View File

@@ -311,7 +311,7 @@ pub struct IndexMeta {
/// `IndexSettings` to configure index options.
#[serde(default)]
pub index_settings: IndexSettings,
/// List of `SegmentMeta` information associated to each finalized segment of the index.
/// List of `SegmentMeta` information associated to each finalized segment of the index.
pub segments: Vec<SegmentMeta>,
/// Index `Schema`
pub schema: Schema,

View File

@@ -230,13 +230,4 @@ impl InvertedIndexReader {
}
Ok(())
}
/// Returns the number of documents containing the term asynchronously.
pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
Ok(self
.get_term_info_async(term)
.await?
.map(|term_info| term_info.doc_freq)
.unwrap_or(0u32))
}
}

View File

@@ -7,7 +7,6 @@ mod segment;
mod segment_component;
mod segment_id;
mod segment_reader;
mod single_segment_index_writer;
use std::path::Path;
@@ -24,7 +23,6 @@ pub use self::segment::Segment;
pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId;
pub use self::segment_reader::SegmentReader;
pub use self::single_segment_index_writer::SingleSegmentIndexWriter;
/// The meta file contains all the information about the list of segments and the schema
/// of the index.

View File

@@ -134,19 +134,6 @@ impl Searcher {
Ok(total_doc_freq)
}
/// Return the overall number of documents containing
/// the given term in an asynchronous manner.
#[cfg(feature = "quickwit")]
pub async fn doc_freq_async(&self, term: &Term) -> crate::Result<u64> {
let mut total_doc_freq = 0;
for segment_reader in &self.inner.segment_readers {
let inverted_index = segment_reader.inverted_index(term.field())?;
let doc_freq = inverted_index.doc_freq_async(term).await?;
total_doc_freq += u64::from(doc_freq);
}
Ok(total_doc_freq)
}
/// Return the list of segment readers
pub fn segment_readers(&self) -> &[SegmentReader] {
&self.inner.segment_readers
@@ -247,14 +234,6 @@ impl SearcherInner {
generation: TrackedObject<SearcherGeneration>,
doc_store_cache_size: usize,
) -> io::Result<SearcherInner> {
assert_eq!(
&segment_readers
.iter()
.map(|reader| (reader.segment_id(), reader.delete_opstamp()))
.collect::<BTreeMap<_, _>>(),
generation.segments(),
"Set of segments referenced by this Searcher and its SearcherGeneration must match"
);
let store_readers: Vec<StoreReader> = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))

View File

@@ -16,7 +16,7 @@ use uuid::Uuid;
/// by a UUID which is used to prefix the filenames
/// of all of the file associated with the segment.
///
/// In unit test, for reproducibility, the `SegmentId` are
/// In unit test, for reproducibility, the `SegmentId` are
/// simply generated in an autoincrement fashion.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid);

View File

@@ -1,47 +0,0 @@
use crate::indexer::operation::AddOperation;
use crate::indexer::segment_updater::save_metas;
use crate::indexer::SegmentWriter;
use crate::{Directory, Document, Index, IndexMeta, Opstamp, Segment};
#[doc(hidden)]
pub struct SingleSegmentIndexWriter {
segment_writer: SegmentWriter,
segment: Segment,
opstamp: Opstamp,
}
impl SingleSegmentIndexWriter {
pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
let segment = index.new_segment();
let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
Ok(Self {
segment_writer,
segment,
opstamp: 0,
})
}
pub fn add_document(&mut self, document: Document) -> crate::Result<()> {
let opstamp = self.opstamp;
self.opstamp += 1;
self.segment_writer
.add_document(AddOperation { opstamp, document })
}
pub fn finalize(self) -> crate::Result<Index> {
let max_doc = self.segment_writer.max_doc();
self.segment_writer.finalize()?;
let segment: Segment = self.segment.with_max_doc(max_doc);
let index = segment.index();
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
segments: vec![segment.meta().clone()],
schema: index.schema(),
opstamp: 0,
payload: None,
};
save_metas(&index_meta, index.directory())?;
index.directory().sync_directory()?;
Ok(segment.index().clone())
}
}

View File

@@ -233,56 +233,4 @@ mod test {
}
Ok(())
}
#[test]
fn test_composite_file_bug() -> crate::Result<()> {
let path = Path::new("test_path");
let directory = RamDirectory::create();
{
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
let mut write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 0);
VInt(32431123u64).serialize(&mut write)?;
write.flush()?;
let write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 1);
write.flush()?;
let mut write = composite_write.for_field_with_idx(Field::from_field_id(0u32), 0);
VInt(1_000_000).serialize(&mut write)?;
write.flush()?;
composite_write.close()?;
}
{
let r = directory.open_read(path)?;
let composite_file = CompositeFile::open(&r)?;
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(1u32), 0)
.unwrap()
.read_bytes()?;
let mut file0_buf = file.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(1u32), 1)
.unwrap()
.read_bytes()?;
let file = file.as_slice();
assert_eq!(file.len(), 0);
}
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(0u32), 0)
.unwrap()
.read_bytes()?;
let file = file.as_slice();
assert_eq!(file.len(), 3);
}
}
Ok(())
}
}

View File

@@ -45,7 +45,7 @@ pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
/// The meta lock file is here to protect the segment files being opened by
/// `IndexReader::reload()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.

View File

@@ -112,7 +112,7 @@ impl FileSlice {
/// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
///
/// The behavior is strongly dependent on the implementation of the underlying
/// The behavior is strongly dependent on the implementation of the underlying
/// `Directory` and the `FileSliceTrait` it creates.
/// In particular, it is up to the `Directory` implementation
/// to handle caching if needed.

View File

@@ -114,7 +114,7 @@ impl ManagedDirectory {
let mut files_to_delete = vec![];
// It is crucial to get the living files after acquiring the
// read lock of meta information. That way, we
// read lock of meta information. That way, we
// avoid the following scenario.
//
// 1) we get the list of living files.

View File

@@ -40,7 +40,7 @@ impl Drop for VecWriter {
fn drop(&mut self) {
if !self.is_flushed {
warn!(
"You forgot to flush {:?} before its writer got Drop. Do not rely on drop. This \
"You forgot to flush {:?} before its writter got Drop. Do not rely on drop. This \
also occurs when the indexer crashed, so you may want to check the logs for the \
root cause.",
self.path

View File

@@ -247,7 +247,7 @@ fn test_lock_blocking(directory: &dyn Directory) {
//< lock_a_res is sent to the thread.
in_thread_clone.store(true, SeqCst);
let _just_sync = receiver.recv();
// explicitly dropping lock_a_res. It would have been sufficient to just force it
// explicitly dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way.
drop(lock_a_res);
});

View File

@@ -1,7 +1,5 @@
use fastfield_codecs::Column;
use crate::directory::{FileSlice, OwnedBytes};
use crate::fastfield::{DynamicFastFieldReader, MultiValueLength};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, MultiValueLength};
use crate::DocId;
/// Reader for byte array fast fields
@@ -30,9 +28,8 @@ impl BytesFastFieldReader {
}
fn range(&self, doc: DocId) -> (usize, usize) {
let idx = doc as u64;
let start = self.idx_reader.get_val(idx) as usize;
let stop = self.idx_reader.get_val(idx + 1) as usize;
let start = self.idx_reader.get(doc) as usize;
let stop = self.idx_reader.get(doc + 1) as usize;
(start, stop)
}
@@ -55,6 +52,11 @@ impl BytesFastFieldReader {
}
impl MultiValueLength for BytesFastFieldReader {
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
let (start, stop) = self.range(doc_id);
start as u64..stop as u64
}
fn get_len(&self, doc_id: DocId) -> u64 {
self.num_bytes(doc_id) as u64
}

241
src/fastfield/fast_value.rs Normal file
View File

@@ -0,0 +1,241 @@
use std::net::{IpAddr, Ipv6Addr};
use crate::schema::{Cardinality, FieldType, Type};
use crate::DateTime;
pub fn ip_to_u128(ip_addr: IpAddr) -> u128 {
let ip_addr_v6: Ipv6Addr = match ip_addr {
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
IpAddr::V6(v6) => v6,
};
u128::from_be_bytes(ip_addr_v6.octets())
}
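Usage sketch: because v4 addresses go through `to_ipv6_mapped`, a v4 address and its IPv6-mapped form share one encoding.

```rust
#[test]
fn ipv4_and_mapped_ipv6_share_an_encoding() {
    let v4: std::net::IpAddr = "127.0.0.1".parse().unwrap();
    let v6: std::net::IpAddr = "::ffff:127.0.0.1".parse().unwrap();
    assert_eq!(ip_to_u128(v4), ip_to_u128(v6));
}
```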
/// Trait for large types that are allowed for fast fields: u128, IpAddr
pub trait FastValueU128: Clone + Copy + Send + Sync + PartialOrd + 'static {
/// Converts a value from u128
///
/// Internally all fast field values are encoded as u128.
fn from_u128(val: u128) -> Self;
/// Converts a value to u128.
///
/// Internally all fast field values are encoded as u128.
fn to_u128(&self) -> u128;
/// Cast value to `u128`.
/// The value is just reinterpreted in memory.
fn as_u128(&self) -> u128;
/// Returns the `schema::Type` for this FastValue.
fn to_type() -> Type;
/// Build a default value. This default value is never used, so the value does not
/// really matter.
fn make_zero() -> Self {
Self::from_u128(0u128)
}
}
impl FastValueU128 for u128 {
fn from_u128(val: u128) -> Self {
val
}
fn to_u128(&self) -> u128 {
*self
}
fn as_u128(&self) -> u128 {
*self
}
fn to_type() -> Type {
Type::U128
}
}
impl FastValueU128 for IpAddr {
fn from_u128(val: u128) -> Self {
IpAddr::from(val.to_be_bytes())
}
fn to_u128(&self) -> u128 {
ip_to_u128(*self)
}
fn as_u128(&self) -> u128 {
ip_to_u128(*self)
}
fn to_type() -> Type {
Type::Ip
}
}
/// Trait for types that are allowed for fast fields:
/// (u64, i64 and f64, bool, DateTime).
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
/// Converts a value from u64
///
/// Internally all fast field values are encoded as u64.
/// **Note: To be used for converting encoded Term, Posting values.**
fn from_u64(val: u64) -> Self;
/// Converts a value to u64.
///
/// Internally all fast field values are encoded as u64.
fn to_u64(&self) -> u64;
/// Returns the fast field cardinality that can be extracted from the given
/// `FieldType`.
///
/// If the type is not a fast field, `None` is returned.
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality>;
/// Cast value to `u64`.
/// The value is just reinterpreted in memory.
fn as_u64(&self) -> u64;
/// Build a default value. This default value is never used, so the value does not
/// really matter.
fn make_zero() -> Self {
Self::from_u64(0i64.to_u64())
}
/// Returns the `schema::Type` for this FastValue.
fn to_type() -> Type;
}
impl FastValue for u64 {
fn from_u64(val: u64) -> Self {
val
}
fn to_u64(&self) -> u64 {
*self
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
FieldType::Facet(_) => Some(Cardinality::MultiValues),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self
}
fn to_type() -> Type {
Type::U64
}
}
impl FastValue for i64 {
fn from_u64(val: u64) -> Self {
common::u64_to_i64(val)
}
fn to_u64(&self) -> u64 {
common::i64_to_u64(*self)
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::I64(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self as u64
}
fn to_type() -> Type {
Type::I64
}
}
impl FastValue for f64 {
fn from_u64(val: u64) -> Self {
common::u64_to_f64(val)
}
fn to_u64(&self) -> u64 {
common::f64_to_u64(*self)
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.to_bits()
}
fn to_type() -> Type {
Type::F64
}
}
impl FastValue for bool {
fn from_u64(val: u64) -> Self {
val != 0u64
}
fn to_u64(&self) -> u64 {
match self {
false => 0,
true => 1,
}
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Bool(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self as u64
}
fn to_type() -> Type {
Type::Bool
}
}
impl FastValue for DateTime {
/// Converts a timestamp in microseconds into a DateTime.
///
/// **Note: the timestamp is expected to be in microseconds.**
fn from_u64(timestamp_micros_u64: u64) -> Self {
let timestamp_micros = i64::from_u64(timestamp_micros_u64);
Self::from_timestamp_micros(timestamp_micros)
}
fn to_u64(&self) -> u64 {
common::i64_to_u64(self.into_timestamp_micros())
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Date(ref options) => options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.into_timestamp_micros().as_u64()
}
fn to_type() -> Type {
Type::Date
}
}
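All of the impls above are meant to round-trip through u64; a property sketch (the extra `PartialEq`/`Debug` bounds are only for the assertion):

```rust
fn assert_round_trip<T: FastValue + PartialEq + std::fmt::Debug>(val: T) {
    assert_eq!(T::from_u64(val.to_u64()), val);
}

// assert_round_trip(-42i64);
// assert_round_trip(3.14f64);
// assert_round_trip(true);
```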

View File

@@ -1,130 +1,80 @@
use std::io::{self, Write};
use std::num::NonZeroU64;
use common::BinarySerializable;
use fastdivide::DividerU64;
use fastfield_codecs::{Column, FastFieldCodec};
use ownedbytes::OwnedBytes;
use fastfield_codecs::FastFieldCodecReader;
use gcd::Gcd;
pub const GCD_DEFAULT: u64 = 1;
pub const GCD_CODEC_ID: u8 = 4;
/// Wrapper for accessing a fastfield.
///
/// Holds the data and the codec to read the data.
#[derive(Clone)]
pub struct GCDReader<CodecReader: Column> {
gcd_params: GCDParams,
reader: CodecReader,
}
#[derive(Debug, Clone, Copy)]
struct GCDParams {
pub struct GCDFastFieldCodec<CodecReader> {
gcd: u64,
min_value: u64,
num_vals: u64,
reader: CodecReader,
}
impl<C: FastFieldCodecReader + Clone> FastFieldCodecReader for GCDFastFieldCodec<C> {
/// Opens a fast field given the bytes.
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self> {
let (header, mut footer) = bytes.split_at(bytes.len() - 16);
let gcd = u64::deserialize(&mut footer)?;
let min_value = u64::deserialize(&mut footer)?;
let reader = C::open_from_bytes(header)?;
impl GCDParams {
pub fn eval(&self, val: u64) -> u64 {
self.min_value + self.gcd * val
}
}
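A worked decode example for `GCDParams::eval`, with hypothetical parameters: stored values 0, 1, 2 with `gcd = 1000` and `min_value = 4000` decode back to 4000, 5000, 6000.

```rust
#[test]
fn gcd_params_eval_decodes() {
    let params = GCDParams { gcd: 1000, min_value: 4000, num_vals: 3 };
    let decoded: Vec<u64> = (0..3).map(|v| params.eval(v)).collect();
    assert_eq!(decoded, vec![4000, 5000, 6000]);
}
```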
impl BinarySerializable for GCDParams {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.gcd.serialize(writer)?;
self.min_value.serialize(writer)?;
self.num_vals.serialize(writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let gcd: u64 = u64::deserialize(reader)?;
let min_value: u64 = u64::deserialize(reader)?;
let num_vals: u64 = u64::deserialize(reader)?;
Ok(Self {
Ok(GCDFastFieldCodec {
gcd,
min_value,
num_vals,
reader,
})
}
}
pub fn open_gcd_from_bytes<WrappedCodec: FastFieldCodec>(
bytes: OwnedBytes,
) -> io::Result<GCDReader<WrappedCodec::Reader>> {
let footer_offset = bytes.len() - 24;
let (body, mut footer) = bytes.split(footer_offset);
let gcd_params = GCDParams::deserialize(&mut footer)?;
let reader: WrappedCodec::Reader = WrappedCodec::open_from_bytes(body)?;
Ok(GCDReader { gcd_params, reader })
}
impl<C: Column + Clone> Column for GCDReader<C> {
#[inline]
fn get_val(&self, doc: u64) -> u64 {
let val = self.reader.get_val(doc);
self.gcd_params.eval(val)
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let mut data = self.reader.get_u64(doc, data);
data *= self.gcd;
data += self.min_value;
data
}
fn min_value(&self) -> u64 {
self.gcd_params.eval(self.reader.min_value())
self.min_value + self.reader.min_value() * self.gcd
}
fn max_value(&self) -> u64 {
self.gcd_params.eval(self.reader.max_value())
}
fn num_vals(&self) -> u64 {
self.gcd_params.num_vals
self.min_value + self.reader.max_value() * self.gcd
}
}
pub fn write_gcd_header<W: Write>(
field_write: &mut W,
min_value: u64,
gcd: u64,
num_vals: u64,
) -> io::Result<()> {
pub fn write_gcd_header<W: Write>(field_write: &mut W, min_value: u64, gcd: u64) -> io::Result<()> {
gcd.serialize(field_write)?;
min_value.serialize(field_write)?;
num_vals.serialize(field_write)?;
Ok(())
}
/// Compute the GCD of two non-zero numbers.
///
/// It is recommended, but not required, to feed values such that `large >= small`.
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
loop {
let rem: u64 = large.get() % small;
if let Some(new_small) = NonZeroU64::new(rem) {
(large, small) = (small, new_small);
} else {
return small;
}
}
}
// Find the GCD for an iterator of numbers.
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
let mut numbers = numbers.flat_map(NonZeroU64::new);
let mut gcd: NonZeroU64 = numbers.next()?;
if gcd.get() == 1 {
return Some(gcd);
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<u64> {
let mut numbers = numbers.filter(|n| *n != 0);
let mut gcd = numbers.next()?;
if gcd == 1 {
return Some(1);
}
let mut gcd_divider = DividerU64::divide_by(gcd.get());
let mut gcd_divider = DividerU64::divide_by(gcd);
for val in numbers {
let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
let remainder = val - (gcd_divider.divide(val)) * gcd;
if remainder == 0 {
continue;
}
gcd = compute_gcd(val, gcd);
if gcd.get() == 1 {
return Some(gcd);
gcd = gcd.gcd(val);
if gcd == 1 {
return Some(1);
}
gcd_divider = DividerU64::divide_by(gcd.get());
gcd_divider = DividerU64::divide_by(gcd);
}
Some(gcd)
}
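Usage sketch for the `NonZeroU64`-returning version on the left-hand side of this hunk: zeros are skipped, so they cannot drag the GCD down.

```rust
use std::num::NonZeroU64;

#[test]
fn find_gcd_skips_zeros() {
    assert_eq!(
        find_gcd([0, 1000, 2500, 3000].into_iter()),
        NonZeroU64::new(500)
    );
}
```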
@@ -132,23 +82,19 @@ pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::{Duration, SystemTime};
use common::HasLen;
use fastfield_codecs::Column;
use crate::directory::{CompositeFile, RamDirectory, WritePtr};
use crate::fastfield::gcd::compute_gcd;
use crate::fastfield::serializer::FastFieldCodecEnableCheck;
use crate::fastfield::tests::{FIELD, FIELDI64, SCHEMA, SCHEMAI64};
use crate::fastfield::{
find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecType,
FastFieldsWriter, ALL_CODECS,
find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecName,
FastFieldReader, FastFieldsWriter, ALL_CODECS,
};
use crate::schema::{Cardinality, Schema};
use crate::{DateOptions, DatePrecision, DateTime, Directory};
use crate::schema::Schema;
use crate::Directory;
fn get_index(
docs: &[crate::Document],
@@ -174,33 +120,33 @@ mod tests {
}
fn test_fastfield_gcd_i64_with_codec(
code_type: FastFieldCodecType,
codec_name: FastFieldCodecName,
num_vals: usize,
) -> crate::Result<()> {
let path = Path::new("test");
let mut docs = vec![];
for i in 1..=num_vals {
let val = (i as i64 - 5) * 1000i64;
let val = i as i64 * 1000i64;
docs.push(doc!(*FIELDI64=>val));
}
let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
let directory = get_index(&docs, &SCHEMAI64, codec_name.clone().into())?;
let file = directory.open_read(path).unwrap();
// assert_eq!(file.len(), 118);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<i64>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), -4000i64);
assert_eq!(fast_field_reader.get_val(1), -3000i64);
assert_eq!(fast_field_reader.get_val(2), -2000i64);
assert_eq!(fast_field_reader.max_value(), (num_vals as i64 - 5) * 1000);
assert_eq!(fast_field_reader.min_value(), -4000i64);
assert_eq!(fast_field_reader.get(0), 1000i64);
assert_eq!(fast_field_reader.get(1), 2000i64);
assert_eq!(fast_field_reader.get(2), 3000i64);
assert_eq!(fast_field_reader.max_value(), num_vals as i64 * 1000);
assert_eq!(fast_field_reader.min_value(), 1000i64);
let file = directory.open_read(path).unwrap();
// Can't apply gcd
let path = Path::new("test");
docs.pop();
docs.push(doc!(*FIELDI64=>2001i64));
let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
let directory = get_index(&docs, &SCHEMAI64, codec_name.into())?;
let file2 = directory.open_read(path).unwrap();
assert!(file2.len() > file.len());
@@ -209,14 +155,14 @@ mod tests {
#[test]
fn test_fastfield_gcd_i64() -> crate::Result<()> {
for &code_type in ALL_CODECS {
test_fastfield_gcd_i64_with_codec(code_type, 5500)?;
for codec_name in ALL_CODECS {
test_fastfield_gcd_i64_with_codec(codec_name.clone(), 5005)?;
}
Ok(())
}
fn test_fastfield_gcd_u64_with_codec(
code_type: FastFieldCodecType,
codec_name: FastFieldCodecName,
num_vals: usize,
) -> crate::Result<()> {
let path = Path::new("test");
@@ -225,14 +171,15 @@ mod tests {
let val = i as u64 * 1000u64;
docs.push(doc!(*FIELD=>val));
}
let directory = get_index(&docs, &SCHEMA, code_type.into())?;
let directory = get_index(&docs, &SCHEMA, codec_name.clone().into())?;
let file = directory.open_read(path).unwrap();
// assert_eq!(file.len(), 118);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), 1000u64);
assert_eq!(fast_field_reader.get_val(1), 2000u64);
assert_eq!(fast_field_reader.get_val(2), 3000u64);
assert_eq!(fast_field_reader.get(0), 1000u64);
assert_eq!(fast_field_reader.get(1), 2000u64);
assert_eq!(fast_field_reader.get(2), 3000u64);
assert_eq!(fast_field_reader.max_value(), num_vals as u64 * 1000);
assert_eq!(fast_field_reader.min_value(), 1000u64);
let file = directory.open_read(path).unwrap();
@@ -241,7 +188,7 @@ mod tests {
let path = Path::new("test");
docs.pop();
docs.push(doc!(*FIELDI64=>2001u64));
let directory = get_index(&docs, &SCHEMA, code_type.into())?;
let directory = get_index(&docs, &SCHEMA, codec_name.into())?;
let file2 = directory.open_read(path).unwrap();
assert!(file2.len() > file.len());
@@ -250,8 +197,8 @@ mod tests {
#[test]
fn test_fastfield_gcd_u64() -> crate::Result<()> {
for &code_type in ALL_CODECS {
test_fastfield_gcd_u64_with_codec(code_type, 5500)?;
for codec_name in ALL_CODECS {
test_fastfield_gcd_u64_with_codec(codec_name.clone(), 5005)?;
}
Ok(())
}
@@ -259,103 +206,19 @@ mod tests {
#[test]
pub fn test_fastfield2() {
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get_val(0), 100);
assert_eq!(test_fastfield.get_val(1), 200);
assert_eq!(test_fastfield.get_val(2), 300);
}
#[test]
pub fn test_gcd_date() -> crate::Result<()> {
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
assert!(size_prec_sec < size_prec_micro);
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Seconds)?;
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Microseconds)?;
assert!(size_prec_sec < size_prec_micro);
Ok(())
}
fn test_gcd_date_with_codec(
codec_type: FastFieldCodecType,
precision: DatePrecision,
) -> crate::Result<usize> {
let time1 = DateTime::from_timestamp_micros(
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let time2 = DateTime::from_timestamp_micros(
SystemTime::now()
.checked_sub(Duration::from_micros(4111))
.unwrap()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let time3 = DateTime::from_timestamp_micros(
SystemTime::now()
.checked_sub(Duration::from_millis(2000))
.unwrap()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let mut schema_builder = Schema::builder();
let date_options = DateOptions::default()
.set_fast(Cardinality::SingleValue)
.set_precision(precision);
let field = schema_builder.add_date_field("field", date_options);
let schema = schema_builder.build();
let docs = vec![doc!(field=>time1), doc!(field=>time2), doc!(field=>time3)];
let directory = get_index(&docs, &schema, codec_type.into())?;
let path = Path::new("test");
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let len = file.len();
let test_fastfield = DynamicFastFieldReader::<DateTime>::open(file)?;
assert_eq!(test_fastfield.get_val(0), time1.truncate(precision));
assert_eq!(test_fastfield.get_val(1), time2.truncate(precision));
assert_eq!(test_fastfield.get_val(2), time3.truncate(precision));
Ok(len)
}
#[test]
fn test_compute_gcd() {
let test_compute_gcd_aux = |large, small, expected| {
let large = NonZeroU64::new(large).unwrap();
let small = NonZeroU64::new(small).unwrap();
let expected = NonZeroU64::new(expected).unwrap();
assert_eq!(compute_gcd(small, large), expected);
assert_eq!(compute_gcd(large, small), expected);
};
test_compute_gcd_aux(1, 4, 1);
test_compute_gcd_aux(2, 4, 2);
test_compute_gcd_aux(10, 25, 5);
test_compute_gcd_aux(25, 25, 25);
assert_eq!(test_fastfield.get(0), 100);
assert_eq!(test_fastfield.get(1), 200);
assert_eq!(test_fastfield.get(2), 300);
}
#[test]
fn find_gcd_test() {
assert_eq!(find_gcd([0].into_iter()), None);
assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
assert_eq!(find_gcd([0, 10].into_iter()), Some(10));
assert_eq!(find_gcd([10, 0].into_iter()), Some(10));
assert_eq!(find_gcd([].into_iter()), None);
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
assert_eq!(find_gcd([0, 0].into_iter()), None);
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), Some(5));
assert_eq!(find_gcd([15, 16, 10].into_iter()), Some(1));
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), Some(5));
}
}

View File

@@ -20,26 +20,30 @@
//!
//! Read access performance is comparable to that of an array lookup.
use fastfield_codecs::FastFieldCodecType;
use std::collections::btree_map::Range;
pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub(crate) use self::gcd::{find_gcd, GCDReader, GCD_DEFAULT};
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
pub use self::reader::DynamicFastFieldReader;
pub use self::fast_value::{FastValue, FastValueU128};
pub(crate) use self::gcd::{find_gcd, GCDFastFieldCodec, GCD_CODEC_ID, GCD_DEFAULT};
pub use self::multivalued::{
MultiValuedFastFieldReader, MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
};
pub use self::reader::{DynamicFastFieldReader, FastFieldReader, FastFieldReaderCodecWrapperU128};
pub use self::readers::FastFieldReaders;
pub(crate) use self::readers::{type_and_cardinality, FastType};
pub use self::serializer::{Column, CompositeFastFieldSerializer, FastFieldStats};
pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats};
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::schema::{Cardinality, FieldType, Type, Value};
use crate::{DateTime, DocId};
use crate::schema::Value;
use crate::DocId;
mod alive_bitset;
mod bytes;
mod error;
mod facet_reader;
mod fast_value;
mod gcd;
mod multivalued;
mod reader;
@@ -47,188 +51,18 @@ mod readers;
mod serializer;
mod writer;
pub(crate) const ALL_CODECS: &[FastFieldCodecType; 3] = &[
FastFieldCodecType::Bitpacked,
FastFieldCodecType::Linear,
FastFieldCodecType::BlockwiseLinear,
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub(crate) enum FastFieldCodecName {
Bitpacked,
LinearInterpol,
BlockwiseLinearInterpol,
}
pub(crate) const ALL_CODECS: &[FastFieldCodecName; 3] = &[
FastFieldCodecName::Bitpacked,
FastFieldCodecName::LinearInterpol,
FastFieldCodecName::BlockwiseLinearInterpol,
];
/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
/// for a doc_id
pub trait MultiValueLength {
/// returns the num of values associated to a doc_id
fn get_len(&self, doc_id: DocId) -> u64;
/// returns the sum of num values for all doc_ids
fn get_total_len(&self) -> u64;
}
/// Trait for types that are allowed for fast fields:
/// (u64, i64 and f64, bool, DateTime).
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
/// Converts a value from u64
///
/// Internally all fast field values are encoded as u64.
/// **Note: To be used for converting encoded Term, Posting values.**
fn from_u64(val: u64) -> Self;
/// Converts a value to u64.
///
/// Internally all fast field values are encoded as u64.
fn to_u64(&self) -> u64;
/// Returns the fast field cardinality that can be extracted from the given
/// `FieldType`.
///
/// If the type is not a fast field, `None` is returned.
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality>;
/// Cast value to `u64`.
/// The value is just reinterpreted in memory.
fn as_u64(&self) -> u64;
/// Build a default value. This default value is never used, so the value does not
/// really matter.
fn make_zero() -> Self {
Self::from_u64(0i64.to_u64())
}
/// Returns the `schema::Type` for this FastValue.
fn to_type() -> Type;
}
impl FastValue for u64 {
fn from_u64(val: u64) -> Self {
val
}
fn to_u64(&self) -> u64 {
*self
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
FieldType::Facet(_) => Some(Cardinality::MultiValues),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self
}
fn to_type() -> Type {
Type::U64
}
}
impl FastValue for i64 {
fn from_u64(val: u64) -> Self {
common::u64_to_i64(val)
}
fn to_u64(&self) -> u64 {
common::i64_to_u64(*self)
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::I64(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self as u64
}
fn to_type() -> Type {
Type::I64
}
}
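// Hedged aside: a standalone sketch (not the `common` crate itself) of an
// order-preserving i64 <-> u64 mapping like the one used above. Flipping the
// sign bit maps i64::MIN..=i64::MAX monotonically onto 0..=u64::MAX, which is
// what lets the codecs treat every fast value as a plain u64.
#[cfg(test)]
mod i64_mapping_sketch {
    const HIGHEST_BIT: u64 = 1 << 63;

    fn i64_to_u64_sketch(val: i64) -> u64 {
        (val as u64) ^ HIGHEST_BIT
    }

    fn u64_to_i64_sketch(val: u64) -> i64 {
        (val ^ HIGHEST_BIT) as i64
    }

    #[test]
    fn test_mapping_is_monotone_and_reversible() {
        // Ordering is preserved across the encoding, and it round-trips.
        assert!(i64_to_u64_sketch(-1) < i64_to_u64_sketch(0));
        assert_eq!(u64_to_i64_sketch(i64_to_u64_sketch(-42)), -42);
    }
}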
impl FastValue for f64 {
fn from_u64(val: u64) -> Self {
common::u64_to_f64(val)
}
fn to_u64(&self) -> u64 {
common::f64_to_u64(*self)
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.to_bits()
}
fn to_type() -> Type {
Type::F64
}
}
impl FastValue for bool {
fn from_u64(val: u64) -> Self {
val != 0u64
}
fn to_u64(&self) -> u64 {
match self {
false => 0,
true => 1,
}
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Bool(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self as u64
}
fn to_type() -> Type {
Type::Bool
}
}
impl FastValue for DateTime {
/// Converts a timestamp in microseconds into a DateTime.
///
/// **Note: the timestamp is expected to be in microseconds.**
fn from_u64(timestamp_micros_u64: u64) -> Self {
let timestamp_micros = i64::from_u64(timestamp_micros_u64);
Self::from_timestamp_micros(timestamp_micros)
}
fn to_u64(&self) -> u64 {
common::i64_to_u64(self.into_timestamp_micros())
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Date(ref options) => options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.into_timestamp_micros().as_u64()
}
fn to_type() -> Type {
Type::Date
}
}
fn value_to_u64(value: &Value) -> u64 {
match value {
Value::U64(val) => val.to_u64(),
@@ -240,6 +74,17 @@ fn value_to_u64(value: &Value) -> u64 {
}
}
/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
/// for a doc_id
pub trait MultiValueLength {
/// returns the range of value positions associated to a doc_id
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64>;
/// returns the num of values associated to a doc_id
fn get_len(&self, doc_id: DocId) -> u64;
/// returns the sum of num values for all doc_ids
fn get_total_len(&self) -> u64;
}
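// A hedged, minimal sketch of the contract above: a toy offsets-backed index
// where `offsets` holds one start position per doc plus a final end position,
// so doc `d` owns value positions offsets[d]..offsets[d + 1].
#[cfg(test)]
mod multi_value_length_sketch {
    use super::*;

    struct OffsetsSketch {
        offsets: Vec<u64>, // len == num_docs + 1
    }

    impl MultiValueLength for OffsetsSketch {
        fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
            self.offsets[doc_id as usize]..self.offsets[doc_id as usize + 1]
        }
        fn get_len(&self, doc_id: DocId) -> u64 {
            let range = self.get_range(doc_id);
            range.end - range.start
        }
        fn get_total_len(&self) -> u64 {
            *self.offsets.last().unwrap_or(&0)
        }
    }

    #[test]
    fn test_offsets_sketch() {
        let idx = OffsetsSketch { offsets: vec![0, 2, 2, 5] };
        assert_eq!(idx.get_range(1), 2..2); // doc 1 has no values
        assert_eq!(idx.get_len(2), 3);
        assert_eq!(idx.get_total_len(), 5);
    }
}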
/// The fast field type
pub enum FastFieldType {
/// Numeric type, e.g. f64.
@@ -264,6 +109,7 @@ impl FastFieldType {
mod tests {
use std::collections::HashMap;
use std::net::IpAddr;
use std::ops::Range;
use std::path::Path;
@@ -276,9 +122,11 @@ mod tests {
use super::*;
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::merge_policy::NoMergePolicy;
use crate::schema::{Document, Field, Schema, FAST, STRING, TEXT};
use crate::schema::{
self, Cardinality, Document, Field, IpOptions, Schema, FAST, INDEXED, STORED, STRING, TEXT,
};
use crate::time::OffsetDateTime;
use crate::{DateOptions, DatePrecision, Index, SegmentId, SegmentReader};
use crate::{DateOptions, DatePrecision, DateTime, Index, SegmentId, SegmentReader};
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
let mut schema_builder = Schema::builder();
@@ -298,13 +146,13 @@ mod tests {
#[test]
pub fn test_fastfield() {
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get_val(0u64), 100);
assert_eq!(test_fastfield.get_val(1u64), 200);
assert_eq!(test_fastfield.get_val(2u64), 300);
assert_eq!(test_fastfield.get(0), 100);
assert_eq!(test_fastfield.get(1), 200);
assert_eq!(test_fastfield.get(2), 300);
}
#[test]
pub fn test_fastfield_i64_u64() {
pub fn test_datetime_conversion() {
let datetime = DateTime::from_utc(OffsetDateTime::UNIX_EPOCH);
assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
}
@@ -326,13 +174,13 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 45);
assert_eq!(file.len(), 37);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), 13u64);
assert_eq!(fast_field_reader.get_val(1), 14u64);
assert_eq!(fast_field_reader.get_val(2), 2u64);
assert_eq!(fast_field_reader.get(0), 13u64);
assert_eq!(fast_field_reader.get(1), 14u64);
assert_eq!(fast_field_reader.get(2), 2u64);
Ok(())
}
@@ -357,20 +205,20 @@ mod tests {
serializer.close()?;
}
let file = directory.open_read(path)?;
assert_eq!(file.len(), 70);
assert_eq!(file.len(), 62);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
assert_eq!(fast_field_reader.get_val(0), 4u64);
assert_eq!(fast_field_reader.get_val(1), 14_082_001u64);
assert_eq!(fast_field_reader.get_val(2), 3_052u64);
assert_eq!(fast_field_reader.get_val(3), 9002u64);
assert_eq!(fast_field_reader.get_val(4), 15_001u64);
assert_eq!(fast_field_reader.get_val(5), 777u64);
assert_eq!(fast_field_reader.get_val(6), 1_002u64);
assert_eq!(fast_field_reader.get_val(7), 1_501u64);
assert_eq!(fast_field_reader.get_val(8), 215u64);
assert_eq!(fast_field_reader.get(0), 4u64);
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
assert_eq!(fast_field_reader.get(2), 3_052u64);
assert_eq!(fast_field_reader.get(3), 9002u64);
assert_eq!(fast_field_reader.get(4), 15_001u64);
assert_eq!(fast_field_reader.get(5), 777u64);
assert_eq!(fast_field_reader.get(6), 1_002u64);
assert_eq!(fast_field_reader.get(7), 1_501u64);
assert_eq!(fast_field_reader.get(8), 215u64);
}
Ok(())
}
@@ -393,13 +241,13 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 43);
assert_eq!(file.len(), 35);
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
for doc in 0..10_000 {
assert_eq!(fast_field_reader.get_val(doc), 100_000u64);
assert_eq!(fast_field_reader.get(doc), 100_000u64);
}
}
Ok(())
@@ -425,15 +273,15 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 80051);
assert_eq!(file.len(), 80043);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
assert_eq!(fast_field_reader.get_val(0), 0u64);
assert_eq!(fast_field_reader.get(0), 0u64);
for doc in 1..10_001 {
assert_eq!(
fast_field_reader.get_val(doc),
fast_field_reader.get(doc),
5_000_000_000_000_000_000u64 + doc as u64 - 1u64
);
}
@@ -465,8 +313,7 @@ mod tests {
}
let file = directory.open_read(path).unwrap();
// assert_eq!(file.len(), 17710 as usize); //bitpacked size
// assert_eq!(file.len(), 10175_usize); // linear interpol size
assert_eq!(file.len(), 75_usize); // linear interpol size after calc improvement
assert_eq!(file.len(), 10175_usize); // linear interpol size
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(i64_field).unwrap();
@@ -475,7 +322,7 @@ mod tests {
assert_eq!(fast_field_reader.min_value(), -100i64);
assert_eq!(fast_field_reader.max_value(), 9_999i64);
for (doc, i) in (-100i64..10_000i64).enumerate() {
assert_eq!(fast_field_reader.get_val(doc as u64), i);
assert_eq!(fast_field_reader.get(doc as u32), i);
}
let mut buffer = vec![0i64; 100];
fast_field_reader.get_range(53, &mut buffer[..]);
@@ -511,7 +358,7 @@ mod tests {
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(i64_field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
assert_eq!(fast_field_reader.get_val(0), 0i64);
assert_eq!(fast_field_reader.get(0u32), 0i64);
}
Ok(())
}
@@ -551,7 +398,7 @@ mod tests {
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
for a in 0..n {
assert_eq!(fast_field_reader.get_val(a as u64), permutation[a as usize]);
assert_eq!(fast_field_reader.get(a as u32), permutation[a as usize]);
}
}
Ok(())
@@ -614,6 +461,85 @@ mod tests {
all
}
#[test]
fn test_ip_fastfield_minimal() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let ip_field = schema_builder.add_ip_field("ip", FAST | INDEXED | STORED);
let ips_field = schema_builder.add_ip_field(
"ips",
IpOptions::default().set_fast(Cardinality::MultiValues),
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let ip1 = IpAddr::from((1_u128).to_be_bytes());
let ip2 = IpAddr::from((2_u128).to_be_bytes());
let ip3 = IpAddr::from((3_u128).to_be_bytes());
let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!())?;
index_writer.add_document(doc!(
ip_field => ip2,
ips_field => ip2,
ips_field => ip2,
))?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
// single value
let ip_addr_fast_field = fast_fields.ip_addr(ip_field).unwrap();
assert_eq!(ip_addr_fast_field.get_val(0), None);
assert_eq!(ip_addr_fast_field.get_val(1), Some(ip2));
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip2), vec![1]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip2), vec![1]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip3), vec![1]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip3), vec![1]);
assert_eq!(
ip_addr_fast_field.get_between_vals(ip1..=ip1),
vec![] as Vec<usize>
);
assert_eq!(
ip_addr_fast_field.get_between_vals(ip3..=ip3),
vec![] as Vec<usize>
);
// multi value
let ip_addr_fast_field = fast_fields.ip_addrs(ips_field).unwrap();
assert_eq!(ip_addr_fast_field.get_first_val(0), None);
assert_eq!(ip_addr_fast_field.get_first_val(1), Some(ip2));
let mut out = vec![];
ip_addr_fast_field.get_vals(0, &mut out);
assert_eq!(out, vec![] as Vec<IpAddr>);
let mut out = vec![];
ip_addr_fast_field.get_vals(1, &mut out);
assert_eq!(out, vec![ip2, ip2]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip2), vec![1]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip2), vec![1]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip3), vec![1]);
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip3), vec![1]);
assert_eq!(
ip_addr_fast_field.get_between_vals(ip1..=ip1),
vec![] as Vec<usize>
);
assert_eq!(
ip_addr_fast_field.get_between_vals(ip3..=ip3),
vec![] as Vec<usize>
);
Ok(())
}
#[test]
fn test_text_fastfield() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
@@ -842,19 +768,19 @@ mod tests {
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
let mut dates = vec![];
{
assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
assert_eq!(date_fast_field.get(0u32).into_timestamp_micros(), 1i64);
dates_fast_field.get_vals(0u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].into_timestamp_micros(), 2i64);
assert_eq!(dates[1].into_timestamp_micros(), 3i64);
}
{
assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
assert_eq!(date_fast_field.get(1u32).into_timestamp_micros(), 4i64);
dates_fast_field.get_vals(1u32, &mut dates);
assert!(dates.is_empty());
}
{
assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
assert_eq!(date_fast_field.get(2u32).into_timestamp_micros(), 0i64);
dates_fast_field.get_vals(2u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].into_timestamp_micros(), 5i64);
@@ -866,10 +792,10 @@ mod tests {
#[test]
pub fn test_fastfield_bool() {
let test_fastfield = DynamicFastFieldReader::<bool>::from(vec![true, false, true, false]);
assert_eq!(test_fastfield.get_val(0), true);
assert_eq!(test_fastfield.get_val(1), false);
assert_eq!(test_fastfield.get_val(2), true);
assert_eq!(test_fastfield.get_val(3), false);
assert_eq!(test_fastfield.get(0), true);
assert_eq!(test_fastfield.get(1), false);
assert_eq!(test_fastfield.get(2), true);
assert_eq!(test_fastfield.get(3), false);
}
#[test]
@@ -896,14 +822,14 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 44);
assert_eq!(file.len(), 36);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), true);
assert_eq!(fast_field_reader.get_val(1), false);
assert_eq!(fast_field_reader.get_val(2), true);
assert_eq!(fast_field_reader.get_val(3), false);
assert_eq!(fast_field_reader.get(0), true);
assert_eq!(fast_field_reader.get(1), false);
assert_eq!(fast_field_reader.get(2), true);
assert_eq!(fast_field_reader.get(3), false);
Ok(())
}
@@ -932,13 +858,13 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 56);
assert_eq!(file.len(), 48);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
for i in 0..25 {
assert_eq!(fast_field_reader.get_val(i * 2), true);
assert_eq!(fast_field_reader.get_val(i * 2 + 1), false);
assert_eq!(fast_field_reader.get(i * 2), true);
assert_eq!(fast_field_reader.get(i * 2 + 1), false);
}
Ok(())
@@ -966,11 +892,11 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 43);
assert_eq!(file.len(), 35);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(field).unwrap();
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
assert_eq!(fast_field_reader.get_val(0), false);
assert_eq!(fast_field_reader.get(0), false);
Ok(())
}
@@ -978,20 +904,37 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench {
use fastfield_codecs::Column;
use std::collections::HashMap;
use std::path::Path;
use test::{self, Bencher};
use super::tests::generate_permutation;
use super::tests::{generate_permutation, FIELD, SCHEMA};
use super::*;
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::fastfield::tests::generate_permutation_gcd;
use crate::fastfield::FastFieldReader;
#[bench]
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
fn bench_intfastfield_linear_veclookup(b: &mut Bencher) {
let permutation = generate_permutation();
let n = permutation.len();
b.iter(|| {
let n = test::black_box(7000u32);
let mut a = 0u64;
for _ in 0..n {
for i in (0u32..n / 7).map(|v| v * 7) {
a ^= permutation[i as usize];
}
a
});
}
#[bench]
fn bench_intfastfield_veclookup(b: &mut Bencher) {
let permutation = generate_permutation();
b.iter(|| {
let n = test::black_box(1000u32);
let mut a = 0u64;
for _ in 0u32..n {
a = permutation[a as usize];
}
a
@@ -999,83 +942,102 @@ mod bench {
}
#[bench]
fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
let path = Path::new("test");
let permutation = generate_permutation();
let n = permutation.len();
let column = DynamicFastFieldReader::from(permutation);
b.iter(|| {
let mut a = 0u64;
for _ in 0..n {
a = column.get_val(a as u64);
let directory: RamDirectory = RamDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
}
a
});
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
b.iter(|| {
let n = test::black_box(7000u32);
let mut a = 0u64;
for i in (0u32..n / 7).map(|val| val * 7) {
a ^= fast_field_reader.get(i);
}
a
});
}
}
#[bench]
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
fn bench_intfastfield_fflookup(b: &mut Bencher) {
let path = Path::new("test");
let permutation = generate_permutation();
let n = permutation.len();
b.iter(|| {
let mut a = 0u64;
for i in (0..n / 7).map(|val| val * 7) {
a += permutation[i as usize];
let directory: RamDirectory = RamDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
}
a
});
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
b.iter(|| {
let mut a = 0u32;
for i in 0u32..permutation.len() as u32 {
a = fast_field_reader.get(i) as u32;
}
a
});
}
}
#[bench]
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
let permutation = generate_permutation();
let n = permutation.len();
let column = DynamicFastFieldReader::from(permutation);
b.iter(|| {
let mut a = 0u64;
for i in (0..n / 7).map(|val| val * 7) {
a += column.get_val(i as u64);
}
a
});
}
#[bench]
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
let permutation = generate_permutation();
let n = permutation.len();
let column = DynamicFastFieldReader::from(permutation);
b.iter(|| {
let mut a = 0u64;
for i in 0u64..n as u64 {
a += column.get_val(i);
}
a
});
}
#[bench]
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
fn bench_intfastfield_fflookup_gcd(b: &mut Bencher) {
let path = Path::new("test");
let permutation = generate_permutation_gcd();
let n = permutation.len();
let column = DynamicFastFieldReader::from(permutation);
b.iter(|| {
let mut a = 0u64;
for i in 0..n as u64 {
a += column.get_val(i);
let directory: RamDirectory = RamDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
}
a
});
}
fast_field_writers
.serialize(&mut serializer, &HashMap::new(), None)
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
#[bench]
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
let permutation = generate_permutation();
b.iter(|| {
let mut a = 0u64;
for i in 0..permutation.len() {
a += permutation[i as usize] as u64;
}
a
});
b.iter(|| {
let mut a = 0u32;
for i in 0u32..permutation.len() as u32 {
a = fast_field_reader.get(i) as u32;
}
a
});
}
}
}

View File

@@ -1,8 +1,8 @@
mod reader;
mod writer;
pub use self::reader::MultiValuedFastFieldReader;
pub use self::writer::MultiValuedFastFieldWriter;
pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
pub use self::writer::{MultiValuedFastFieldWriter, U128MultiValueFastFieldWriter};
#[cfg(test)]
mod tests {

View File

@@ -1,8 +1,11 @@
use std::ops::Range;
use std::ops::{Range, RangeInclusive};
use fastfield_codecs::Column;
use fastfield_codecs::ip_codec::IntervallDecompressor;
use crate::fastfield::{DynamicFastFieldReader, FastValue, MultiValueLength};
use crate::fastfield::{
DynamicFastFieldReader, FastFieldReader, FastFieldReaderCodecWrapperU128, FastValue,
FastValueU128, MultiValueLength,
};
use crate::DocId;
/// Reader for a multivalued `u64` fast field.
@@ -33,9 +36,8 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
/// to the given document are `start..end`.
#[inline]
fn range(&self, doc: DocId) -> Range<u64> {
let idx = doc as u64;
let start = self.idx_reader.get_val(idx);
let end = self.idx_reader.get_val(idx + 1);
let start = self.idx_reader.get(doc);
let end = self.idx_reader.get(doc + 1);
start..end
}
@@ -58,7 +60,7 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
///
/// The min value does not take into account possible
/// deleted documents, and should be considered a lower bound
/// of the actual minimum value.
pub fn min_value(&self) -> Item {
self.vals_reader.min_value()
}
@@ -87,6 +89,155 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
}
impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
self.range(doc_id)
}
fn get_len(&self, doc_id: DocId) -> u64 {
self.num_vals(doc_id) as u64
}
fn get_total_len(&self) -> u64 {
self.total_num_vals() as u64
}
}
/// Reader for a multivalued `u128` fast field.
///
/// The reader is implemented as a `u64` fast field for the index and a `u128` fast field
/// for the values.
///
/// The `vals_reader` will access the concatenated list of all
/// values for all documents.
/// The `idx_reader` associates, for each document, the index of its first value.
#[derive(Clone)]
pub struct MultiValuedU128FastFieldReader<Item: FastValueU128> {
idx_reader: DynamicFastFieldReader<u64>,
vals_reader: FastFieldReaderCodecWrapperU128<Item, IntervallDecompressor>,
}
impl<Item: FastValueU128> MultiValuedU128FastFieldReader<Item> {
pub(crate) fn open(
idx_reader: DynamicFastFieldReader<u64>,
vals_reader: FastFieldReaderCodecWrapperU128<Item, IntervallDecompressor>,
) -> MultiValuedU128FastFieldReader<Item> {
Self {
idx_reader,
vals_reader,
}
}
/// Returns `[start, end)`, such that the values associated
/// to the given document are `start..end`.
#[inline]
fn range(&self, doc: DocId) -> Range<u64> {
let start = self.idx_reader.get(doc);
let end = self.idx_reader.get(doc + 1);
start..end
}
/// Returns the first value associated to the given `doc`, if any.
#[inline]
pub fn get_first_val(&self, doc: DocId) -> Option<Item> {
let range = self.range(doc);
if range.is_empty() {
return None;
}
self.vals_reader.get_val(range.start)
}
/// Fills `vals` with the values at the given positions `range`.
#[inline]
fn get_vals_for_range(&self, range: Range<u64>, vals: &mut Vec<Item>) {
let len = (range.end - range.start) as usize;
vals.resize(len, Item::make_zero());
self.vals_reader.get_range(range.start, &mut vals[..]);
}
/// Returns the array of values associated to the given `doc`.
#[inline]
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
let range = self.range(doc);
self.get_vals_for_range(range, vals);
}
/// Returns all docids which are in the provided value range
pub fn get_between_vals(&self, range: RangeInclusive<Item>) -> Vec<DocId> {
let positions = self.vals_reader.get_between_vals(range);
positions_to_docids(&positions, self)
}
/// Iterates over all elements in the fast field
pub fn iter(&self) -> impl Iterator<Item = Option<Item>> + '_ {
self.vals_reader.iter()
}
/// Returns the minimum value for this fast field.
///
/// The min value does not take into account possible
/// deleted documents, and should be considered a lower bound
/// of the actual minimum value.
pub fn min_value(&self) -> Item {
self.vals_reader.min_value()
}
/// Returns the maximum value for this fast field.
///
/// The max value does not take into account possible
/// deleted documents, and should be considered an upper bound
/// of the actual maximum value.
pub fn max_value(&self) -> Item {
self.vals_reader.max_value()
}
/// Returns the number of values associated with the document `DocId`.
#[inline]
pub fn num_vals(&self, doc: DocId) -> usize {
let range = self.range(doc);
(range.end - range.start) as usize
}
/// Returns the overall number of values in this field.
#[inline]
pub fn total_num_vals(&self) -> u64 {
self.idx_reader.max_value()
}
}
/// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
///
/// Since there is no index from value position -> docid, only docid -> value position
/// range, we scan the index.
///
/// Correctness: `positions` must be sorted.
///
/// TODO: Instead of a linear scan we can employ a binary search to match a docid to its value
/// position.
fn positions_to_docids<T: MultiValueLength>(positions: &[usize], multival_idx: &T) -> Vec<DocId> {
let mut docs = vec![];
let mut cur_doc = 0u32;
let mut last_doc = None;
for pos in positions {
loop {
let range = multival_idx.get_range(cur_doc);
if range.contains(&(*pos as u64)) {
// avoid duplicates
if Some(cur_doc) == last_doc {
break;
}
docs.push(cur_doc);
last_doc = Some(cur_doc);
break;
}
cur_doc += 1;
}
}
docs
}
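// Hedged worked example of the scan above: with offsets [0, 2, 2, 5] (doc 0
// owns positions 0..2, doc 1 owns the empty range 2..2, doc 2 owns 2..5),
// the sorted positions [1, 3, 4] resolve to docs [0, 2]; the second hit in
// doc 2's range is deduplicated.
#[cfg(test)]
mod positions_to_docids_sketch {
    use super::*;

    struct OffsetsSketch(Vec<u64>);

    impl MultiValueLength for OffsetsSketch {
        fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
            self.0[doc_id as usize]..self.0[doc_id as usize + 1]
        }
        fn get_len(&self, doc_id: DocId) -> u64 {
            let range = self.get_range(doc_id);
            range.end - range.start
        }
        fn get_total_len(&self) -> u64 {
            *self.0.last().unwrap_or(&0)
        }
    }

    #[test]
    fn test_positions_to_docids_sketch() {
        let idx = OffsetsSketch(vec![0, 2, 2, 5]);
        assert_eq!(positions_to_docids(&[1, 3, 4], &idx), vec![0, 2]);
    }
}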
impl<Item: FastValueU128> MultiValueLength for MultiValuedU128FastFieldReader<Item> {
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
self.range(doc_id)
}
fn get_len(&self, doc_id: DocId) -> u64 {
self.num_vals(doc_id) as u64
}
@@ -95,6 +246,7 @@ impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
self.total_num_vals() as u64
}
}
#[cfg(test)]
mod tests {

View File

@@ -1,9 +1,10 @@
use std::io;
use fastfield_codecs::ip_codec::{ip_to_u128, IntervalCompressor};
use fnv::FnvHashMap;
use tantivy_bitpacker::minmax;
use crate::fastfield::serializer::BitpackedSerializerLegacy;
use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy;
use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType, FastValue};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::UnorderedTermId;
@@ -120,25 +121,9 @@ impl MultiValuedFastFieldWriter {
&'a self,
doc_id_map: Option<&'b DocIdMapping>,
) -> impl Iterator<Item = &'b [u64]> {
let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids())
} else {
let max_doc = self.doc_index.len() as DocId;
Box::new(0..max_doc)
};
doc_id_iter.map(move |doc_id| self.get_values_for_doc_id(doc_id))
get_ordered_values(&self.vals, &self.doc_index, doc_id_map)
}
/// returns all values for a doc_id
fn get_values_for_doc_id(&self, doc_id: u32) -> &[u64] {
let start_pos = self.doc_index[doc_id as usize] as usize;
let end_pos = self
.doc_index
.get(doc_id as usize + 1)
.cloned()
.unwrap_or(self.vals.len() as u64) as usize; // special case, last doc_id has no offset information
&self.vals[start_pos..end_pos]
}
/// Serializes fast field values by pushing them to the `FastFieldSerializer`.
///
/// If a mapping is given, the values are remapped *and sorted* before serialization.
@@ -171,7 +156,7 @@ impl MultiValuedFastFieldWriter {
}
{
// writing the values themselves.
let mut value_serializer: BitpackedSerializerLegacy<'_, _>;
let mut value_serializer: BitpackedFastFieldSerializerLegacy<'_, _>;
if let Some(mapping) = mapping_opt {
value_serializer = serializer.new_u64_fast_field_with_idx(
self.field,
@@ -220,3 +205,132 @@ impl MultiValuedFastFieldWriter {
Ok(())
}
}
/// Writer for multi-valued (as in, more than one value per document)
/// int fast field.
///
/// This `Writer` is only useful for advanced users.
/// The normal way to get your multivalued int in your index
/// is to
/// - declare your field with fast set to `Cardinality::MultiValues`
/// in your schema
/// - add your document simply by calling `.add_document(...)`.
///
/// The `MultiValuedFastFieldWriter` can be acquired from the
pub struct U128MultiValueFastFieldWriter {
field: Field,
vals: Vec<u128>,
doc_index: Vec<u64>,
}
impl U128MultiValueFastFieldWriter {
/// Creates a new `U128MultiValueFastFieldWriter`
pub(crate) fn new(field: Field) -> Self {
U128MultiValueFastFieldWriter {
field,
vals: Vec::new(),
doc_index: Vec::new(),
}
}
/// The memory used (including children)
pub fn mem_usage(&self) -> usize {
self.vals.capacity() * std::mem::size_of::<u128>() // vals holds u128 values
+ self.doc_index.capacity() * std::mem::size_of::<u64>()
}
/// Finalize the current document.
pub(crate) fn next_doc(&mut self) {
self.doc_index.push(self.vals.len() as u64);
}
/// Pushes a new value to the current document.
pub(crate) fn add_val(&mut self, val: u128) {
self.vals.push(val);
}
/// Shift to the next document and adds
/// all of the matching field values present in the document.
pub fn add_document(&mut self, doc: &Document) {
self.next_doc();
for field_value in doc.field_values() {
if field_value.field == self.field {
let value = field_value.value();
let ip_addr = value.as_ip().unwrap();
let value = ip_to_u128(ip_addr);
self.add_val(value);
}
}
}
/// Returns an iterator over values per doc_id in ascending doc_id order.
///
/// Normally the order is simply iterating `self.doc_index`.
/// With doc_id_map it accounts for the new mapping, returning values in the order of the
/// new doc_ids.
fn get_ordered_values<'a: 'b, 'b>(
&'a self,
doc_id_map: Option<&'b DocIdMapping>,
) -> impl Iterator<Item = &'b [u128]> {
get_ordered_values(&self.vals, &self.doc_index, doc_id_map)
}
/// Serializes fast field values.
pub fn serialize(
&self,
serializer: &mut CompositeFastFieldSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
{
// writing the offset index
let mut doc_index_serializer =
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
let mut offset = 0;
for vals in self.get_ordered_values(doc_id_map) {
doc_index_serializer.add_val(offset)?;
offset += vals.len() as u64;
}
doc_index_serializer.add_val(self.vals.len() as u64)?;
doc_index_serializer.close_field()?;
}
{
let field_write = serializer.get_field_writer(self.field, 1);
let compressor = IntervalCompressor::from_vals(self.vals.to_vec());
let iter = self.get_ordered_values(doc_id_map).flatten().cloned();
compressor.compress_into(iter, field_write)?;
}
Ok(())
}
}
/// Returns an iterator over values per doc_id in ascending doc_id order.
///
/// Normally the order is simply iterating `doc_index`.
/// With doc_id_map it accounts for the new mapping, returning values in the order of the
/// new doc_ids.
fn get_ordered_values<'a: 'b, 'b, T>(
vals: &'a [T],
doc_index: &'a [u64],
doc_id_map: Option<&'b DocIdMapping>,
) -> impl Iterator<Item = &'b [T]> {
let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids())
} else {
let max_doc = doc_index.len() as DocId;
Box::new(0..max_doc)
};
doc_id_iter.map(move |doc_id| get_values_for_doc_id(doc_id, vals, doc_index))
}
/// returns all values for a doc_id
fn get_values_for_doc_id<'a, T>(doc_id: u32, vals: &'a [T], doc_index: &'a [u64]) -> &'a [T] {
let start_pos = doc_index[doc_id as usize] as usize;
let end_pos = doc_index
.get(doc_id as usize + 1)
.cloned()
.unwrap_or(vals.len() as u64) as usize; // special case, last doc_id has no offset information
&vals[start_pos..end_pos]
}
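// Hedged worked example of the offsets layout consumed above: vals
// [10, 11, 12] with doc_index [0, 1, 1] means doc 0 owns vals[0..1], doc 1
// owns the empty slice vals[1..1], and doc 2 (the last doc, which has no
// closing offset) owns the tail vals[1..3].
#[cfg(test)]
mod offsets_layout_sketch {
    use super::get_values_for_doc_id;

    #[test]
    fn test_get_values_for_doc_id_sketch() {
        let vals = [10u64, 11, 12];
        let doc_index = [0u64, 1, 1];
        assert_eq!(get_values_for_doc_id(0, &vals, &doc_index), &[10u64]);
        assert!(get_values_for_doc_id(1, &vals, &doc_index).is_empty());
        assert_eq!(get_values_for_doc_id(2, &vals, &doc_index), &[11u64, 12]);
    }
}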

View File

@@ -1,19 +1,65 @@
use std::collections::HashMap;
use std::marker::PhantomData;
use std::ops::RangeInclusive;
use std::path::Path;
use common::BinarySerializable;
use fastfield_codecs::bitpacked::{BitpackedCodec, BitpackedReader};
use fastfield_codecs::blockwise_linear::{BlockwiseLinearCodec, BlockwiseLinearReader};
use fastfield_codecs::linear::{LinearCodec, LinearReader};
use fastfield_codecs::{Column, FastFieldCodec, FastFieldCodecType};
use fastfield_codecs::bitpacked::{
BitpackedFastFieldReader as BitpackedReader, BitpackedFastFieldSerializer,
};
use fastfield_codecs::linearinterpol::{
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
};
use fastfield_codecs::multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
};
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecReaderU128, FastFieldCodecSerializer};
use super::gcd::open_gcd_from_bytes;
use super::FastValue;
use super::{FastValue, FastValueU128, GCDFastFieldCodec, GCD_CODEC_ID};
use crate::directory::{CompositeFile, Directory, FileSlice, OwnedBytes, RamDirectory, WritePtr};
use crate::error::DataCorruption;
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter, GCDReader};
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
use crate::schema::{Schema, FAST};
use crate::DocId;
/// FastFieldReader is the trait to access fast field data.
pub trait FastFieldReader<Item: FastValue>: Clone {
/// Return the value associated to the given document.
///
/// This accessor should return as fast as possible.
///
/// # Panics
///
/// May panic if `doc` is greater than the segment's `maxdoc`.
fn get(&self, doc: DocId) -> Item;
/// Fills an output buffer with the fast field values
/// associated with the `DocId` going from
/// `start` to `start + output.len()`.
///
/// Regardless of the type of `Item`, this method works by
/// - transmuting the output array
/// - extracting the `Item`s as if they were `u64`
/// - possibly converting the `u64` value to the right type.
///
/// # Panics
///
/// May panic if `start + output.len()` is greater than
/// the segment's `maxdoc`.
fn get_range(&self, start: u64, output: &mut [Item]);
/// Returns the minimum value for this fast field.
///
/// The min value does not take into account possible
/// deleted documents, and should be considered a lower bound
/// of the actual minimum value.
fn min_value(&self) -> Item;
/// Returns the maximum value for this fast field.
///
/// The max value does not take into account possible
/// deleted documents, and should be considered an upper bound
/// of the actual maximum value.
fn max_value(&self) -> Item;
}
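// Hedged sketch: a trivial Vec-backed implementation of the trait above,
// useful as a mental model only; the real readers decode a compressed
// representation instead of indexing a Vec.
#[cfg(test)]
mod vec_reader_sketch {
    use super::*;

    #[derive(Clone)]
    struct VecReaderSketch(std::sync::Arc<Vec<u64>>);

    impl FastFieldReader<u64> for VecReaderSketch {
        fn get(&self, doc: DocId) -> u64 {
            self.0[doc as usize]
        }
        fn get_range(&self, start: u64, output: &mut [u64]) {
            let start = start as usize;
            output.copy_from_slice(&self.0[start..start + output.len()]);
        }
        fn min_value(&self) -> u64 {
            self.0.iter().copied().min().unwrap_or(0)
        }
        fn max_value(&self) -> u64 {
            self.0.iter().copied().max().unwrap_or(0)
        }
    }

    #[test]
    fn test_vec_reader_sketch() {
        let reader = VecReaderSketch(std::sync::Arc::new(vec![7, 5, 9]));
        let mut out = [0u64; 2];
        reader.get_range(1, &mut out);
        assert_eq!(out, [5, 9]);
        assert_eq!((reader.min_value(), reader.max_value()), (5, 9));
    }
}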
#[derive(Clone)]
/// DynamicFastFieldReader wraps different readers to access
@@ -22,122 +68,218 @@ pub enum DynamicFastFieldReader<Item: FastValue> {
/// Bitpacked compressed fastfield data.
Bitpacked(FastFieldReaderCodecWrapper<Item, BitpackedReader>),
/// Linear interpolated values + bitpacked
Linear(FastFieldReaderCodecWrapper<Item, LinearReader>),
LinearInterpol(FastFieldReaderCodecWrapper<Item, LinearInterpolFastFieldReader>),
/// Blockwise linear interpolated values + bitpacked
BlockwiseLinear(FastFieldReaderCodecWrapper<Item, BlockwiseLinearReader>),
MultiLinearInterpol(FastFieldReaderCodecWrapper<Item, MultiLinearInterpolFastFieldReader>),
/// GCD and Bitpacked compressed fastfield data.
BitpackedGCD(FastFieldReaderCodecWrapper<Item, GCDReader<BitpackedReader>>),
BitpackedGCD(FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<BitpackedReader>>),
/// GCD and Linear interpolated values + bitpacked
LinearGCD(FastFieldReaderCodecWrapper<Item, GCDReader<LinearReader>>),
LinearInterpolGCD(
FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<LinearInterpolFastFieldReader>>,
),
/// GCD and Blockwise linear interpolated values + bitpacked
BlockwiseLinearGCD(FastFieldReaderCodecWrapper<Item, GCDReader<BlockwiseLinearReader>>),
MultiLinearInterpolGCD(
FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<MultiLinearInterpolFastFieldReader>>,
),
}
impl<Item: FastValue> DynamicFastFieldReader<Item> {
/// Returns the correct reader wrapped in the `DynamicFastFieldReader` enum for the data.
pub fn open_from_id(
mut bytes: OwnedBytes,
codec_type: FastFieldCodecType,
codec_id: u8,
) -> crate::Result<DynamicFastFieldReader<Item>> {
let reader = match codec_type {
FastFieldCodecType::Bitpacked => {
DynamicFastFieldReader::Bitpacked(BitpackedCodec::open_from_bytes(bytes)?.into())
let reader = match codec_id {
BitpackedFastFieldSerializer::ID => {
DynamicFastFieldReader::Bitpacked(FastFieldReaderCodecWrapper::<
Item,
BitpackedReader,
>::open_from_bytes(bytes)?)
}
FastFieldCodecType::Linear => {
DynamicFastFieldReader::Linear(LinearCodec::open_from_bytes(bytes)?.into())
LinearInterpolFastFieldSerializer::ID => {
DynamicFastFieldReader::LinearInterpol(FastFieldReaderCodecWrapper::<
Item,
LinearInterpolFastFieldReader,
>::open_from_bytes(bytes)?)
}
FastFieldCodecType::BlockwiseLinear => DynamicFastFieldReader::BlockwiseLinear(
BlockwiseLinearCodec::open_from_bytes(bytes)?.into(),
),
FastFieldCodecType::Gcd => {
let codec_type = FastFieldCodecType::deserialize(&mut bytes)?;
match codec_type {
FastFieldCodecType::Bitpacked => DynamicFastFieldReader::BitpackedGCD(
open_gcd_from_bytes::<BitpackedCodec>(bytes)?.into(),
),
FastFieldCodecType::Linear => DynamicFastFieldReader::LinearGCD(
open_gcd_from_bytes::<LinearCodec>(bytes)?.into(),
),
FastFieldCodecType::BlockwiseLinear => {
DynamicFastFieldReader::BlockwiseLinearGCD(
open_gcd_from_bytes::<BlockwiseLinearCodec>(bytes)?.into(),
MultiLinearInterpolFastFieldSerializer::ID => {
DynamicFastFieldReader::MultiLinearInterpol(FastFieldReaderCodecWrapper::<
Item,
MultiLinearInterpolFastFieldReader,
>::open_from_bytes(
bytes
)?)
}
_ if codec_id == GCD_CODEC_ID => {
let codec_id = bytes.read_u8();
match codec_id {
BitpackedFastFieldSerializer::ID => {
DynamicFastFieldReader::BitpackedGCD(FastFieldReaderCodecWrapper::<
Item,
GCDFastFieldCodec<BitpackedReader>,
>::open_from_bytes(
bytes
)?)
}
LinearInterpolFastFieldSerializer::ID => {
DynamicFastFieldReader::LinearInterpolGCD(FastFieldReaderCodecWrapper::<
Item,
GCDFastFieldCodec<LinearInterpolFastFieldReader>,
>::open_from_bytes(
bytes
)?)
}
MultiLinearInterpolFastFieldSerializer::ID => {
DynamicFastFieldReader::MultiLinearInterpolGCD(
FastFieldReaderCodecWrapper::<
Item,
GCDFastFieldCodec<MultiLinearInterpolFastFieldReader>,
>::open_from_bytes(bytes)?,
)
}
FastFieldCodecType::Gcd => {
return Err(DataCorruption::comment_only(
"Gcd codec wrapped into another gcd codec. This combination is not \
allowed.",
_ => {
panic!(
"unknown fastfield codec id {:?}. Data corrupted or using old tantivy \
version.",
codec_id
)
.into())
}
}
}
_ => {
panic!(
"unknown fastfield codec id {:?}. Data corrupted or using old tantivy version.",
codec_id
)
}
};
Ok(reader)
}
/// Returns the correct reader wrapped in the `DynamicFastFieldReader` enum for the data.
pub fn open(file: FileSlice) -> crate::Result<DynamicFastFieldReader<Item>> {
let mut bytes = file.read_bytes()?;
let codec_type = FastFieldCodecType::deserialize(&mut bytes)?;
Self::open_from_id(bytes, codec_type)
let codec_id = bytes.read_u8();
Self::open_from_id(bytes, codec_id)
}
}
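// Hedged sketch of the header layout parsed above: the first byte of the
// fast field data is the codec id, and the GCD codec nests a second id byte
// for the codec it wraps. The id values below are placeholders for the
// illustration, not the real constants.
#[cfg(test)]
mod codec_header_sketch {
    #[test]
    fn test_codec_header_layout() {
        const BITPACKED_ID_SKETCH: u8 = 1; // placeholder value
        const GCD_ID_SKETCH: u8 = 4; // placeholder value
        let bytes: Vec<u8> = vec![GCD_ID_SKETCH, BITPACKED_ID_SKETCH, 0xDE, 0xAD];
        let (codec_id, rest) = bytes.split_first().unwrap();
        assert_eq!(*codec_id, GCD_ID_SKETCH);
        // A GCD field stores the wrapped codec's id right after its own.
        let (inner_id, payload) = rest.split_first().unwrap();
        assert_eq!(*inner_id, BITPACKED_ID_SKETCH);
        assert_eq!(payload, &[0xDE, 0xAD]);
    }
}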
impl<Item: FastValue> Column<Item> for DynamicFastFieldReader<Item> {
impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
#[inline]
fn get_val(&self, idx: u64) -> Item {
fn get(&self, doc: DocId) -> Item {
match self {
Self::Bitpacked(reader) => reader.get_val(idx),
Self::Linear(reader) => reader.get_val(idx),
Self::BlockwiseLinear(reader) => reader.get_val(idx),
Self::BitpackedGCD(reader) => reader.get_val(idx),
Self::LinearGCD(reader) => reader.get_val(idx),
Self::BlockwiseLinearGCD(reader) => reader.get_val(idx),
Self::Bitpacked(reader) => reader.get(doc),
Self::LinearInterpol(reader) => reader.get(doc),
Self::MultiLinearInterpol(reader) => reader.get(doc),
Self::BitpackedGCD(reader) => reader.get(doc),
Self::LinearInterpolGCD(reader) => reader.get(doc),
Self::MultiLinearInterpolGCD(reader) => reader.get(doc),
}
}
#[inline]
fn get_range(&self, start: u64, output: &mut [Item]) {
match self {
Self::Bitpacked(reader) => reader.get_range(start, output),
Self::Linear(reader) => reader.get_range(start, output),
Self::BlockwiseLinear(reader) => reader.get_range(start, output),
Self::LinearInterpol(reader) => reader.get_range(start, output),
Self::MultiLinearInterpol(reader) => reader.get_range(start, output),
Self::BitpackedGCD(reader) => reader.get_range(start, output),
Self::LinearGCD(reader) => reader.get_range(start, output),
Self::BlockwiseLinearGCD(reader) => reader.get_range(start, output),
Self::LinearInterpolGCD(reader) => reader.get_range(start, output),
Self::MultiLinearInterpolGCD(reader) => reader.get_range(start, output),
}
}
fn min_value(&self) -> Item {
match self {
Self::Bitpacked(reader) => reader.min_value(),
Self::Linear(reader) => reader.min_value(),
Self::BlockwiseLinear(reader) => reader.min_value(),
Self::LinearInterpol(reader) => reader.min_value(),
Self::MultiLinearInterpol(reader) => reader.min_value(),
Self::BitpackedGCD(reader) => reader.min_value(),
Self::LinearGCD(reader) => reader.min_value(),
Self::BlockwiseLinearGCD(reader) => reader.min_value(),
Self::LinearInterpolGCD(reader) => reader.min_value(),
Self::MultiLinearInterpolGCD(reader) => reader.min_value(),
}
}
fn max_value(&self) -> Item {
match self {
Self::Bitpacked(reader) => reader.max_value(),
Self::Linear(reader) => reader.max_value(),
Self::BlockwiseLinear(reader) => reader.max_value(),
Self::LinearInterpol(reader) => reader.max_value(),
Self::MultiLinearInterpol(reader) => reader.max_value(),
Self::BitpackedGCD(reader) => reader.max_value(),
Self::LinearGCD(reader) => reader.max_value(),
Self::BlockwiseLinearGCD(reader) => reader.max_value(),
Self::LinearInterpolGCD(reader) => reader.max_value(),
Self::MultiLinearInterpolGCD(reader) => reader.max_value(),
}
}
}
/// Wrapper for accessing a fastfield.
///
/// Holds the data and the codec to read the data.
#[derive(Clone)]
pub struct FastFieldReaderCodecWrapperU128<Item: FastValueU128, CodecReader> {
reader: CodecReader,
bytes: OwnedBytes,
_phantom: PhantomData<Item>,
}
impl<Item: FastValueU128, C: FastFieldCodecReaderU128> FastFieldReaderCodecWrapperU128<Item, C> {
/// Opens a fast field given the bytes.
pub fn open_from_bytes(bytes: OwnedBytes) -> crate::Result<Self> {
let reader = C::open_from_bytes(bytes.as_slice())?;
Ok(Self {
reader,
bytes,
_phantom: PhantomData,
})
}
/// Returns the item for the docid, if present
pub fn get_val(&self, doc: u64) -> Option<Item> {
self.reader
.get(doc, self.bytes.as_slice())
.map(|el| Item::from_u128(el))
}
/// Internally `multivalued` also uses single-value fast fields.
/// It works as follows... A first column contains the list of start indexes
/// for each document, a second column contains the actual values.
///
/// The values associated to a given doc are then
/// `second_column[first_column.get(doc)..first_column.get(doc+1)]`.
///
/// Which means single value fast field reader can be indexed internally with
/// something different from a `DocId`. For this use case, we want to use `u64`
/// values.
///
/// See `get_range` for an actual documentation about this method.
pub(crate) fn get_range(&self, start: u64, output: &mut [Item]) {
for (i, out) in output.iter_mut().enumerate() {
if let Some(val) = self.get_val(start + (i as u64)) {
*out = val
}
}
}
fn num_vals(&self) -> u64 {
match self {
Self::Bitpacked(reader) => reader.num_vals(),
Self::Linear(reader) => reader.num_vals(),
Self::BlockwiseLinear(reader) => reader.num_vals(),
Self::BitpackedGCD(reader) => reader.num_vals(),
Self::LinearGCD(reader) => reader.num_vals(),
Self::BlockwiseLinearGCD(reader) => reader.num_vals(),
}
/// Iterates over all elements in the fast field
pub fn iter(&self) -> impl Iterator<Item = Option<Item>> + '_ {
self.reader
.iter(self.bytes.as_slice())
.map(|el| el.map(Item::from_u128))
}
/// Returns all docids which are in the provided value range
pub fn get_between_vals(&self, range: RangeInclusive<Item>) -> Vec<usize> {
let range = range.start().to_u128()..=range.end().to_u128();
self.reader.get_between_vals(range, self.bytes.as_slice())
}
/// Return min_value.
pub fn min_value(&self) -> Item {
Item::from_u128(self.reader.min_value())
}
/// Return max_value.
pub fn max_value(&self) -> Item {
Item::from_u128(self.reader.max_value())
}
}
@@ -147,24 +289,35 @@ impl<Item: FastValue> Column<Item> for DynamicFastFieldReader<Item> {
#[derive(Clone)]
pub struct FastFieldReaderCodecWrapper<Item: FastValue, CodecReader> {
reader: CodecReader,
bytes: OwnedBytes,
_phantom: PhantomData<Item>,
}
impl<Item: FastValue, CodecReader> From<CodecReader>
for FastFieldReaderCodecWrapper<Item, CodecReader>
{
fn from(reader: CodecReader) -> Self {
FastFieldReaderCodecWrapper {
reader,
_phantom: PhantomData,
}
impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item, C> {
/// Opens a fast field given a file.
pub fn open(file: FileSlice) -> crate::Result<Self> {
let mut bytes = file.read_bytes()?;
let codec_id = bytes.read_u8();
assert_eq!(
BitpackedFastFieldSerializer::ID,
codec_id,
"Tried to open fast field as bitpacked encoded (id=1), but got serializer with \
different id"
);
Self::open_from_bytes(bytes)
}
/// Opens a fast field given the bytes.
pub fn open_from_bytes(bytes: OwnedBytes) -> crate::Result<Self> {
let reader = C::open_from_bytes(bytes.as_slice())?;
Ok(FastFieldReaderCodecWrapper {
reader,
bytes,
_phantom: PhantomData,
})
}
}
impl<Item: FastValue, D: Column> FastFieldReaderCodecWrapper<Item, D> {
#[inline]
pub(crate) fn get_u64(&self, idx: u64) -> Item {
let data = self.reader.get_val(idx);
pub(crate) fn get_u64(&self, doc: u64) -> Item {
let data = self.reader.get_u64(doc, self.bytes.as_slice());
Item::from_u64(data)
}
@@ -187,7 +340,9 @@ impl<Item: FastValue, D: Column> FastFieldReaderCodecWrapper<Item, D> {
}
}
impl<Item: FastValue, C: Column + Clone> Column<Item> for FastFieldReaderCodecWrapper<Item, C> {
impl<Item: FastValue, C: FastFieldCodecReader + Clone> FastFieldReader<Item>
for FastFieldReaderCodecWrapper<Item, C>
{
/// Return the value associated to the given document.
///
/// This accessor should return as fast as possible.
@@ -196,8 +351,8 @@ impl<Item: FastValue, C: Column + Clone> Column<Item> for FastFieldReaderCodecWr
///
/// May panic if `doc` is greater than the segment
/// `maxdoc`.
fn get_val(&self, idx: u64) -> Item {
self.get_u64(idx)
fn get(&self, doc: DocId) -> Item {
self.get_u64(u64::from(doc))
}
/// Fills an output buffer with the fast field values
@@ -234,10 +389,6 @@ impl<Item: FastValue, C: Column + Clone> Column<Item> for FastFieldReaderCodecWr
fn max_value(&self) -> Item {
Item::from_u64(self.reader.max_value())
}
fn num_vals(&self) -> u64 {
self.reader.num_vals()
}
}
impl<Item: FastValue> From<Vec<Item>> for DynamicFastFieldReader<Item> {

View File

@@ -1,4 +1,9 @@
use super::reader::DynamicFastFieldReader;
use std::net::IpAddr;
use fastfield_codecs::ip_codec::IntervallDecompressor;
use super::multivalued::MultiValuedU128FastFieldReader;
use super::reader::{DynamicFastFieldReader, FastFieldReaderCodecWrapperU128};
use crate::directory::{CompositeFile, FileSlice};
use crate::fastfield::{
BytesFastFieldReader, FastFieldNotAvailableError, FastValue, MultiValuedFastFieldReader,
@@ -20,6 +25,7 @@ pub struct FastFieldReaders {
pub(crate) enum FastType {
I64,
U64,
U128,
F64,
Bool,
Date,
@@ -46,6 +52,9 @@ pub(crate) fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType,
FieldType::Str(options) if options.is_fast() => {
Some((FastType::U64, Cardinality::MultiValues))
}
FieldType::Ip(options) => options
.get_fastfield_cardinality()
.map(|cardinality| (FastType::U128, cardinality)),
_ => None,
}
}
@@ -137,6 +146,69 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field)
}
/// Returns the `ip` fast field reader associated to `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub fn ip_addr(
&self,
field: Field,
) -> crate::Result<FastFieldReaderCodecWrapperU128<IpAddr, IntervallDecompressor>> {
self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
let fast_field_slice = self.fast_field_data(field, 0)?;
let bytes = fast_field_slice.read_bytes()?;
FastFieldReaderCodecWrapperU128::<IpAddr, IntervallDecompressor>::open_from_bytes(bytes)
}
/// Returns the `ip` fast field reader associated to `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub fn ip_addrs(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<IpAddr>> {
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
let idx_reader: DynamicFastFieldReader<u64> = self.typed_fast_field_reader(field)?;
let fast_field_slice = self.fast_field_data(field, 1)?;
let bytes = fast_field_slice.read_bytes()?;
let vals_reader =
FastFieldReaderCodecWrapperU128::<IpAddr, IntervallDecompressor>::open_from_bytes(
bytes,
)?;
Ok(MultiValuedU128FastFieldReader::open(
idx_reader,
vals_reader,
))
}
/// Returns the `u128` fast field reader associated to `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub fn u128(
&self,
field: Field,
) -> crate::Result<FastFieldReaderCodecWrapperU128<u128, IntervallDecompressor>> {
let fast_field_slice = self.fast_field_data(field, 0)?;
let bytes = fast_field_slice.read_bytes()?;
FastFieldReaderCodecWrapperU128::<u128, IntervallDecompressor>::open_from_bytes(bytes)
}
/// Returns the `u128` multi-valued fast field reader associated to `field`.
///
/// If `field` is not a u128 multi-valued fast field, this method returns an Error.
pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<u128>> {
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
let idx_reader: DynamicFastFieldReader<u64> = self.typed_fast_field_reader(field)?;
let fast_field_slice = self.fast_field_data(field, 1)?;
let bytes = fast_field_slice.read_bytes()?;
let vals_reader =
FastFieldReaderCodecWrapperU128::<u128, IntervallDecompressor>::open_from_bytes(bytes)?;
Ok(MultiValuedU128FastFieldReader::open(
idx_reader,
vals_reader,
))
}
/// Returns the `u64` fast field reader associated to `field`, regardless of whether the
/// given field is effectively of type `u64` or not.
///

View File

@@ -1,17 +1,17 @@
use std::io::{self, Write};
use std::num::NonZeroU64;
use common::{BinarySerializable, CountingWriter};
use fastdivide::DividerU64;
pub use fastfield_codecs::bitpacked::{BitpackedCodec, BitpackedSerializerLegacy};
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
use fastfield_codecs::linear::LinearCodec;
use fastfield_codecs::FastFieldCodecType;
pub use fastfield_codecs::{Column, FastFieldCodec, FastFieldStats};
pub use fastfield_codecs::bitpacked::{
BitpackedFastFieldSerializer, BitpackedFastFieldSerializerLegacy,
};
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
pub use fastfield_codecs::{FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
use super::{find_gcd, ALL_CODECS, GCD_DEFAULT};
use super::{find_gcd, FastFieldCodecName, ALL_CODECS, GCD_DEFAULT};
use crate::directory::{CompositeWrite, WritePtr};
use crate::fastfield::gcd::write_gcd_header;
use crate::fastfield::GCD_CODEC_ID;
use crate::schema::Field;
/// `CompositeFastFieldSerializer` is in charge of serializing
@@ -41,7 +41,7 @@ pub struct CompositeFastFieldSerializer {
#[derive(Debug, Clone)]
pub struct FastFieldCodecEnableCheck {
enabled_codecs: Vec<FastFieldCodecType>,
enabled_codecs: Vec<FastFieldCodecName>,
}
impl FastFieldCodecEnableCheck {
fn allow_all() -> Self {
@@ -49,28 +49,31 @@ impl FastFieldCodecEnableCheck {
enabled_codecs: ALL_CODECS.to_vec(),
}
}
fn is_enabled(&self, code_type: FastFieldCodecType) -> bool {
self.enabled_codecs.contains(&code_type)
fn is_enabled(&self, codec_name: FastFieldCodecName) -> bool {
self.enabled_codecs.contains(&codec_name)
}
}
impl From<FastFieldCodecType> for FastFieldCodecEnableCheck {
fn from(code_type: FastFieldCodecType) -> Self {
impl From<FastFieldCodecName> for FastFieldCodecEnableCheck {
fn from(codec_name: FastFieldCodecName) -> Self {
FastFieldCodecEnableCheck {
enabled_codecs: vec![code_type],
enabled_codecs: vec![codec_name],
}
}
}
// use this once explicit_generic_args_with_impl_trait is merged and stabilized
// https://github.com/rust-lang/rust/pull/86176
fn codec_estimation<C: FastFieldCodec>(
fastfield_accessor: &impl Column,
estimations: &mut Vec<(f32, FastFieldCodecType)>,
fn codec_estimation<T: FastFieldCodecSerializer, A: FastFieldDataAccess>(
stats: FastFieldStats,
fastfield_accessor: &A,
estimations: &mut Vec<(f32, &str, u8)>,
) {
if let Some(ratio) = C::estimate(fastfield_accessor) {
estimations.push((ratio, C::CODEC_TYPE));
if !T::is_applicable(fastfield_accessor, stats.clone()) {
return;
}
let (ratio, name, id) = (T::estimate(fastfield_accessor, stats), T::NAME, T::ID);
estimations.push((ratio, name, id));
}
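// Hedged sketch of how these estimations are consumed by the selection logic
// further down: NaN and f32::MAX entries are dropped, the rest are sorted
// ascending by estimated compression ratio, and the first entry wins.
#[cfg(test)]
mod estimation_selection_sketch {
    #[test]
    fn test_pick_best_estimation() {
        let mut estimations: Vec<(f32, &str, u8)> = vec![
            (f32::NAN, "broken", 9),
            (0.3, "linearinterpol", 2),
            (1.0, "bitpacked", 1),
        ];
        estimations.retain(|est| !est.0.is_nan() && est.0 != f32::MAX);
        estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        assert_eq!(estimations[0].1, "linearinterpol");
    }
}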
impl CompositeFastFieldSerializer {
@@ -94,100 +97,99 @@ impl CompositeFastFieldSerializer {
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
/// automatically.
pub fn create_auto_detect_u64_fast_field(
pub fn create_auto_detect_u64_fast_field<F, I>(
&mut self,
field: Field,
fastfield_accessor: impl Column,
) -> io::Result<()> {
self.create_auto_detect_u64_fast_field_with_idx(field, fastfield_accessor, 0)
stats: FastFieldStats,
fastfield_accessor: impl FastFieldDataAccess,
iter_gen: F,
) -> io::Result<()>
where
F: Fn() -> I,
I: Iterator<Item = u64>,
{
self.create_auto_detect_u64_fast_field_with_idx(
field,
stats,
fastfield_accessor,
iter_gen,
0,
)
}
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
/// automatically.
pub fn write_header<W: Write>(
field_write: &mut W,
codec_type: FastFieldCodecType,
) -> io::Result<()> {
codec_type.to_code().serialize(field_write)?;
pub fn write_header<W: Write>(field_write: &mut W, codec_id: u8) -> io::Result<()> {
codec_id.serialize(field_write)?;
Ok(())
}
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
/// automatically.
pub fn create_auto_detect_u64_fast_field_with_idx(
pub fn create_auto_detect_u64_fast_field_with_idx<F, I>(
&mut self,
field: Field,
fastfield_accessor: impl Column,
stats: FastFieldStats,
fastfield_accessor: impl FastFieldDataAccess,
iter_gen: F,
idx: usize,
) -> io::Result<()> {
let min_value = fastfield_accessor.min_value();
) -> io::Result<()>
where
F: Fn() -> I,
I: Iterator<Item = u64>,
{
let field_write = self.composite_write.for_field_with_idx(field, idx);
let gcd = find_gcd(fastfield_accessor.iter().map(|val| val - min_value))
.map(NonZeroU64::get)
.unwrap_or(GCD_DEFAULT);
let gcd = find_gcd(iter_gen().map(|val| val - stats.min_value)).unwrap_or(GCD_DEFAULT);
if gcd == 1 {
return Self::create_auto_detect_u64_fast_field_with_idx_gcd(
self.codec_enable_checker.clone(),
field,
field_write,
stats,
fastfield_accessor,
iter_gen(),
iter_gen(),
);
}
Self::write_header(field_write, FastFieldCodecType::Gcd)?;
struct GCDWrappedFFAccess<T: Column> {
Self::write_header(field_write, GCD_CODEC_ID)?;
struct GCDWrappedFFAccess<T: FastFieldDataAccess> {
fastfield_accessor: T,
base_value: u64,
max_value: u64,
num_vals: u64,
gcd: DividerU64,
min_value: u64,
gcd: u64,
}
impl<T: Column> Column for GCDWrappedFFAccess<T> {
impl<T: FastFieldDataAccess> FastFieldDataAccess for GCDWrappedFFAccess<T> {
fn get_val(&self, position: u64) -> u64 {
self.gcd
.divide(self.fastfield_accessor.get_val(position) - self.base_value)
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.fastfield_accessor
.iter()
.map(|val| self.gcd.divide(val - self.base_value)),
)
}
fn min_value(&self) -> u64 {
0
}
fn max_value(&self) -> u64 {
self.max_value
}
fn num_vals(&self) -> u64 {
self.num_vals
(self.fastfield_accessor.get_val(position) - self.min_value) / self.gcd
}
}
let num_vals = fastfield_accessor.num_vals();
let base_value = fastfield_accessor.min_value();
let max_value = (fastfield_accessor.max_value() - fastfield_accessor.min_value()) / gcd;
let fastfield_accessor = GCDWrappedFFAccess {
fastfield_accessor,
base_value,
max_value,
num_vals,
gcd: DividerU64::divide_by(gcd),
min_value: stats.min_value,
gcd,
};
let min_value = stats.min_value;
let stats = FastFieldStats {
min_value: 0,
max_value: (stats.max_value - stats.min_value) / gcd,
num_vals: stats.num_vals,
};
let iter1 = iter_gen().map(|val| (val - min_value) / gcd);
let iter2 = iter_gen().map(|val| (val - min_value) / gcd);
Self::create_auto_detect_u64_fast_field_with_idx_gcd(
self.codec_enable_checker.clone(),
field,
field_write,
stats,
fastfield_accessor,
iter1,
iter2,
)?;
write_gcd_header(field_write, base_value, gcd, num_vals)?;
write_gcd_header(field_write, min_value, gcd)?;
Ok(())
}
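    // Hedged worked example of the GCD path above, assuming input values
    // [2000, 9000, 13000]: min_value is 2000, the deltas [0, 7000, 11000]
    // share gcd = 1000, so the wrapped accessor exposes [0, 7, 11] for
    // bitpacking and a reader later reconstructs each value as
    // stored * gcd + min_value.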
@@ -197,23 +199,38 @@ impl CompositeFastFieldSerializer {
codec_enable_checker: FastFieldCodecEnableCheck,
field: Field,
field_write: &mut CountingWriter<W>,
fastfield_accessor: impl Column,
stats: FastFieldStats,
fastfield_accessor: impl FastFieldDataAccess,
iter1: impl Iterator<Item = u64>,
iter2: impl Iterator<Item = u64>,
) -> io::Result<()> {
let mut estimations = vec![];
if codec_enable_checker.is_enabled(FastFieldCodecType::Bitpacked) {
codec_estimation::<BitpackedCodec>(&fastfield_accessor, &mut estimations);
if codec_enable_checker.is_enabled(FastFieldCodecName::Bitpacked) {
codec_estimation::<BitpackedFastFieldSerializer, _>(
stats.clone(),
&fastfield_accessor,
&mut estimations,
);
}
if codec_enable_checker.is_enabled(FastFieldCodecType::Linear) {
codec_estimation::<LinearCodec>(&fastfield_accessor, &mut estimations);
if codec_enable_checker.is_enabled(FastFieldCodecName::LinearInterpol) {
codec_estimation::<LinearInterpolFastFieldSerializer, _>(
stats.clone(),
&fastfield_accessor,
&mut estimations,
);
}
if codec_enable_checker.is_enabled(FastFieldCodecType::BlockwiseLinear) {
codec_estimation::<BlockwiseLinearCodec>(&fastfield_accessor, &mut estimations);
if codec_enable_checker.is_enabled(FastFieldCodecName::BlockwiseLinearInterpol) {
codec_estimation::<MultiLinearInterpolFastFieldSerializer, _>(
stats.clone(),
&fastfield_accessor,
&mut estimations,
);
}
if let Some(broken_estimation) = estimations.iter().find(|estimation| estimation.0.is_nan())
{
warn!(
"broken estimation for fast field codec {:?}",
"broken estimation for fast field codec {}",
broken_estimation.1
);
}
@@ -221,25 +238,43 @@ impl CompositeFastFieldSerializer {
// codecs
estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let (_ratio, codec_type) = estimations[0];
debug!("choosing fast field codec {codec_type:?} for field_id {field:?}"); // todo print actual field name
let (_ratio, name, id) = estimations[0];
debug!(
"choosing fast field codec {} for field_id {:?}",
name, field
); // todo print actual field name
Self::write_header(field_write, codec_type)?;
match codec_type {
FastFieldCodecType::Bitpacked => {
BitpackedCodec::serialize(field_write, &fastfield_accessor)?;
Self::write_header(field_write, id)?;
match name {
BitpackedFastFieldSerializer::NAME => {
BitpackedFastFieldSerializer::serialize(
field_write,
&fastfield_accessor,
stats,
iter1,
iter2,
)?;
}
FastFieldCodecType::Linear => {
LinearCodec::serialize(field_write, &fastfield_accessor)?;
LinearInterpolFastFieldSerializer::NAME => {
LinearInterpolFastFieldSerializer::serialize(
field_write,
&fastfield_accessor,
stats,
iter1,
iter2,
)?;
}
FastFieldCodecType::BlockwiseLinear => {
BlockwiseLinearCodec::serialize(field_write, &fastfield_accessor)?;
MultiLinearInterpolFastFieldSerializer::NAME => {
MultiLinearInterpolFastFieldSerializer::serialize(
field_write,
&fastfield_accessor,
stats,
iter1,
iter2,
)?;
}
FastFieldCodecType::Gcd => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"GCD codec not supported.",
));
_ => {
panic!("unknown fastfield serializer {}", name)
}
}
field_write.flush()?;
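The selection logic above reduces to: ask every enabled codec for an estimated compression ratio on the data, discard NaN or inapplicable results, and serialize with the cheapest. A compressed sketch of that control flow (the ratios below are made-up numbers, not real estimator output):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Codec {
    Bitpacked,
    Linear,
    BlockwiseLinear,
}

fn main() {
    // (estimated_ratio, codec) pairs as each codec's estimator would
    // produce them. f32::NAN marks a broken estimation, f32::MAX marks
    // "not applicable to this data".
    let mut estimations: Vec<(f32, Codec)> = vec![
        (0.41, Codec::Bitpacked),
        (f32::NAN, Codec::Linear),
        (0.17, Codec::BlockwiseLinear),
    ];
    estimations.retain(|(ratio, _)| !ratio.is_nan() && *ratio != f32::MAX);
    estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
    let (_ratio, codec) = estimations[0];
    assert_eq!(codec, Codec::BlockwiseLinear);
}
```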
@@ -253,7 +288,7 @@ impl CompositeFastFieldSerializer {
field: Field,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedSerializerLegacy<'_, CountingWriter<WritePtr>>> {
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
}
@@ -263,7 +298,7 @@ impl CompositeFastFieldSerializer {
field: Field,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedSerializerLegacy<'_, CountingWriter<WritePtr>>> {
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
}
@@ -274,11 +309,12 @@ impl CompositeFastFieldSerializer {
min_value: u64,
max_value: u64,
idx: usize,
) -> io::Result<BitpackedSerializerLegacy<'_, CountingWriter<WritePtr>>> {
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
// Prepend codec id to field data for compatibility with DynamicFastFieldReader.
FastFieldCodecType::Bitpacked.serialize(field_write)?;
BitpackedSerializerLegacy::open(field_write, min_value, max_value)
let id = BitpackedFastFieldSerializer::ID;
id.serialize(field_write)?;
BitpackedFastFieldSerializerLegacy::open(field_write, min_value, max_value)
}
/// Start serializing a new [u8] fast field
@@ -291,6 +327,11 @@ impl CompositeFastFieldSerializer {
FastBytesFieldSerializer { write: field_write }
}
/// Gets the underlying writer
pub fn get_field_writer(&mut self, field: Field, idx: usize) -> &mut impl Write {
self.composite_write.for_field_with_idx(field, idx)
}
/// Closes the serializer
///
/// After this call the data must be persistently saved on disk.

View File

@@ -2,13 +2,14 @@ use std::collections::HashMap;
use std::io;
use common;
use fastfield_codecs::Column;
use fastfield_codecs::ip_codec::{ip_to_u128, IntervalCompressor};
use fnv::FnvHashMap;
use roaring::RoaringBitmap;
use tantivy_bitpacker::BlockedBitpacker;
use super::multivalued::MultiValuedFastFieldWriter;
use super::multivalued::{MultiValuedFastFieldWriter, U128MultiValueFastFieldWriter};
use super::serializer::FastFieldStats;
use super::{FastFieldType, FastValue};
use super::{FastFieldDataAccess, FastFieldType, FastValue};
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::UnorderedTermId;
@@ -20,6 +21,8 @@ use crate::DatePrecision;
pub struct FastFieldsWriter {
term_id_writers: Vec<MultiValuedFastFieldWriter>,
single_value_writers: Vec<IntFastFieldWriter>,
u128_value_writers: Vec<U128FastFieldWriter>,
u128_multi_value_writers: Vec<U128MultiValueFastFieldWriter>,
multi_values_writers: Vec<MultiValuedFastFieldWriter>,
bytes_value_writers: Vec<BytesFastFieldWriter>,
}
@@ -35,6 +38,8 @@ fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
let mut u128_value_writers = Vec::new();
let mut u128_multi_value_writers = Vec::new();
let mut single_value_writers = Vec::new();
let mut term_id_writers = Vec::new();
let mut multi_values_writers = Vec::new();
@@ -98,10 +103,27 @@ impl FastFieldsWriter {
bytes_value_writers.push(fast_field_writer);
}
}
FieldType::Ip(opt) => {
if opt.is_fast() {
match opt.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => {
let fast_field_writer = U128FastFieldWriter::new(field);
u128_value_writers.push(fast_field_writer);
}
Some(Cardinality::MultiValues) => {
let fast_field_writer = U128MultiValueFastFieldWriter::new(field);
u128_multi_value_writers.push(fast_field_writer);
}
None => {}
}
}
}
FieldType::Str(_) | FieldType::JsonObject(_) => {}
}
}
FastFieldsWriter {
u128_value_writers,
u128_multi_value_writers,
term_id_writers,
single_value_writers,
multi_values_writers,
@@ -130,6 +152,16 @@ impl FastFieldsWriter {
.iter()
.map(|w| w.mem_usage())
.sum::<usize>()
+ self
.u128_value_writers
.iter()
.map(|w| w.mem_usage())
.sum::<usize>()
+ self
.u128_multi_value_writers
.iter()
.map(|w| w.mem_usage())
.sum::<usize>()
}
/// Get the `FastFieldWriter` associated with a field.
@@ -191,7 +223,6 @@ impl FastFieldsWriter {
.iter_mut()
.find(|field_writer| field_writer.field() == field)
}
/// Indexes all of the fastfields of a new document.
pub fn add_document(&mut self, doc: &Document) {
for field_writer in &mut self.term_id_writers {
@@ -206,6 +237,12 @@ impl FastFieldsWriter {
for field_writer in &mut self.bytes_value_writers {
field_writer.add_document(doc);
}
for field_writer in &mut self.u128_value_writers {
field_writer.add_document(doc);
}
for field_writer in &mut self.u128_multi_value_writers {
field_writer.add_document(doc);
}
}
/// Serializes all of the `FastFieldWriter`s by pushing them in
@@ -231,6 +268,129 @@ impl FastFieldsWriter {
for field_writer in &self.bytes_value_writers {
field_writer.serialize(serializer, doc_id_map)?;
}
for field_writer in &self.u128_value_writers {
field_writer.serialize(serializer, doc_id_map)?;
}
for field_writer in &self.u128_multi_value_writers {
field_writer.serialize(serializer, doc_id_map)?;
}
Ok(())
}
}
/// Fast field writer for u128 values.
/// The fast field writer just keeps the values in memory.
///
/// Only once the segment writer is closed and
/// persisted on disk is the fast field writer
/// sent to a `FastFieldSerializer` via the `.serialize(...)`
/// method.
///
/// We cannot serialize earlier, as the values are
/// compressed to a compact number space and the number of
/// bits required for bitpacking can only be known once
/// we have seen all of the values.
pub struct U128FastFieldWriter {
field: Field,
vals: Vec<u128>,
val_count: u32,
null_values: RoaringBitmap,
}
impl U128FastFieldWriter {
/// Creates a new `U128FastFieldWriter`
pub fn new(field: Field) -> Self {
Self {
field,
vals: vec![],
val_count: 0,
null_values: RoaringBitmap::new(),
}
}
/// The memory used (including children)
pub fn mem_usage(&self) -> usize {
self.vals.len() * 16
}
/// Records a new value.
///
/// The n-th value being recorded is implicitly
/// associated with the document whose `DocId` is n.
/// (Well, `n-1` actually because of 0-indexing)
pub fn add_val(&mut self, val: u128) {
self.vals.push(val);
}
/// Extracts the value associated with the fast field for
/// this document and records it. Documents without a value
/// are recorded in the null bitmap.
pub fn add_document(&mut self, doc: &Document) {
match doc.get_first(self.field) {
Some(v) => {
let ip_addr = v.as_ip().unwrap();
let value = ip_to_u128(ip_addr);
self.add_val(value);
}
None => {
self.null_values.insert(self.val_count as u32);
}
};
self.val_count += 1;
}
/// Pushes the fast field values to the serializer.
pub fn serialize(
&self,
serializer: &mut CompositeFastFieldSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
let mut field_write = serializer.get_field_writer(self.field, 0);
let compressor = IntervalCompressor::from_vals(self.vals.to_vec());
let mut val_idx = 0;
let mut get_val = |idx| {
if self.null_values.contains(idx as u32) {
compressor.null_value
} else {
let val = self.vals[val_idx];
val_idx += 1;
val
}
};
if let Some(doc_id_map) = doc_id_map {
// To get the actual value, we could materialize the vec with u128 including nulls, but
// that could cost a lot of memory. Instead we just compute the index of
// the values.
let mut idx_to_val_idx = vec![];
idx_to_val_idx.resize(self.val_count as usize, 0);
let mut val_idx = 0;
for idx in 0..self.val_count {
if !self.null_values.contains(idx as u32) {
idx_to_val_idx[idx as usize] = val_idx as u32;
val_idx += 1;
}
}
let iter = doc_id_map.iter_old_doc_ids().map(|idx| {
if self.null_values.contains(idx as u32) {
compressor.null_value
} else {
self.vals[idx_to_val_idx[idx as usize] as usize]
}
});
compressor.compress_into(iter, &mut field_write)?;
} else {
let iter = (0..self.val_count).map(&mut get_val);
compressor.compress_into(iter, &mut field_write)?;
}
Ok(())
}
}
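The writer above keeps only the non-null values plus a positional bitmap of the nulls; at serialization time the nulls are re-injected as the compressor's `null_value` sentinel. A std-only sketch of that bookkeeping (the real code uses a `RoaringBitmap`, and `NULL_SENTINEL` here is an illustrative stand-in chosen by hand rather than by the compressor):

```rust
use std::collections::BTreeSet;

// Hypothetical sentinel; the real compressor picks a value outside the
// compact space occupied by actual values.
const NULL_SENTINEL: u128 = u128::MAX;

struct SparseU128Writer {
    vals: Vec<u128>,      // only the non-null values, in doc order
    nulls: BTreeSet<u32>, // positions (doc ids) that had no value
    count: u32,           // total number of documents seen
}

impl SparseU128Writer {
    fn new() -> Self {
        Self { vals: Vec::new(), nulls: BTreeSet::new(), count: 0 }
    }

    fn add(&mut self, val: Option<u128>) {
        match val {
            Some(v) => self.vals.push(v),
            None => {
                self.nulls.insert(self.count);
            }
        }
        self.count += 1;
    }

    // Materialize the dense sequence handed to the compressor.
    fn dense(&self) -> Vec<u128> {
        let mut val_idx = 0;
        (0..self.count)
            .map(|doc| {
                if self.nulls.contains(&doc) {
                    NULL_SENTINEL
                } else {
                    let v = self.vals[val_idx];
                    val_idx += 1;
                    v
                }
            })
            .collect()
    }
}

fn main() {
    let mut w = SparseU128Writer::new();
    w.add(Some(7));
    w.add(None);
    w.add(Some(9));
    assert_eq!(w.dense(), vec![7, NULL_SENTINEL, 9]);
}
```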
@@ -294,7 +454,7 @@ impl IntFastFieldWriter {
/// Records a new value.
///
/// The n-th value being recorded is implicitly
/// associated with the document whose `DocId` is n.
/// (Well, `n-1` actually because of 0-indexing)
pub fn add_val(&mut self, val: u64) {
@@ -360,20 +520,38 @@ impl IntFastFieldWriter {
(self.val_min, self.val_max)
};
let fastfield_accessor = WriterFastFieldAccessProvider {
doc_id_map,
vals: &self.vals,
};
let stats = FastFieldStats {
min_value: min,
max_value: max,
num_vals: self.val_count as u64,
};
let fastfield_accessor = WriterFastFieldAccessProvider {
doc_id_map,
vals: &self.vals,
stats,
if let Some(doc_id_map) = doc_id_map {
let iter_gen = || {
doc_id_map
.iter_old_doc_ids()
.map(|doc_id| self.vals.get(doc_id as usize))
};
serializer.create_auto_detect_u64_fast_field(
self.field,
stats,
fastfield_accessor,
iter_gen,
)?;
} else {
let iter_gen = || self.vals.iter();
serializer.create_auto_detect_u64_fast_field(
self.field,
stats,
fastfield_accessor,
iter_gen,
)?;
};
serializer.create_auto_detect_u64_fast_field(self.field, fastfield_accessor)?;
Ok(())
}
}
@@ -382,9 +560,8 @@ impl IntFastFieldWriter {
struct WriterFastFieldAccessProvider<'map, 'bitp> {
doc_id_map: Option<&'map DocIdMapping>,
vals: &'bitp BlockedBitpacker,
stats: FastFieldStats,
}
impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'bitp> {
/// Return the value associated to the given doc.
///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
@@ -403,28 +580,4 @@ impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
self.vals.get(doc as usize)
}
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
if let Some(doc_id_map) = self.doc_id_map {
Box::new(
doc_id_map
.iter_old_doc_ids()
.map(|doc_id| self.vals.get(doc_id as usize)),
)
} else {
Box::new(self.vals.iter())
}
}
fn min_value(&self) -> u64 {
self.stats.min_value
}
fn max_value(&self) -> u64 {
self.stats.max_value
}
fn num_vals(&self) -> u64 {
self.stats.num_vals
}
}

View File

@@ -2,42 +2,35 @@
//! to get mappings from old doc_id to new doc_id and vice versa, after sorting
use std::cmp::Reverse;
use std::ops::Index;
use super::SegmentWriter;
use crate::schema::{Field, Schema};
use crate::{DocAddress, DocId, IndexSortByField, Order, TantivyError};
use crate::{DocId, IndexSortByField, Order, SegmentOrdinal, TantivyError};
/// Struct to provide mapping from new doc_id to old doc_id and segment.
#[derive(Clone)]
pub(crate) struct SegmentDocIdMapping {
new_doc_id_to_old_doc_addr: Vec<DocAddress>,
new_doc_id_to_old_and_segment: Vec<(DocId, SegmentOrdinal)>,
is_trivial: bool,
}
impl SegmentDocIdMapping {
pub(crate) fn new(new_doc_id_to_old_and_segment: Vec<DocAddress>, is_trivial: bool) -> Self {
pub(crate) fn new(
new_doc_id_to_old_and_segment: Vec<(DocId, SegmentOrdinal)>,
is_trivial: bool,
) -> Self {
Self {
new_doc_id_to_old_doc_addr: new_doc_id_to_old_and_segment,
new_doc_id_to_old_and_segment,
is_trivial,
}
}
/// Returns an iterator over the old document addresses, ordered by the new document ids.
///
/// In the returned `DocAddress`, the `segment_ord` is the ordinal of the targeted segment
/// in the list of merged segments.
pub(crate) fn iter_old_doc_addrs(&self) -> impl Iterator<Item = DocAddress> + '_ {
self.new_doc_id_to_old_doc_addr.iter().copied()
pub(crate) fn iter(&self) -> impl Iterator<Item = &(DocId, SegmentOrdinal)> {
self.new_doc_id_to_old_and_segment.iter()
}
pub(crate) fn len(&self) -> usize {
self.new_doc_id_to_old_doc_addr.len()
self.new_doc_id_to_old_and_segment.len()
}
pub(crate) fn get_old_doc_addr(&self, new_doc_id: DocId) -> DocAddress {
self.new_doc_id_to_old_doc_addr[new_doc_id as usize]
}
/// This flag means the segments are simply stacked in the order of their ordinal.
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
///
@@ -46,6 +39,21 @@ impl SegmentDocIdMapping {
self.is_trivial
}
}
impl Index<usize> for SegmentDocIdMapping {
type Output = (DocId, SegmentOrdinal);
fn index(&self, idx: usize) -> &Self::Output {
&self.new_doc_id_to_old_and_segment[idx]
}
}
impl IntoIterator for SegmentDocIdMapping {
type Item = (DocId, SegmentOrdinal);
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.new_doc_id_to_old_and_segment.into_iter()
}
}
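To make the trivial case concrete: stacking segments of sizes 3 and 2 produces the per-segment identity layout below (ordinals counted from 0 here), which is exactly what the `is_trivial` flag lets callers short-circuit. A quick illustration:

```rust
type DocId = u32;
type SegmentOrdinal = u32;

fn stacked_mapping(segment_sizes: &[u32]) -> Vec<(DocId, SegmentOrdinal)> {
    segment_sizes
        .iter()
        .enumerate()
        .flat_map(|(ord, &len)| (0..len).map(move |doc| (doc, ord as SegmentOrdinal)))
        .collect()
}

fn main() {
    let mapping = stacked_mapping(&[3, 2]);
    assert_eq!(mapping, vec![(0, 0), (1, 0), (2, 0), (0, 1), (1, 1)]);
}
```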
/// Struct to provide mapping from old doc_id to new doc_id and vice versa within a segment.
pub struct DocIdMapping {
@@ -143,9 +151,8 @@ pub(crate) fn get_doc_id_mapping_from_field(
#[cfg(test)]
mod tests_indexsorting {
use fastfield_codecs::Column;
use crate::collector::TopDocs;
use crate::fastfield::FastFieldReader;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::query::QueryParser;
use crate::schema::{Schema, *};
@@ -465,9 +472,9 @@ mod tests_indexsorting {
let my_number = index.schema().get_field("my_number").unwrap();
let fast_field = fast_fields.u64(my_number).unwrap();
assert_eq!(fast_field.get_val(0), 10u64);
assert_eq!(fast_field.get_val(1), 20u64);
assert_eq!(fast_field.get_val(2), 30u64);
assert_eq!(fast_field.get(0u32), 10u64);
assert_eq!(fast_field.get(1u32), 20u64);
assert_eq!(fast_field.get(2u32), 30u64);
let multi_numbers = index.schema().get_field("multi_numbers").unwrap();
let multifield = fast_fields.u64s(multi_numbers).unwrap();

View File

@@ -31,7 +31,7 @@ pub const MARGIN_IN_BYTES: usize = 1_000_000;
pub const MEMORY_ARENA_NUM_BYTES_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
pub const MEMORY_ARENA_NUM_BYTES_MAX: usize = u32::MAX as usize - MARGIN_IN_BYTES;
// We impose that the number of index writer threads be at most this.
pub const MAX_NUM_THREAD: usize = 8;
// Add document will block if the number of docs waiting in the queue to be indexed
@@ -174,7 +174,9 @@ fn index_documents(
segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor,
) -> crate::Result<()> {
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone())?;
let schema = segment.schema();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), schema)?;
for document_group in grouped_document_iterator {
for doc in document_group {
segment_writer.add_document(doc)?;
@@ -708,7 +710,7 @@ impl IndexWriter {
}
/// Runs a group of document operations ensuring that the operations are
/// assigned contiguous u64 opstamps and that add operations of the same
/// group are flushed into the same segment.
///
/// If the indexing pipeline is full, this call may block.
@@ -774,8 +776,8 @@ impl Drop for IndexWriter {
#[cfg(test)]
mod tests {
use std::collections::{HashMap, HashSet};
use std::net::IpAddr;
use fastfield_codecs::Column;
use proptest::prelude::*;
use proptest::prop_oneof;
use proptest::strategy::Strategy;
@@ -784,10 +786,11 @@ mod tests {
use crate::collector::TopDocs;
use crate::directory::error::LockError;
use crate::error::*;
use crate::fastfield::FastFieldReader;
use crate::indexer::NoMergePolicy;
use crate::query::{QueryParser, TermQuery};
use crate::schema::{
self, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
self, Cardinality, Facet, FacetOptions, IndexRecordOption, IpOptions, NumericOptions,
TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
};
use crate::store::DOCSTORE_CACHE_CAPACITY;
@@ -1325,7 +1328,7 @@ mod tests {
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.map(|doc| fast_field_reader.get_val(doc as u64))
.map(|doc| fast_field_reader.get(doc))
.collect();
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 1, 0]);
Ok(())
@@ -1382,6 +1385,11 @@ mod tests {
force_end_merge: bool,
) -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let ip_field = schema_builder.add_ip_field("ip", FAST | INDEXED | STORED);
let ips_field = schema_builder.add_ip_field(
"ips",
IpOptions::default().set_fast(Cardinality::MultiValues),
);
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED);
let bool_field = schema_builder.add_bool_field("bool", FAST | INDEXED | STORED);
@@ -1437,17 +1445,37 @@ mod tests {
match op {
IndexingOp::AddDoc { id } => {
let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(),
multi_numbers=> id,
multi_numbers => id,
bool_field => (id % 2u64) != 0,
multi_bools => (id % 2u64) != 0,
multi_bools => (id % 2u64) == 0,
text_field => id.to_string(),
facet_field => facet,
large_text_field=> LOREM
))?;
let ip_from_id = IpAddr::from((id as u128).to_be_bytes());
if id % 3 == 0 {
// every 3rd doc has no ip field
index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(),
multi_numbers=> id,
multi_numbers => id,
bool_field => (id % 2u64) != 0,
multi_bools => (id % 2u64) != 0,
multi_bools => (id % 2u64) == 0,
text_field => id.to_string(),
facet_field => facet,
large_text_field=> LOREM
))?;
} else {
index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(),
ip_field => ip_from_id,
ips_field => ip_from_id,
ips_field => ip_from_id,
multi_numbers=> id,
multi_numbers => id,
bool_field => (id % 2u64) != 0,
multi_bools => (id % 2u64) != 0,
multi_bools => (id % 2u64) == 0,
text_field => id.to_string(),
facet_field => facet,
large_text_field=> LOREM
))?;
}
}
IndexingOp::DeleteDoc { id } => {
index_writer.delete_term(Term::from_field_u64(id_field, id));
@@ -1491,7 +1519,7 @@ mod tests {
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc as u64))
.map(move |doc| ff_reader.get(doc))
})
.collect();
@@ -1502,7 +1530,7 @@ mod tests {
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc as u64))
.map(move |doc| ff_reader.get(doc))
})
.collect();
@@ -1528,6 +1556,54 @@ mod tests {
.collect::<HashSet<_>>()
);
// Check ip addr
let ips: HashSet<Option<IpAddr>> = searcher
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addr(ip_field).unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc as u64))
})
.collect();
let expected_ips = expected_ids_and_num_occurrences
.keys()
.map(|id| {
if id % 3 == 0 {
None
} else {
Some(IpAddr::from((*id as u128).to_be_bytes()))
}
})
.collect::<HashSet<_>>();
assert_eq!(ips, expected_ips);
let expected_ips = expected_ids_and_num_occurrences
.keys()
.filter_map(|id| {
if id % 3 == 0 {
None
} else {
Some(IpAddr::from((*id as u128).to_be_bytes()))
}
})
.collect::<HashSet<_>>();
let ips: HashSet<IpAddr> = searcher
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
segment_reader.doc_ids_alive().flat_map(move |doc| {
let mut vals = vec![];
ff_reader.get_vals(doc, &mut vals);
vals
})
})
.collect();
assert_eq!(ips, expected_ips);
// multivalue fast field tests
for segment_reader in searcher.segment_readers().iter() {
let ff_reader = segment_reader.fast_fields().u64s(multi_numbers).unwrap();
@@ -1620,7 +1696,7 @@ mod tests {
facet_reader
.facet_from_ord(facet_ords[0], &mut facet)
.unwrap();
let id = ff_reader.get_val(doc_id as u64);
let id = ff_reader.get(doc_id);
let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));
assert_eq!(facet, facet_expected);
@@ -1629,6 +1705,31 @@ mod tests {
Ok(())
}
#[test]
fn test_minimal() {
assert!(test_operation_strategy(
&[
IndexingOp::AddDoc { id: 23 },
IndexingOp::AddDoc { id: 13 },
IndexingOp::DeleteDoc { id: 13 }
],
true,
false
)
.is_ok());
assert!(test_operation_strategy(
&[
IndexingOp::AddDoc { id: 23 },
IndexingOp::AddDoc { id: 13 },
IndexingOp::DeleteDoc { id: 13 }
],
false,
false
)
.is_ok());
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(20))]
#[test]

View File

@@ -38,10 +38,10 @@ use crate::{DatePrecision, DateTime, DocId, Term};
/// of values, with a position gap. Here we would like `The` and `Who` to get indexed at
/// position 2 and 3 respectively.
///
/// With regular fields, we sort the fields beforehand, so that all terms with the same
/// path are indexed consecutively.
///
/// In JSON objects, we do not have this comfort, so we need to record these position offsets in
/// a map.
///
/// Note that using a single position for the entire object would not hurt correctness.

View File

@@ -43,7 +43,7 @@ pub mod tests {
/// `MergePolicy` useful for test purposes.
///
/// Every time there is more than one segment,
/// it will suggest to merge them.
#[derive(Debug, Clone)]
pub struct MergeWheneverPossible;

View File

@@ -2,15 +2,18 @@ use std::cmp;
use std::collections::HashMap;
use std::sync::Arc;
use fastfield_codecs::ip_codec::{IntervalCompressor, IntervallDecompressor};
use itertools::Itertools;
use measure_time::debug_time;
use tantivy_bitpacker::minmax;
use crate::core::{Segment, SegmentReader};
use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{
AliveBitSet, Column, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldStats,
MultiValueLength, MultiValuedFastFieldReader,
AliveBitSet, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldDataAccess,
FastFieldReader, FastFieldReaderCodecWrapperU128, FastFieldStats, MultiValueLength,
MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
@@ -20,8 +23,8 @@ use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
use crate::{
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
SegmentComponent, SegmentOrdinal,
DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentComponent,
SegmentOrdinal,
};
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -87,7 +90,7 @@ pub struct IndexMerger {
}
fn compute_min_max_val(
u64_reader: &impl Column<u64>,
u64_reader: &impl FastFieldReader<u64>,
segment_reader: &SegmentReader,
) -> Option<(u64, u64)> {
if segment_reader.max_doc() == 0 {
@@ -101,11 +104,11 @@ fn compute_min_max_val(
}
// some deleted documents,
// we need to recompute the max / min
segment_reader
.doc_ids_alive()
.map(|doc_id| u64_reader.get_val(doc_id as u64))
.minmax()
.into_option()
minmax(
segment_reader
.doc_ids_alive()
.map(|doc_id| u64_reader.get(doc_id)),
)
}
struct TermOrdinalMapping {
@@ -133,7 +136,7 @@ impl TermOrdinalMapping {
fn max_term_ord(&self) -> TermOrdinal {
self.per_segment_new_term_ordinals
.iter()
.flat_map(|term_ordinals| term_ordinals.iter().max().cloned())
.flat_map(|term_ordinals| term_ordinals.iter().cloned().max())
.max()
.unwrap_or_default()
}
@@ -259,9 +262,9 @@ impl IndexMerger {
.iter()
.map(|reader| reader.get_fieldnorms_reader(field))
.collect::<Result<_, _>>()?;
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let fieldnorms_reader = &fieldnorms_readers[old_doc_addr.segment_ord as usize];
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(old_doc_addr.doc_id);
for (doc_id, reader_ordinal) in doc_id_mapping.iter() {
let fieldnorms_reader = &fieldnorms_readers[*reader_ordinal as usize];
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(*doc_id);
fieldnorms_data.push(fieldnorm_id);
}
@@ -320,6 +323,24 @@ impl IndexMerger {
self.write_bytes_fast_field(field, fast_field_serializer, doc_id_mapping)?;
}
}
FieldType::Ip(options) => match options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => {
self.write_u128_single_fast_field(
field,
fast_field_serializer,
doc_id_mapping,
)?;
}
Some(Cardinality::MultiValues) => {
self.write_u128_multi_fast_field(
field,
fast_field_serializer,
doc_id_mapping,
)?;
}
None => {}
},
FieldType::JsonObject(_) | FieldType::Facet(_) | FieldType::Str(_) => {
// We don't handle json fast field for the moment
// They can be implemented using what is done
@@ -330,6 +351,114 @@ impl IndexMerger {
Ok(())
}
// used to merge `u128` multi-valued fast fields.
fn write_u128_multi_fast_field(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let reader_ordinal_and_field_accessors = self
.readers
.iter()
.map(|segment_reader| {
let val_length_reader: MultiValuedU128FastFieldReader<u128> =
segment_reader.fast_fields().u128s(field).expect(
"Failed to find index for multivalued field. This is a bug in tantivy, \
please report.",
);
(segment_reader, val_length_reader)
})
.collect::<Vec<_>>();
Self::write_1_n_fast_field_idx_generic(
field,
fast_field_serializer,
doc_id_mapping,
&reader_ordinal_and_field_accessors,
)?;
let fast_field_readers = self
.readers
.iter()
.map(|reader| {
let u128_reader: MultiValuedU128FastFieldReader<u128> =
reader.fast_fields().u128s(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
u128_reader
})
.collect::<Vec<_>>();
let compressor = {
let vals = fast_field_readers
.iter()
.flat_map(|reader| reader.iter())
.flatten()
.collect::<Vec<u128>>();
IntervalCompressor::from_vals(vals)
};
let iter = doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
let mut out = vec![];
fast_field_reader.get_vals(*doc_id, &mut out);
out.into_iter()
});
let field_write = fast_field_serializer.get_field_writer(field, 1);
compressor.compress_into(iter, field_write)?;
Ok(())
}
// used to merge `u128` single fast fields.
fn write_u128_single_fast_field(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let fast_field_readers = self
.readers
.iter()
.map(|reader| {
let u128_reader: FastFieldReaderCodecWrapperU128<u128, IntervallDecompressor> =
reader.fast_fields().u128(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
u128_reader
})
.collect::<Vec<_>>();
let compressor = {
let vals = fast_field_readers
.iter()
.flat_map(|reader| reader.iter())
.flatten()
.collect::<Vec<u128>>();
IntervalCompressor::from_vals(vals)
};
let iter = doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
fast_field_reader
.get_val(*doc_id as u64)
.unwrap_or(compressor.null_value)
});
let field_write = fast_field_serializer.get_field_writer(field, 0);
compressor.compress_into(iter, field_write)?;
Ok(())
}
// used to merge both field norms and `u64/i64` single fast fields.
fn write_single_fast_field(
&self,
@@ -373,46 +502,29 @@ impl IndexMerger {
struct SortedDocIdFieldAccessProvider<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: &'a Vec<DynamicFastFieldReader<u64>>,
stats: FastFieldStats,
}
impl<'a> Column for SortedDocIdFieldAccessProvider<'a> {
impl<'a> FastFieldDataAccess for SortedDocIdFieldAccessProvider<'a> {
fn get_val(&self, doc: u64) -> u64 {
let DocAddress {
doc_id,
segment_ord,
} = self.doc_id_mapping.get_old_doc_addr(doc as u32);
self.fast_field_readers[segment_ord as usize].get_val(doc_id as u64)
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.doc_id_mapping
.iter_old_doc_addrs()
.map(|old_doc_addr| {
let fast_field_reader =
&self.fast_field_readers[old_doc_addr.segment_ord as usize];
fast_field_reader.get_val(old_doc_addr.doc_id as u64)
}),
)
}
fn min_value(&self) -> u64 {
self.stats.min_value
}
fn max_value(&self) -> u64 {
self.stats.max_value
}
fn num_vals(&self) -> u64 {
self.stats.num_vals
let (doc_id, reader_ordinal) = self.doc_id_mapping[doc as usize];
self.fast_field_readers[reader_ordinal as usize].get(doc_id)
}
}
let fastfield_accessor = SortedDocIdFieldAccessProvider {
doc_id_mapping,
fast_field_readers: &fast_field_readers,
stats,
};
fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
let iter_gen = || {
doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
fast_field_reader.get(*doc_id)
})
};
fast_field_serializer.create_auto_detect_u64_fast_field(
field,
stats,
fastfield_accessor,
iter_gen,
)?;
Ok(())
}
@@ -428,7 +540,7 @@ impl IndexMerger {
let everything_is_in_order = reader_ordinal_and_field_accessors
.into_iter()
.map(|(_, col)| Arc::new(col))
.map(|reader| reader.1)
.tuple_windows()
.all(|(field_accessor1, field_accessor2)| {
if sort_by_field.order.is_asc() {
@@ -443,7 +555,7 @@ impl IndexMerger {
pub(crate) fn get_sort_field_accessor(
reader: &SegmentReader,
sort_by_field: &IndexSortByField,
) -> crate::Result<impl Column> {
) -> crate::Result<impl FastFieldReader<u64>> {
let field_id = expect_field_id_for_sort_field(reader.schema(), sort_by_field)?; // for now expect fastfield, but not strictly required
let value_accessor = reader.fast_fields().u64_lenient(field_id)?;
Ok(value_accessor)
@@ -452,7 +564,7 @@ impl IndexMerger {
pub(crate) fn get_reader_with_sort_field_accessor(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<Vec<(SegmentOrdinal, impl Column)>> {
) -> crate::Result<Vec<(SegmentOrdinal, impl FastFieldReader<u64> + Clone)>> {
let reader_ordinal_and_field_accessors = self
.readers
.iter()
@@ -485,11 +597,15 @@ impl IndexMerger {
let doc_id_reader_pair =
reader_ordinal_and_field_accessors
.iter()
.map(|(reader_ord, ff_reader)| {
let reader = &self.readers[*reader_ord as usize];
reader
.doc_ids_alive()
.map(move |doc_id| (doc_id, reader_ord, ff_reader))
.map(|reader_and_field_accessor| {
let reader = &self.readers[reader_and_field_accessor.0 as usize];
reader.doc_ids_alive().map(move |doc_id| {
(
doc_id,
reader_and_field_accessor.0,
&reader_and_field_accessor.1,
)
})
});
let total_num_new_docs = self
@@ -498,25 +614,22 @@ impl IndexMerger {
.map(|reader| reader.num_docs() as usize)
.sum();
let mut sorted_doc_ids: Vec<DocAddress> = Vec::with_capacity(total_num_new_docs);
let mut sorted_doc_ids = Vec::with_capacity(total_num_new_docs);
// create iterator tuple of (old doc_id, reader) in order of the new doc_ids
sorted_doc_ids.extend(
doc_id_reader_pair
.into_iter()
.kmerge_by(|a, b| {
let val1 = a.2.get_val(a.0 as u64);
let val2 = b.2.get_val(b.0 as u64);
let val1 = a.2.get(a.0);
let val2 = b.2.get(b.0);
if sort_by_field.order == Order::Asc {
val1 < val2
} else {
val1 > val2
}
})
.map(|(doc_id, &segment_ord, _)| DocAddress {
doc_id,
segment_ord,
}),
.map(|(doc_id, reader_with_id, _)| (doc_id, reader_with_id)),
);
Ok(SegmentDocIdMapping::new(sorted_doc_ids, false))
}
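The `kmerge_by` call above performs a k-way merge of the per-segment doc id streams, each already ordered by the sort field within its segment (segments of a sorted index are), without materializing per-segment sorted copies. A small sketch of the same idea with a binary heap, assuming ascending order (`merge_sorted` is an illustrative helper, not tantivy API):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Merge per-segment (doc_id, sort_value) streams, each already sorted
/// by value, into one global (doc_id, segment_ord) order.
fn merge_sorted(segments: &[Vec<(u32, u64)>]) -> Vec<(u32, u32)> {
    let mut heap = BinaryHeap::new();
    for (ord, seg) in segments.iter().enumerate() {
        if let Some(&(doc, val)) = seg.first() {
            heap.push(Reverse((val, ord, 0usize, doc)));
        }
    }
    let mut out = Vec::new();
    while let Some(Reverse((_, ord, idx, doc))) = heap.pop() {
        out.push((doc, ord as u32));
        if let Some(&(next_doc, next_val)) = segments[ord].get(idx + 1) {
            heap.push(Reverse((next_val, ord, idx + 1, next_doc)));
        }
    }
    out
}

fn main() {
    let seg0 = vec![(0u32, 10u64), (1, 30)];
    let seg1 = vec![(0u32, 20u64)];
    assert_eq!(merge_sorted(&[seg0, seg1]), vec![(0, 0), (0, 1), (1, 0)]);
}
```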
@@ -536,16 +649,16 @@ impl IndexMerger {
// This is required by the bitpacker, as it needs to know
// what bit length to use for bitpacking.
let mut num_docs = 0;
for (reader, u64s_reader) in reader_and_field_accessors.iter() {
for (reader, value_length_reader) in reader_and_field_accessors.iter() {
if let Some(alive_bitset) = reader.alive_bitset() {
num_docs += alive_bitset.num_alive_docs() as u64;
for doc in reader.doc_ids_alive() {
let num_vals = u64s_reader.get_len(doc) as u64;
let num_vals = value_length_reader.get_len(doc) as u64;
total_num_vals += num_vals;
}
} else {
num_docs += reader.max_doc() as u64;
total_num_vals += u64s_reader.get_total_len();
total_num_vals += value_length_reader.get_total_len();
}
}
@@ -560,49 +673,25 @@ impl IndexMerger {
// copying into a temp vec is not ideal, but the fast field codec api requires random
// access, which is used in the estimation. It's possible to 1. calculate random
// access on the fly or 2. change the codec api to make random access optional, but
// they both have also major drawbacks.
let mut offsets = Vec::with_capacity(doc_id_mapping.len());
let mut offset = 0;
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
for (doc_id, reader) in doc_id_mapping.iter() {
let reader = &reader_and_field_accessors[*reader as usize].1;
offsets.push(offset);
offset += reader.get_len(old_doc_addr.doc_id) as u64;
offset += reader.get_len(*doc_id) as u64;
}
offsets.push(offset);
#[derive(Clone)]
struct FieldIndexAccessProvider<'a> {
offsets: &'a [u64],
stats: FastFieldStats,
}
impl<'a> Column for FieldIndexAccessProvider<'a> {
fn get_val(&self, doc: u64) -> u64 {
self.offsets[doc as usize]
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(self.offsets.iter().cloned())
}
fn min_value(&self) -> u64 {
self.stats.min_value
}
fn max_value(&self) -> u64 {
self.stats.max_value
}
fn num_vals(&self) -> u64 {
self.stats.num_vals
}
}
let fastfield_accessor = FieldIndexAccessProvider {
offsets: &offsets,
let iter_gen = || offsets.iter().cloned();
fast_field_serializer.create_auto_detect_u64_fast_field(
field,
stats,
};
fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
&offsets[..],
iter_gen,
)?;
Ok(offsets)
}
/// Returns the fastfield index (index for the data, not the data).
@@ -645,7 +734,7 @@ impl IndexMerger {
debug_time!("write-term-id-fast-field");
// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
// First we merge the idx fast field.
@@ -670,12 +759,12 @@ impl IndexMerger {
fast_field_serializer.new_u64_fast_field_with_idx(field, 0u64, max_term_ord, 1)?;
let mut vals = Vec::with_capacity(100);
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(old_doc_addr.segment_ord as usize);
term_ordinal_mappings.get_segment(*reader_ordinal as usize);
let ff_reader = &fast_field_reader[old_doc_addr.segment_ord as usize];
ff_reader.get_vals(old_doc_addr.doc_id, &mut vals);
let ff_reader = &fast_field_reader[*reader_ordinal as usize];
ff_reader.get_vals(*old_doc_id, &mut vals);
for &prev_term_ord in &vals {
let new_term_ord = term_ordinal_mapping[prev_term_ord as usize];
serialize_vals.add_val(new_term_ord)?;
@@ -696,17 +785,16 @@ impl IndexMerger {
.map(|reader| reader.num_docs() as usize)
.sum();
let mut mapping: Vec<DocAddress> = Vec::with_capacity(total_num_new_docs);
let mut mapping = Vec::with_capacity(total_num_new_docs);
mapping.extend(
self.readers
.iter()
.enumerate()
.flat_map(|(segment_ord, reader)| {
reader.doc_ids_alive().map(move |doc_id| DocAddress {
segment_ord: segment_ord as u32,
doc_id,
})
.flat_map(|(reader_ordinal, reader)| {
reader
.doc_ids_alive()
.map(move |doc_id| (doc_id, reader_ordinal as SegmentOrdinal))
}),
);
Ok(SegmentDocIdMapping::new(mapping, true))
@@ -718,7 +806,7 @@ impl IndexMerger {
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
// First we merge the idx fast field.
@@ -775,68 +863,49 @@ impl IndexMerger {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: &'a Vec<MultiValuedFastFieldReader<u64>>,
offsets: Vec<u64>,
stats: FastFieldStats,
}
impl<'a> Column for SortedDocIdMultiValueAccessProvider<'a> {
impl<'a> FastFieldDataAccess for SortedDocIdMultiValueAccessProvider<'a> {
fn get_val(&self, pos: u64) -> u64 {
// use the offsets index to find the doc_id which will contain the position.
// the offsets are strictly increasing so we can do a simple search on it.
let new_doc_id: DocId =
self.offsets
.iter()
.position(|&offset| offset > pos)
.expect("pos is out of bounds") as DocId
- 1u32;
// the offsets are strictly increasing so we can do a simple search on it.
let new_doc_id = self
.offsets
.iter()
.position(|&offset| offset > pos)
.expect("pos is out of bounds")
- 1;
// now we need to find the position of `pos` in the multivalued bucket
let num_pos_covered_until_now = self.offsets[new_doc_id as usize];
let num_pos_covered_until_now = self.offsets[new_doc_id];
let pos_in_values = pos - num_pos_covered_until_now;
let old_doc_addr = self.doc_id_mapping.get_old_doc_addr(new_doc_id);
let num_vals = self.fast_field_readers[old_doc_addr.segment_ord as usize]
.get_len(old_doc_addr.doc_id);
let (old_doc_id, reader_ordinal) = self.doc_id_mapping[new_doc_id as usize];
let num_vals = self.fast_field_readers[reader_ordinal as usize].get_len(old_doc_id);
assert!(num_vals >= pos_in_values);
let mut vals = Vec::new();
self.fast_field_readers[old_doc_addr.segment_ord as usize]
.get_vals(old_doc_addr.doc_id, &mut vals);
let mut vals = vec![];
self.fast_field_readers[reader_ordinal as usize].get_vals(old_doc_id, &mut vals);
vals[pos_in_values as usize]
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.doc_id_mapping
.iter_old_doc_addrs()
.flat_map(|old_doc_addr| {
let ff_reader =
&self.fast_field_readers[old_doc_addr.segment_ord as usize];
let mut vals = Vec::new();
ff_reader.get_vals(old_doc_addr.doc_id, &mut vals);
vals.into_iter()
}),
)
}
fn min_value(&self) -> u64 {
self.stats.min_value
}
fn max_value(&self) -> u64 {
self.stats.max_value
}
fn num_vals(&self) -> u64 {
self.stats.num_vals
}
}
let fastfield_accessor = SortedDocIdMultiValueAccessProvider {
doc_id_mapping,
fast_field_readers: &ff_readers,
offsets,
stats,
};
let iter_gen = || {
doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
let ff_reader = &ff_readers[*reader_ordinal as usize];
let mut vals = vec![];
ff_reader.get_vals(*doc_id, &mut vals);
vals.into_iter()
})
};
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
field,
stats,
fastfield_accessor,
iter_gen,
1,
)?;
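The offset-based random access used by `SortedDocIdMultiValueAccessProvider` is worth a worked example: because the offsets are strictly increasing, a flat value position maps back to a `(doc, position_within_doc)` pair with a search. A sketch using binary search via `partition_point` instead of the linear `position` scan above:

```rust
/// offsets[d] = number of values before doc d; offsets.len() = num_docs + 1.
fn locate(offsets: &[u64], pos: u64) -> (usize, u64) {
    // Index of the first offset exceeding `pos`, minus one, is the doc.
    let doc = offsets.partition_point(|&off| off <= pos) - 1;
    (doc, pos - offsets[doc])
}

fn main() {
    // Doc 0 has 2 values, doc 1 has 0, doc 2 has 3.
    let offsets = [0u64, 2, 2, 5];
    assert_eq!(locate(&offsets, 0), (0, 0));
    assert_eq!(locate(&offsets, 1), (0, 1));
    assert_eq!(locate(&offsets, 2), (2, 0)); // doc 1 is empty, skip to doc 2
    assert_eq!(locate(&offsets, 4), (2, 2));
}
```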
@@ -869,9 +938,9 @@ impl IndexMerger {
)?;
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1);
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let bytes_reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
let val = bytes_reader.get_bytes(old_doc_addr.doc_id);
for (doc_id, reader_ordinal) in doc_id_mapping.iter() {
let bytes_reader = &reader_and_field_accessors[*reader_ordinal as usize].1;
let val = bytes_reader.get_bytes(*doc_id);
serialize_vals.write_all(val)?;
}
@@ -927,9 +996,9 @@ impl IndexMerger {
segment_local_map
})
.collect();
for (new_doc_id, old_doc_addr) in doc_id_mapping.iter_old_doc_addrs().enumerate() {
let segment_map = &mut merged_doc_id_map[old_doc_addr.segment_ord as usize];
segment_map[old_doc_addr.doc_id as usize] = Some(new_doc_id as DocId);
for (new_doc_id, (old_doc_id, segment_ord)) in doc_id_mapping.iter().enumerate() {
let segment_map = &mut merged_doc_id_map[*segment_ord as usize];
segment_map[*old_doc_id as usize] = Some(new_doc_id as DocId);
}
// Note that the total number of tokens is not exact.
@@ -1104,15 +1173,15 @@ impl IndexMerger {
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
.collect();
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let doc_bytes_it = &mut document_iterators[old_doc_addr.segment_ord as usize];
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
let doc_bytes_it = &mut document_iterators[*reader_ordinal as usize];
if let Some(doc_bytes_res) = doc_bytes_it.next() {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
} else {
return Err(DataCorruption::comment_only(&format!(
"unexpected missing document in docstore on merge, doc address \
{old_doc_addr:?}",
"unexpected missing document in docstore on merge, doc id {:?}",
old_doc_id
))
.into());
}
@@ -1199,7 +1268,6 @@ impl IndexMerger {
#[cfg(test)]
mod tests {
use byteorder::{BigEndian, ReadBytesExt};
use fastfield_codecs::Column;
use schema::FAST;
use crate::collector::tests::{
@@ -1207,6 +1275,7 @@ mod tests {
};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::fastfield::FastFieldReader;
use crate::query::{AllQuery, BooleanQuery, Scorer, TermQuery};
use crate::schema::{
Cardinality, Document, Facet, FacetOptions, IndexRecordOption, NumericOptions, Term,
@@ -2137,7 +2206,7 @@ mod tests {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is intrinsic to the bm25 formula. no worries
// there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
@@ -2162,7 +2231,7 @@ mod tests {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);

View File

@@ -1,10 +1,8 @@
#[cfg(test)]
mod tests {
use fastfield_codecs::Column;
use crate::collector::TopDocs;
use crate::core::Index;
use crate::fastfield::{AliveBitSet, MultiValuedFastFieldReader};
use crate::fastfield::{AliveBitSet, FastFieldReader, MultiValuedFastFieldReader};
use crate::query::QueryParser;
use crate::schema::{
self, BytesOptions, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
@@ -188,17 +186,17 @@ mod tests {
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64(int_field).unwrap();
assert_eq!(fast_field.get_val(5), 1u64);
assert_eq!(fast_field.get_val(4), 2u64);
assert_eq!(fast_field.get_val(3), 3u64);
assert_eq!(fast_field.get(5u32), 1u64);
assert_eq!(fast_field.get(4u32), 2u64);
assert_eq!(fast_field.get(3u32), 3u64);
if force_disjunct_segment_sort_values {
assert_eq!(fast_field.get_val(2u64), 20u64);
assert_eq!(fast_field.get_val(1u64), 100u64);
assert_eq!(fast_field.get(2u32), 20u64);
assert_eq!(fast_field.get(1u32), 100u64);
} else {
assert_eq!(fast_field.get_val(2u64), 10u64);
assert_eq!(fast_field.get_val(1u64), 20u64);
assert_eq!(fast_field.get(2u32), 10u64);
assert_eq!(fast_field.get(1u32), 20u64);
}
assert_eq!(fast_field.get_val(0u64), 1_000u64);
assert_eq!(fast_field.get(0u32), 1_000u64);
// test new field norm mapping
{
@@ -375,12 +373,12 @@ mod tests {
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64(int_field).unwrap();
assert_eq!(fast_field.get_val(0), 1u64);
assert_eq!(fast_field.get_val(1), 2u64);
assert_eq!(fast_field.get_val(2), 3u64);
assert_eq!(fast_field.get_val(3), 10u64);
assert_eq!(fast_field.get_val(4), 20u64);
assert_eq!(fast_field.get_val(5), 1_000u64);
assert_eq!(fast_field.get(0u32), 1u64);
assert_eq!(fast_field.get(1u32), 2u64);
assert_eq!(fast_field.get(2u32), 3u64);
assert_eq!(fast_field.get(3u32), 10u64);
assert_eq!(fast_field.get(4u32), 20u64);
assert_eq!(fast_field.get(5u32), 1_000u64);
let get_vals = |fast_field: &MultiValuedFastFieldReader<u64>, doc_id: u32| -> Vec<u64> {
let mut vals = vec![];
@@ -480,13 +478,13 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench_sorted_index_merge {
use fastfield_codecs::Column;
use test::{self, Bencher};
use crate::core::Index;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::indexer::merger::IndexMerger;
use crate::schema::{Cardinality, NumericOptions, Schema};
use crate::schema::{Cardinality, Document, NumericOptions, Schema};
use crate::{IndexSettings, IndexSortByField, IndexWriter, Order};
fn create_index(sort_by_field: Option<IndexSortByField>) -> Index {
let mut schema_builder = Schema::builder();
@@ -505,7 +503,9 @@ mod bench_sorted_index_merge {
{
let mut index_writer = index.writer_for_tests().unwrap();
let index_doc = |index_writer: &mut IndexWriter, val: u64| {
index_writer.add_document(doc!(int_field=>val)).unwrap();
let mut doc = Document::default();
doc.add_u64(int_field, val);
index_writer.add_document(doc).unwrap();
};
// 3 segments with 10_000 values in the fast fields
for _ in 0..3 {
@@ -518,7 +518,6 @@ mod bench_sorted_index_merge {
}
index
}
#[bench]
fn create_sorted_index_walk_overkmerge_on_merge_fastfield(
b: &mut Bencher,
@@ -534,19 +533,19 @@ mod bench_sorted_index_merge {
IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap();
b.iter(|| {
let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
let reader = &merger.readers[doc_addr.segment_ord as usize];
let sorted_doc_ids = doc_id_mapping.iter().map(|(doc_id, ordinal)| {
let reader = &merger.readers[*ordinal as usize];
let u64_reader: DynamicFastFieldReader<u64> =
reader.fast_fields().typed_fast_field_reader(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
(doc_addr.doc_id, reader, u64_reader)
(doc_id, reader, u64_reader)
});
// add values in order of the new doc_ids
let mut val = 0;
for (doc_id, _reader, field_reader) in sorted_doc_ids {
val = field_reader.get_val(doc_id as u64);
val = field_reader.get(*doc_id);
}
val

View File

@@ -21,7 +21,7 @@ pub(crate) enum SegmentsStatus {
}
impl SegmentRegisters {
/// Check if all the segments are committed or uncommitted.
///
/// If some segment is missing or segments are in a different state (this should not happen
/// if tantivy is used correctly), returns `None`.
@@ -168,8 +168,8 @@ impl SegmentManager {
segment_entries.push(segment_entry);
}
} else {
let error_msg = "Merge operation sent for segments that are not all uncommitted or \
committed."
.to_string();
return Err(TantivyError::InvalidArgument(error_msg));
}
@@ -182,7 +182,7 @@ impl SegmentManager {
}
// Replace a list of segments with their equivalent merged segment.
//
// Returns true if these segments are committed, false if the merged segments are uncommitted.
pub(crate) fn end_merge(
&self,
before_merge_segment_ids: &[SegmentId],

View File

@@ -25,10 +25,39 @@ use crate::indexer::{
DefaultMergePolicy, MergeCandidate, MergeOperation, MergePolicy, SegmentEntry,
SegmentSerializer,
};
use crate::schema::Schema;
use crate::{FutureResult, Opstamp};
const NUM_MERGE_THREADS: usize = 4;
/// Save the index meta file.
/// This operation is atomic:
/// Either
/// - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched,
/// - it succeeds, and `meta.json` is written
/// and flushed.
///
/// This method is not part of tantivy's public API
pub fn save_new_metas(
schema: Schema,
index_settings: IndexSettings,
directory: &dyn Directory,
) -> crate::Result<()> {
save_metas(
&IndexMeta {
index_settings,
segments: Vec::new(),
schema,
opstamp: 0u64,
payload: None,
},
directory,
)?;
directory.sync_directory()?;
Ok(())
}
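The atomicity contract documented above (either `meta.json` is fully written and flushed, or it is left untouched) is classically achieved with a write-to-temp-then-rename pattern. A std-only sketch of that pattern (tantivy routes the actual write through its `Directory` abstraction, so this is an analogy, not its implementation):

```rust
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::Path;

fn atomic_write(path: &Path, bytes: &[u8]) -> io::Result<()> {
    let tmp = path.with_extension("tmp");
    let mut file = File::create(&tmp)?;
    file.write_all(bytes)?;
    file.sync_all()?; // flush file contents to disk
    // Atomic replace on POSIX filesystems; a fully durable variant
    // would also fsync the parent directory afterwards.
    fs::rename(&tmp, path)?;
    Ok(())
}

fn main() -> io::Result<()> {
    atomic_write(Path::new("meta.json"), b"{\"segments\": []}\n")
}
```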
/// Save the index meta file.
/// This operation is atomic:
/// Either
@@ -38,7 +67,7 @@ const NUM_MERGE_THREADS: usize = 4;
/// and flushed.
///
/// This method is not part of tantivy's public API
pub(crate) fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer.
@@ -142,7 +171,7 @@ pub fn merge_indices<T: Into<Box<dyn Directory>>>(
if indices.is_empty() {
// If there are no indices to merge, there is no need to do anything.
return Err(crate::TantivyError::InvalidArgument(
"No indices given to merge".to_string(),
"No indices given to marge".to_string(),
));
}
@@ -190,7 +219,7 @@ pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
if segments.is_empty() {
// If there are no indices to merge, there is no need to do anything.
return Err(crate::TantivyError::InvalidArgument(
"No segments given to merge".to_string(),
"No segments given to marge".to_string(),
));
}
@@ -253,7 +282,7 @@ pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
pub(crate) struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the
// `SegmentUpdater`.
//
// This should be up to date as all updates happen through
@@ -471,7 +500,7 @@ impl SegmentUpdater {
// It returns an error if for some reason the merge operation could not be started.
//
// At this point an error is not necessarily the sign of a malfunction.
// (e.g. A rollback could have happened, between the instant when the merge operation was
// suggested and the moment when it ended up being executed.)
//
// `segment_ids` is required to be non-empty.

View File

@@ -53,7 +53,7 @@ fn remap_doc_opstamps(
/// set of documents.
///
/// It creates the postings lists in anonymous memory.
/// The segment is laid on disk when the segment gets `finalized`.
pub struct SegmentWriter {
pub(crate) max_doc: DocId,
pub(crate) ctx: IndexingContext,
@@ -80,8 +80,8 @@ impl SegmentWriter {
pub fn for_segment(
memory_budget_in_bytes: usize,
segment: Segment,
schema: Schema,
) -> crate::Result<SegmentWriter> {
let schema = segment.schema();
let tokenizer_manager = segment.index().tokenizers().clone();
let table_size = compute_initial_table_size(memory_budget_in_bytes)?;
let segment_serializer = SegmentSerializer::for_segment(segment, false)?;
@@ -294,6 +294,13 @@ impl SegmentWriter {
ctx,
)?;
}
FieldType::Ip(_) => {
for value in values {
let ip_val = value.as_ip().ok_or_else(make_schema_error)?;
term_buffer.set_text(&ip_val.to_string());
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
}
}
}
}
Ok(())

View File

@@ -11,7 +11,6 @@
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)]
#![allow(clippy::len_without_is_empty)]
#![allow(clippy::derive_partial_eq_without_eq)]
//! # `tantivy`
//!
@@ -301,7 +300,7 @@ pub use self::docset::{DocSet, TERMINATED};
pub use crate::core::{
Executor, Index, IndexBuilder, IndexMeta, IndexSettings, IndexSortByField, InvertedIndexReader,
Order, Searcher, SearcherGeneration, Segment, SegmentComponent, SegmentId, SegmentMeta,
SegmentReader, SingleSegmentIndexWriter,
SegmentReader,
};
pub use crate::directory::Directory;
pub use crate::indexer::demuxer::*;
@@ -421,7 +420,6 @@ pub struct DocAddress {
#[cfg(test)]
pub mod tests {
use common::{BinarySerializable, FixedSize};
use fastfield_codecs::Column;
use rand::distributions::{Bernoulli, Uniform};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
@@ -430,6 +428,7 @@ pub mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::core::SegmentReader;
use crate::docset::{DocSet, TERMINATED};
use crate::fastfield::FastFieldReader;
use crate::merge_policy::NoMergePolicy;
use crate::query::BooleanQuery;
use crate::schema::*;
@@ -1036,21 +1035,21 @@ pub mod tests {
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_unsigned);
assert!(fast_field_reader_opt.is_ok());
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get_val(0), 4u64)
assert_eq!(fast_field_reader.get(0), 4u64)
}
{
let fast_field_reader_res = segment_reader.fast_fields().i64(fast_field_signed);
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
assert_eq!(fast_field_reader.get_val(0), 4i64)
assert_eq!(fast_field_reader.get(0), 4i64)
}
{
let fast_field_reader_res = segment_reader.fast_fields().f64(fast_field_float);
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
assert_eq!(fast_field_reader.get_val(0), 4f64)
assert_eq!(fast_field_reader.get(0), 4f64)
}
Ok(())
}

View File

@@ -199,7 +199,7 @@ impl BlockSegmentPostings {
self.doc_decoder.output_array()
}
/// Returns a full block, regardless of whether the block is complete or incomplete (
/// as it happens for the last block of the posting list).
///
/// In the latter case, the block is guaranteed to be padded with the sentinel value:
@@ -494,7 +494,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
// create two postings lists, one containing even numbers,
// the other containing odd numbers.
for i in 0..6 {
let doc = doc!(int_field=> (i % 2) as u64);

View File

@@ -227,7 +227,7 @@ pub mod tests {
{
let mut segment_writer =
SegmentWriter::for_segment(3_000_000, segment.clone()).unwrap();
SegmentWriter::for_segment(3_000_000, segment.clone(), schema).unwrap();
{
// checking that position works if the field has two values
let op = AddOperation {
@@ -500,7 +500,7 @@ pub mod tests {
Ok(())
}
/// Wraps a given docset, and forwards all calls but the
/// `.skip_next(...)`. This is useful to test that a specialized
/// implementation of `.skip_next(...)` is consistent
/// with the default implementation.

View File

@@ -50,6 +50,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
| FieldType::Bool(_)
| FieldType::Date(_)
| FieldType::Bytes(_)
| FieldType::Ip(_)
| FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<NothingRecorder>::default()),
FieldType::JsonObject(ref json_object_options) => {
if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {

View File

@@ -14,7 +14,7 @@ pub trait Postings: DocSet + 'static {
/// The number of times the term appears in the document.
fn term_freq(&self) -> u32;
/// Returns the positions offset by a given value.
/// The output vector will be resized to the `term_freq`.
fn positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>);

View File

@@ -89,6 +89,7 @@ pub(crate) fn serialize_postings(
| FieldType::Bool(_) => {}
FieldType::Bytes(_) => {}
FieldType::JsonObject(_) => {}
FieldType::Ip(_) => {} // TODO check
}
let postings_writer = per_field_postings_writers.get_for_field(field);
@@ -116,7 +117,7 @@ pub(crate) struct IndexingPosition {
/// and building a `Segment` in anonymous memory.
///
/// `PostingsWriter` writes in a `MemoryArena`.
pub(crate) trait PostingsWriter: Send + Sync {
pub(crate) trait PostingsWriter {
/// Record that a document contains a term at a given position.
///
/// * doc - the document id

View File

@@ -56,7 +56,7 @@ impl<'a> Iterator for VInt32Reader<'a> {
/// * the document id
/// * the term frequency
/// * the term positions
pub(crate) trait Recorder: Copy + Default + Send + Sync + 'static {
pub(crate) trait Recorder: Copy + Default + 'static {
/// Returns the current document
fn current_doc(&self) -> u32;
/// Starts recording information about a new document

View File

@@ -40,7 +40,7 @@ fn len_to_capacity(len: u32) -> CapacityResult {
/// An exponential unrolled link.
///
/// The use case is as follows. Tantivy's indexer conceptually acts like a
/// `HashMap<Term, Vec<u32>>`. As we come across a given term in document
/// `D`, we lookup the term in the map and append the document id to its vector.
///
/// The vector is then only read when it is serialized.
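The conceptual model in the comment above can be made concrete. This sketch implements the naive `HashMap<Term, Vec<u32>>` version the comment describes; the exponential unrolled list exists to replace each `Vec<u32>` with arena-allocated blocks of growing size, avoiding reallocations and per-vector overhead:

use std::collections::HashMap;

// Naive stand-in for the indexer's behavior: append doc ids per term,
// read the lists back only when the segment is serialized.
fn build_postings(docs: &[(u32, &str)]) -> HashMap<String, Vec<u32>> {
    let mut postings: HashMap<String, Vec<u32>> = HashMap::new();
    for &(doc_id, text) in docs {
        for term in text.split_whitespace() {
            postings.entry(term.to_string()).or_default().push(doc_id);
        }
    }
    postings
}

fn main() {
    let postings = build_postings(&[(0, "hello world"), (1, "hello")]);
    assert_eq!(postings["hello"], vec![0, 1]);
}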

View File

@@ -371,7 +371,7 @@ mod tests {
fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut scorer = Union::build(term_scorers, SumCombiner::default);
let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);
let mut limit = Score::MIN;
loop {

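The change above swaps `Union::from(term_scorers)` for `Union::build(term_scorers, SumCombiner::default)`: instead of choosing the score combiner through a type parameter with a `Default` bound, the caller now passes a factory closure. A self-contained sketch of that pattern with made-up stand-in types:

#[derive(Default)]
struct SumCombiner;

struct Union<TScorer, TCombiner> {
    scorers: Vec<TScorer>,
    combiner_fn: Box<dyn Fn() -> TCombiner>,
}

impl<TScorer, TCombiner> Union<TScorer, TCombiner> {
    // A factory closure lets callers supply stateful combiners (such as a
    // disjunction-max combiner holding a tie breaker), which a plain
    // `TCombiner: Default` type parameter cannot express.
    fn build(scorers: Vec<TScorer>, combiner_fn: impl Fn() -> TCombiner + 'static) -> Self {
        Union { scorers, combiner_fn: Box::new(combiner_fn) }
    }
}

fn main() {
    let union: Union<u32, SumCombiner> = Union::build(vec![1, 2, 3], SumCombiner::default);
    let _combiner = (union.combiner_fn)();
    assert_eq!(union.scorers.len(), 3);
}

The same motivation shows up in the boolean_weight.rs hunks below, where `BooleanWeight::new` gains a `Box<dyn Fn() -> TScoreCombiner + Sync + Send>` argument, and in disjunction_max_query.rs, which passes `move || DisjunctionMaxCombiner::with_tie_breaker(tie_breaker)`.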
View File

@@ -1,5 +1,7 @@
use std::collections::BTreeMap;
use super::boolean_weight::BooleanWeight;
use crate::query::{Occur, Query, SumWithCoordsCombiner, TermQuery, Weight};
use crate::query::{Occur, Query, TermQuery, Weight};
use crate::schema::{IndexRecordOption, Term};
use crate::Searcher;
@@ -151,16 +153,12 @@ impl Query for BooleanQuery {
Ok((*occur, subquery.weight(searcher, scoring_enabled)?))
})
.collect::<crate::Result<_>>()?;
Ok(Box::new(BooleanWeight::new(
sub_weights,
scoring_enabled,
Box::new(SumWithCoordsCombiner::default),
)))
Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled)))
}
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
for (_occur, subquery) in &self.subqueries {
subquery.query_terms(visitor);
subquery.query_terms(terms);
}
}
}

View File

@@ -3,7 +3,7 @@ use std::collections::HashMap;
use crate::core::SegmentReader;
use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use crate::query::term_query::TermScorer;
use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
use crate::query::{
@@ -17,16 +17,11 @@ enum SpecializedScorer {
Other(Box<dyn Scorer>),
}
fn scorer_union<TScoreCombiner>(
scorers: Vec<Box<dyn Scorer>>,
score_combiner_fn: impl Fn() -> TScoreCombiner,
) -> SpecializedScorer
where
TScoreCombiner: ScoreCombiner,
{
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer
where TScoreCombiner: ScoreCombiner {
assert!(!scorers.is_empty());
if scorers.len() == 1 {
return SpecializedScorer::Other(scorers.into_iter().next().unwrap()); //< we checked the size beforehand
}
{
@@ -43,45 +38,35 @@ where
// Block wand is only available if we read frequencies.
return SpecializedScorer::TermUnion(scorers);
} else {
return SpecializedScorer::Other(Box::new(Union::build(
return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
scorers,
score_combiner_fn,
)));
}
}
}
SpecializedScorer::Other(Box::new(Union::build(scorers, score_combiner_fn)))
SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
}
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(
scorer: SpecializedScorer,
score_combiner_fn: impl Fn() -> TScoreCombiner,
) -> Box<dyn Scorer> {
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> {
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let union_scorer = Union::build(term_scorers, score_combiner_fn);
let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers);
Box::new(union_scorer)
}
SpecializedScorer::Other(scorer) => scorer,
}
}
pub struct BooleanWeight<TScoreCombiner: ScoreCombiner> {
pub struct BooleanWeight {
weights: Vec<(Occur, Box<dyn Weight>)>,
scoring_enabled: bool,
score_combiner_fn: Box<dyn Fn() -> TScoreCombiner + Sync + Send>,
}
impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
pub fn new(
weights: Vec<(Occur, Box<dyn Weight>)>,
scoring_enabled: bool,
score_combiner_fn: Box<dyn Fn() -> TScoreCombiner + Sync + Send + 'static>,
) -> BooleanWeight<TScoreCombiner> {
impl BooleanWeight {
pub fn new(weights: Vec<(Occur, Box<dyn Weight>)>, scoring_enabled: bool) -> BooleanWeight {
BooleanWeight {
weights,
scoring_enabled,
score_combiner_fn,
}
}
@@ -101,23 +86,21 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
Ok(per_occur_scorers)
}
fn complex_scorer<TComplexScoreCombiner: ScoreCombiner>(
fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self,
reader: &SegmentReader,
boost: Score,
score_combiner_fn: impl Fn() -> TComplexScoreCombiner,
) -> crate::Result<SpecializedScorer> {
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers
.remove(&Occur::Should)
.map(|scorers| scorer_union(scorers, &score_combiner_fn));
.map(scorer_union::<TScoreCombiner>);
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::MustNot)
.map(|scorers| scorer_union(scorers, DoNothingCombiner::default))
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, DoNothingCombiner::default)
});
.map(scorer_union::<DoNothingCombiner>)
.map(into_box_scorer::<DoNothingCombiner>);
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Must)
@@ -129,10 +112,10 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
Box<dyn Scorer>,
Box<dyn Scorer>,
TComplexScoreCombiner,
TScoreCombiner,
>::new(
must_scorer,
into_box_scorer(should_scorer, &score_combiner_fn),
into_box_scorer::<TScoreCombiner>(should_scorer),
)))
} else {
SpecializedScorer::Other(must_scorer)
@@ -146,7 +129,8 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
};
if let Some(exclude_scorer) = exclude_scorer_opt {
let positive_scorer_boxed = into_box_scorer(positive_scorer, &score_combiner_fn);
let positive_scorer_boxed: Box<dyn Scorer> =
into_box_scorer::<TScoreCombiner>(positive_scorer);
Ok(SpecializedScorer::Other(Box::new(Exclude::new(
positive_scorer_boxed,
exclude_scorer,
@@ -157,7 +141,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
}
}
impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombiner> {
impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
@@ -169,15 +153,13 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
weight.scorer(reader, boost)
}
} else if self.scoring_enabled {
self.complex_scorer(reader, boost, &self.score_combiner_fn)
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, &self.score_combiner_fn)
into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
})
} else {
self.complex_scorer(reader, boost, &DoNothingCombiner::default)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, &DoNothingCombiner::default)
})
self.complex_scorer::<DoNothingCombiner>(reader, boost)
.map(into_box_scorer::<DoNothingCombiner>)
}
}
@@ -206,10 +188,11 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = Union::build(term_scorers, &self.score_combiner_fn);
let mut union_scorer =
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
for_each_scorer(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
@@ -235,7 +218,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);

View File

@@ -4,7 +4,6 @@ mod boolean_weight;
pub(crate) use self::block_wand::{block_wand, block_wand_single_scorer};
pub use self::boolean_query::BooleanQuery;
pub(crate) use self::boolean_weight::BooleanWeight;
#[cfg(test)]
mod tests {

View File

@@ -1,3 +1,4 @@
use std::collections::BTreeMap;
use std::fmt;
use crate::fastfield::AliveBitSet;
@@ -48,8 +49,8 @@ impl Query for BoostQuery {
Ok(boosted_weight)
}
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
self.query.query_terms(visitor)
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
self.query.query_terms(terms)
}
}

View File

@@ -1,176 +0,0 @@
use std::fmt;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, TantivyError, Term};
/// `ConstScoreQuery` is a wrapper over a query to provide a constant score.
/// It can avoid unnecessary score computation on the wrapped query.
///
/// The document set matched by the `ConstScoreQuery` is strictly the same as the underlying query.
/// The configured score is used for each document.
pub struct ConstScoreQuery {
query: Box<dyn Query>,
score: Score,
}
impl ConstScoreQuery {
/// Builds a const score query.
pub fn new(query: Box<dyn Query>, score: Score) -> ConstScoreQuery {
ConstScoreQuery { query, score }
}
}
impl Clone for ConstScoreQuery {
fn clone(&self) -> Self {
ConstScoreQuery {
query: self.query.box_clone(),
score: self.score,
}
}
}
impl fmt::Debug for ConstScoreQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Const(score={}, query={:?})", self.score, self.query)
}
}
impl Query for ConstScoreQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let inner_weight = self.query.weight(searcher, scoring_enabled)?;
Ok(if scoring_enabled {
Box::new(ConstWeight::new(inner_weight, self.score))
} else {
inner_weight
})
}
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
self.query.query_terms(visitor);
}
}
struct ConstWeight {
weight: Box<dyn Weight>,
score: Score,
}
impl ConstWeight {
pub fn new(weight: Box<dyn Weight>, score: Score) -> Self {
ConstWeight { weight, score }
}
}
impl Weight for ConstWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost)?;
Ok(Box::new(ConstScorer::new(inner_scorer, boost * self.score)))
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
"Document #({}) does not match",
doc
)));
}
let mut explanation = Explanation::new("Const", self.score);
let underlying_explanation = self.weight.explain(reader, doc)?;
explanation.add_detail(underlying_explanation);
Ok(explanation)
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
/// Wraps a `DocSet` and simply returns a constant `Scorer`.
/// The `ConstScorer` is useful if you have a `DocSet` and
/// need a `Scorer` on top of it.
///
/// The `ConstScorer`'s constant score can be set
/// by calling `.set_score(...)`.
pub struct ConstScorer<TDocSet: DocSet> {
docset: TDocSet,
score: Score,
}
impl<TDocSet: DocSet> ConstScorer<TDocSet> {
/// Creates a new `ConstScorer`.
pub fn new(docset: TDocSet, score: Score) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}
}
impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
fn from(docset: TDocSet) -> Self {
ConstScorer::new(docset, 1.0)
}
}
impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
fn advance(&mut self) -> DocId {
self.docset.advance()
}
fn seek(&mut self, target: DocId) -> DocId {
self.docset.seek(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.docset.fill_buffer(buffer)
}
fn doc(&self) -> DocId {
self.docset.doc()
}
fn size_hint(&self) -> u32 {
self.docset.size_hint()
}
}
impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
fn score(&mut self) -> Score {
self.score
}
}
#[cfg(test)]
mod tests {
use super::ConstScoreQuery;
use crate::query::{AllQuery, Query};
use crate::schema::Schema;
use crate::{DocAddress, Document, Index};
#[test]
fn test_const_score_query_explain() -> crate::Result<()> {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::new())?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query = ConstScoreQuery::new(Box::new(AllQuery), 0.42);
let explanation = query.explain(&searcher, DocAddress::new(0, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
r#"{
"value": 0.42,
"description": "Const",
"details": [
{
"value": 1.0,
"description": "AllQuery",
"context": []
}
],
"context": []
}"#
);
Ok(())
}
}
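This file is shown in full, so a short usage note: `ConstScoreQuery` wraps any boxed query and gives every match the same score, as its own test demonstrates. A hedged sketch against the public API re-exported in query/mod.rs:

use tantivy::query::{AllQuery, ConstScoreQuery};

// Every document matched by the wrapped query is scored 0.42,
// mirroring the `test_const_score_query_explain` test above.
fn make_const_query() -> ConstScoreQuery {
    ConstScoreQuery::new(Box::new(AllQuery), 0.42)
}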

View File

@@ -1,131 +0,0 @@
use tantivy_query_grammar::Occur;
use crate::query::{BooleanWeight, DisjunctionMaxCombiner, Query, Weight};
use crate::{Score, Searcher, Term};
/// The disjunction max query returns documents matching one or more wrapped queries,
/// called query clauses or clauses.
///
/// If a returned document matches multiple query clauses,
/// the `DisjunctionMaxQuery` assigns the document the highest relevance score from any matching
/// clause, plus a tie breaking increment for any additional matching subqueries.
///
/// ```rust
/// use tantivy::collector::TopDocs;
/// use tantivy::doc;
/// use tantivy::query::{DisjunctionMaxQuery, Query, QueryClone, TermQuery};
/// use tantivy::schema::{IndexRecordOption, Schema, TEXT};
/// use tantivy::Term;
/// use tantivy::Index;
///
/// fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let body = schema_builder.add_text_field("body", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of Girl",
/// ))?;
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ))?;
/// index_writer.add_document(doc!(
/// title => "The Diary of Girl",
/// ))?;
/// index_writer.commit()?;
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// // Make TermQuery's for "girl" and "diary" in the title
/// let girl_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "girl"),
/// IndexRecordOption::Basic,
/// ));
/// let diary_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// ));
///
/// // TermQuery "diary" and "girl" should be present and only one should be accounted in score
/// let queries1 = vec![diary_term_query.box_clone(), girl_term_query.box_clone()];
/// let diary_and_girl = DisjunctionMaxQuery::new(queries1);
/// let documents = searcher.search(&diary_and_girl, &TopDocs::with_limit(3))?;
/// assert_eq!(documents[0].0, documents[1].0);
/// assert_eq!(documents[1].0, documents[2].0);
///
/// // TermQuery "diary" and "girl" should be present
/// // and one should be accounted for with a multiplier of 0.7
/// let queries2 = vec![diary_term_query.box_clone(), girl_term_query.box_clone()];
/// let tie_breaker = 0.7;
/// let diary_and_girl_with_tie_breaker = DisjunctionMaxQuery::with_tie_breaker(queries2, tie_breaker);
/// let documents = searcher.search(&diary_and_girl_with_tie_breaker, &TopDocs::with_limit(3))?;
/// assert_eq!(documents[1].0, documents[2].0);
/// // For this test all terms bring the same score, so we can do easy math and assume that
/// // the `DisjunctionMaxQuery` score with a tie breaker should be equal
/// // to term1 score + `tie_breaker` * term2 score or (1.0 + tie_breaker) * term score
/// assert!(f32::abs(documents[0].0 - documents[1].0 * (1.0 + tie_breaker)) < 0.001);
/// Ok(())
/// }
/// ```
#[derive(Debug)]
pub struct DisjunctionMaxQuery {
disjuncts: Vec<Box<dyn Query>>,
tie_breaker: Score,
}
impl Clone for DisjunctionMaxQuery {
fn clone(&self) -> Self {
DisjunctionMaxQuery::with_tie_breaker(
self.disjuncts
.iter()
.map(|disjunct| disjunct.box_clone())
.collect::<Vec<_>>(),
self.tie_breaker,
)
}
}
impl Query for DisjunctionMaxQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let disjuncts = self
.disjuncts
.iter()
.map(|disjunct| Ok((Occur::Should, disjunct.weight(searcher, scoring_enabled)?)))
.collect::<crate::Result<_>>()?;
let tie_breaker = self.tie_breaker;
Ok(Box::new(BooleanWeight::new(
disjuncts,
scoring_enabled,
Box::new(move || DisjunctionMaxCombiner::with_tie_breaker(tie_breaker)),
)))
}
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
for disjunct in &self.disjuncts {
disjunct.query_terms(visitor);
}
}
}
impl DisjunctionMaxQuery {
/// Creates a new `DisjunctionMaxQuery` with tie breaker.
pub fn with_tie_breaker(
disjuncts: Vec<Box<dyn Query>>,
tie_breaker: Score,
) -> DisjunctionMaxQuery {
DisjunctionMaxQuery {
disjuncts,
tie_breaker,
}
}
/// Creates a new `DisjunctionMaxQuery` with no tie breaker.
pub fn new(disjuncts: Vec<Box<dyn Query>>) -> DisjunctionMaxQuery {
DisjunctionMaxQuery::with_tie_breaker(disjuncts, 0.0)
}
}

View File

@@ -6,8 +6,6 @@ mod bitset;
mod bm25;
mod boolean_query;
mod boost_query;
mod const_score_query;
mod disjunction_max_query;
mod empty_query;
mod exclude;
mod explanation;
@@ -36,10 +34,7 @@ pub use self::automaton_weight::AutomatonWeight;
pub use self::bitset::BitSetDocSet;
pub(crate) use self::bm25::Bm25Weight;
pub use self::boolean_query::BooleanQuery;
pub(crate) use self::boolean_query::BooleanWeight;
pub use self::boost_query::BoostQuery;
pub use self::const_score_query::{ConstScoreQuery, ConstScorer};
pub use self::disjunction_max_query::DisjunctionMaxQuery;
pub use self::empty_query::{EmptyQuery, EmptyScorer, EmptyWeight};
pub use self::exclude::Exclude;
pub use self::explanation::Explanation;
@@ -54,10 +49,7 @@ pub use self::query_parser::{QueryParser, QueryParserError};
pub use self::range_query::RangeQuery;
pub use self::regex_query::RegexQuery;
pub use self::reqopt_scorer::RequiredOptionalScorer;
pub use self::score_combiner::{
DisjunctionMaxCombiner, ScoreCombiner, SumCombiner, SumWithCoordsCombiner,
};
pub use self::scorer::Scorer;
pub use self::scorer::{ConstScorer, Scorer};
pub use self::term_query::TermQuery;
pub use self::union::Union;
#[cfg(test)]
@@ -66,6 +58,8 @@ pub use self::weight::Weight;
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use crate::query::QueryParser;
use crate::schema::{Schema, TEXT};
use crate::{Index, Term};
@@ -80,34 +74,49 @@ mod tests {
let term_a = Term::from_field_text(text_field, "a");
let term_b = Term::from_field_text(text_field, "b");
{
let query = query_parser.parse_query("a").unwrap();
let mut terms = Vec::new();
query.query_terms(&mut |term, pos| terms.push((term, pos)));
assert_eq!(vec![(&term_a, false)], terms);
let mut terms: BTreeMap<Term, bool> = Default::default();
query_parser
.parse_query("a")
.unwrap()
.query_terms(&mut terms);
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
assert_eq!(vec![(&term_a, &false)], terms);
}
{
let query = query_parser.parse_query("a b").unwrap();
let mut terms = Vec::new();
query.query_terms(&mut |term, pos| terms.push((term, pos)));
assert_eq!(vec![(&term_a, false), (&term_b, false)], terms);
let mut terms: BTreeMap<Term, bool> = Default::default();
query_parser
.parse_query("a b")
.unwrap()
.query_terms(&mut terms);
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
assert_eq!(vec![(&term_a, &false), (&term_b, &false)], terms);
}
{
let query = query_parser.parse_query("\"a b\"").unwrap();
let mut terms = Vec::new();
query.query_terms(&mut |term, pos| terms.push((term, pos)));
assert_eq!(vec![(&term_a, true), (&term_b, true)], terms);
let mut terms: BTreeMap<Term, bool> = Default::default();
query_parser
.parse_query("\"a b\"")
.unwrap()
.query_terms(&mut terms);
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
assert_eq!(vec![(&term_a, &true), (&term_b, &true)], terms);
}
{
let query = query_parser.parse_query("a a a a a").unwrap();
let mut terms = Vec::new();
query.query_terms(&mut |term, pos| terms.push((term, pos)));
assert_eq!(vec![(&term_a, false); 5], terms);
let mut terms: BTreeMap<Term, bool> = Default::default();
query_parser
.parse_query("a a a a a")
.unwrap()
.query_terms(&mut terms);
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
assert_eq!(vec![(&term_a, &false)], terms);
}
{
let query = query_parser.parse_query("a -b").unwrap();
let mut terms = Vec::new();
query.query_terms(&mut |term, pos| terms.push((term, pos)));
assert_eq!(vec![(&term_a, false), (&term_b, false)], terms);
let mut terms: BTreeMap<Term, bool> = Default::default();
query_parser
.parse_query("a -b")
.unwrap()
.query_terms(&mut terms);
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
assert_eq!(vec![(&term_a, &false), (&term_b, &false)], terms);
}
}
}

View File

@@ -139,7 +139,7 @@ impl MoreLikeThis {
}
/// Finds terms for a more-like-this query.
/// field_to_field_values is a mapping from field to possible values of that field.
fn retrieve_terms_from_doc_fields(
&self,
searcher: &Searcher,

View File

@@ -1,3 +1,5 @@
use std::collections::BTreeMap;
use super::PhraseWeight;
use crate::core::searcher::Searcher;
use crate::query::bm25::Bm25Weight;
@@ -66,7 +68,7 @@ impl PhraseQuery {
/// Slop allowed for the phrase.
///
/// The query will match if its terms are separated by `slop` terms at most.
/// By default the slop is 0 meaning query terms need to be adjacent.
pub fn set_slop(&mut self, value: u32) {
self.slop = value;
@@ -127,9 +129,9 @@ impl Query for PhraseQuery {
Ok(Box::new(phrase_weight))
}
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
for (_, term) in &self.phrase_terms {
visitor(term, true);
terms.insert(term.clone(), true);
}
}
}
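The slop doc comment above invites a concrete call. A hedged sketch assuming the usual tantivy imports and an already-built text field handle; `PhraseQuery::new` and `set_slop` are exactly the items shown in this file's hunks:

use tantivy::query::PhraseQuery;
use tantivy::schema::Field;
use tantivy::Term;

fn phrase_with_slop(title: Field) -> PhraseQuery {
    let terms = vec![
        Term::from_field_text(title, "diary"),
        Term::from_field_text(title, "girl"),
    ];
    let mut query = PhraseQuery::new(terms);
    // Allow up to two terms between "diary" and "girl".
    // slop = 0 (the default) requires the terms to be adjacent.
    query.set_slop(2);
    query
}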

View File

@@ -428,7 +428,7 @@ mod tests {
}
#[test]
fn test_slop() {
// The slop is not symmetric. It does not allow for the phrase to be out of order.
test_intersection_aux(&[1], &[2], &[2], 1);
test_intersection_aux(&[1], &[3], &[], 1);
test_intersection_aux(&[1], &[3], &[3], 2);

View File

@@ -1,3 +1,4 @@
use std::collections::BTreeMap;
use std::fmt;
use downcast_rs::impl_downcast;
@@ -66,15 +67,12 @@ pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
Ok(result)
}
/// Extract all of the terms associated with the query and pass them to the
/// given closure.
/// Extract all of the terms associated with the query and insert them into the
/// term set given as an argument.
///
/// Each term is associated with a boolean indicating whether
/// positions are required or not.
///
/// Note that there can be multiple instances of any given term
/// in a query and deduplication must be handled by the visitor.
fn query_terms<'a>(&'a self, _visitor: &mut dyn FnMut(&'a Term, bool)) {}
/// positions are required or not.
fn query_terms(&self, _term_set: &mut BTreeMap<Term, bool>) {}
}
/// Implements `box_clone`.
@@ -100,8 +98,8 @@ impl Query for Box<dyn Query> {
self.as_ref().count(searcher)
}
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
self.as_ref().query_terms(visitor);
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
self.as_ref().query_terms(terms);
}
}
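The hunks above capture the `query_terms` API change: the old signature filled a `BTreeMap<Term, bool>` for the caller, while the new one hands each (term, positions-required) pair to a closure and leaves deduplication to the visitor, as the updated doc comment warns. A self-contained sketch with stand-in types showing a visitor that deduplicates while OR-ing the flags:

use std::collections::BTreeMap;

type Term = String; // stand-in for tantivy's Term

trait Query {
    fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool));
}

struct PhraseLike(Vec<Term>);

impl Query for PhraseLike {
    fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
        for term in &self.0 {
            visitor(term, true); // phrase terms need positions
        }
    }
}

fn main() {
    let query = PhraseLike(vec!["a".into(), "a".into(), "b".into()]);
    let mut terms: BTreeMap<&Term, bool> = BTreeMap::new();
    query.query_terms(&mut |term, positions_required| {
        // Keep one entry per term; require positions if any occurrence does.
        *terms.entry(term).or_insert(false) |= positions_required;
    });
    assert_eq!(terms.len(), 2);
}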

View File

@@ -184,7 +184,7 @@ pub struct QueryParser {
fn all_negative(ast: &LogicalAst) -> bool {
match ast {
LogicalAst::Leaf(_) => false,
LogicalAst::Boost(ref child_ast, _) => all_negative(child_ast),
LogicalAst::Boost(ref child_ast, _) => all_negative(&*child_ast),
LogicalAst::Clause(children) => children
.iter()
.all(|(ref occur, child)| (*occur == Occur::MustNot) || all_negative(child)),
@@ -400,6 +400,7 @@ impl QueryParser {
let bytes = base64::decode(phrase).map_err(QueryParserError::ExpectedBase64)?;
Ok(Term::from_field_bytes(field, &bytes))
}
FieldType::Ip(_) => Ok(Term::from_field_text(field, phrase)),
}
}
@@ -506,6 +507,13 @@ impl QueryParser {
let bytes_term = Term::from_field_bytes(field, &bytes);
Ok(vec![LogicalLiteral::Term(bytes_term)])
}
FieldType::Ip(ref option) => {
if !option.is_indexed() {
return Err(QueryParserError::FieldNotIndexed(field_name.to_string()));
}
let text_term = Term::from_field_text(field, phrase);
Ok(vec![LogicalLiteral::Term(text_term)])
}
}
}
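One behavioral note on the IP hunks above: the parser builds a plain text term from the query phrase, while the indexing side (earlier in this compare) wrote `ip_val.to_string()`. Exact matching therefore depends on both sides agreeing on the textual form of the address; whether later commits normalize the query input is not visible in this diff. A small sketch of the mismatch risk using only the standard library:

use std::net::IpAddr;

fn main() {
    let indexed: IpAddr = "::1".parse().unwrap();
    // Indexing stores the canonical (compressed) form.
    let indexed_token = indexed.to_string(); // "::1"
    // A query typed in the expanded form is passed through unparsed,
    // so it would not match the indexed token.
    let queried_token = "0:0:0:0:0:0:0:1";
    assert_ne!(indexed_token, queried_token);
}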
@@ -577,7 +585,7 @@ impl QueryParser {
/// object by naturally extending the json field name with a "." separated field_path
/// - field_phrase: the phrase that is being searched.
///
/// The literal identifies the targeted field by a so-called *full field path*,
/// specified before the ":". (e.g. identity.username:fulmicoton).
///
/// The way we split the full field path into (field_name, field_path) can be ambiguous,

Some files were not shown because too many files have changed in this diff.