Compare commits

..

16 Commits

Author SHA1 Message Date
Paul Masurel
4c846b1202 Added NRT directory kinda working 2019-04-05 10:07:29 +09:00
Paul Masurel
fac0013454 Added flush 2019-04-04 09:36:06 +09:00
Paul Masurel
95db5d9999 Merge branch 'master' into softcommits 2019-03-23 18:08:07 +09:00
Paul Masurel
7f0372fa97 reader 2019-02-16 16:09:16 +09:00
Paul Masurel
f8fdf68fcb unit test 2019-02-16 15:49:22 +09:00
Paul Masurel
c00e95cd04 Uncommited is not SegmentRegisters 2019-02-15 22:44:14 +09:00
Paul Masurel
a623d8f6d9 Added SegmentAvailable readonly view 2019-02-15 08:58:08 +09:00
Paul Masurel
b3ede2dd7e softcommits 2019-02-13 21:29:54 +09:00
Paul Masurel
b68686f040 opstamp constraint 2019-02-12 18:14:07 +09:00
Paul Masurel
629d3fb37f Added opstamp 2019-02-12 08:49:23 +09:00
Paul Masurel
f513f10e05 fmt 2019-02-08 15:04:35 +09:00
Paul Masurel
f262d4cc22 code cleaning 2019-02-08 14:54:34 +09:00
Paul Masurel
91e89714f4 Added soft commits 2019-02-08 14:42:52 +09:00
Paul Masurel
6fd3cb1254 Renaming 2019-02-06 05:48:15 +01:00
Paul Masurel
549b4e66e5 Using the new API 2019-02-06 00:17:56 +01:00
Paul Masurel
d9b2bf98e2 First stab 2019-02-05 21:23:07 +01:00
200 changed files with 4429 additions and 11478 deletions

View File

@@ -10,7 +10,7 @@ env:
global: global:
- CRATE_NAME=tantivy - CRATE_NAME=tantivy
- TRAVIS_CARGO_NIGHTLY_FEATURE="" - TRAVIS_CARGO_NIGHTLY_FEATURE=""
# - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM= - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=
addons: addons:
apt: apt:
@@ -29,7 +29,7 @@ addons:
matrix: matrix:
include: include:
# Android # Android
- env: TARGET=aarch64-linux-android DISABLE_TESTS=1 - env: TARGET=aarch64-linux-android DISABLE_TESTS
#- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1 #- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1
#- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1 #- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1
#- env: TARGET=i686-linux-android DISABLE_TESTS=1 #- env: TARGET=i686-linux-android DISABLE_TESTS=1
@@ -38,21 +38,20 @@ matrix:
# Linux # Linux
#- env: TARGET=aarch64-unknown-linux-gnu #- env: TARGET=aarch64-unknown-linux-gnu
#- env: TARGET=i686-unknown-linux-gnu #- env: TARGET=i686-unknown-linux-gnu
- env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 #UPLOAD_DOCS=1 - env: TARGET=x86_64-unknown-linux-gnu CODECOV=1
# - env: TARGET=x86_64-unknown-linux-musl CODECOV=1 # - env: TARGET=x86_64-unknown-linux-musl CODECOV=1
# OSX # OSX
#- env: TARGET=x86_64-apple-darwin - env: TARGET=x86_64-apple-darwin
# os: osx os: osx
before_install: before_install:
- set -e - set -e
- rustup self update - rustup self update
- rustup component add rustfmt
install: install:
- sh ci/install.sh - sh ci/install.sh
- source ~/.cargo/env || true - source ~/.cargo/env || true
- env | grep "TRAVIS"
before_script: before_script:
- export PATH=$HOME/.cargo/bin:$PATH - export PATH=$HOME/.cargo/bin:$PATH
@@ -61,25 +60,14 @@ before_script:
script: script:
- bash ci/script.sh - bash ci/script.sh
- cargo fmt --all -- --check
before_deploy: before_deploy:
- sh ci/before_deploy.sh - sh ci/before_deploy.sh
after_success: cache: cargo
# Needs GH_TOKEN env var to be set in travis settings before_cache:
- if [[ -v GH_TOKEN ]]; then echo "GH TOKEN IS SET"; else echo "GH TOKEN NOT SET"; fi # Travis can't cache files that are not readable by "others"
- if [[ -v UPLOAD_DOCS ]]; then cargo doc; cargo doc-upload; else echo "doc upload disabled."; fi - chmod -R a+r $HOME/.cargo
#cache: cargo
#before_cache:
# # Travis can't cache files that are not readable by "others"
# - chmod -R a+r $HOME/.cargo
# - find ./target/debug -type f -maxdepth 1 -delete
# - rm -f ./target/.rustc_info.json
# - rm -fr ./target/debug/{deps,.fingerprint}/tantivy*
# - rm -r target/debug/examples/
# - ls -1 examples/ | sed -e 's/\.rs$//' | xargs -I "{}" find target/* -name "*{}*" -type f -delete
#branches: #branches:
# only: # only:

View File

@@ -1,81 +1,3 @@
Tantivy 0.11.0
=====================
- Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
- Various bugfixes in the query parser.
- Better handling of hyphens in query parser. (#609)
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
## How to update?
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required.
Tantivy 0.10.2
=====================
- Closes #656. Solving memory leak.
Tantivy 0.10.1
=====================
- Closes #544. A few users experienced problems with the directory watching system.
Avoid watching the mmap directory until someone effectively creates a reader that uses
this functionality.
Tantivy 0.10.0
=====================
*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*
- Added an API to easily tweak or entirely replace the
default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@pmasurel)
- Added an ASCII folding filter (@drusellers)
- Bugfix in `query.count` in presence of deletes (@pmasurel)
- Added `.explain(...)` in `Query` and `Weight` to (@pmasurel)
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
All segments are simply removed.
Minor
---------
- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
Calling .freq() or .doc() when .advance() has never been called
on segment postings should panic from now on.
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
- Fast fields are now preloaded when the `SegmentReader` is created.
- `IndexMeta` is now public. (@hntd187)
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
only require a read lock. (@pmasurel)
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
- Bugfix - Files get deleted slightly earlier
- Compilation resources improved (@fdb-hiroshima)
## How to update?
Your program should be usable as is.
### Fast fields
Fast fields used to be accessed directly from the `SegmentReader`.
The API changed, you are now required to acquire your fast field reader via the
`segment_reader.fast_fields()`, and use one of the typed method:
- `.u64()`, `.i64()` if your field is single-valued ;
- `.u64s()`, `.i64s()` if your field is multi-valued ;
- `.bytes()` if your field is bytes fast field.
Tantivy 0.9.0 Tantivy 0.9.0
===================== =====================
*0.9.0 index format is not compatible with the *0.9.0 index format is not compatible with the
@@ -95,35 +17,6 @@ previous index format.*
- Added IndexReader. By default, index is reloaded automatically upon new commits (@fulmicoton) - Added IndexReader. By default, index is reloaded automatically upon new commits (@fulmicoton)
- SIMD linear search within blocks (@fulmicoton) - SIMD linear search within blocks (@fulmicoton)
## How to update ?
tantivy 0.9 brought some API breaking change.
To update from tantivy 0.8, you will need to go through the following steps.
- `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::INT_STORED`.
- The index now does not hold the pool of searcher anymore. You are required to create an intermediary object called
`IndexReader` for this.
```rust
// create the reader. You typically need to create 1 reader for the entire
// lifetime of you program.
let reader = index.reader()?;
// Acquire a searcher (previously `index.searcher()`) is now written:
let searcher = reader.searcher();
// With the default setting of the reader, you are not required to
// call `index.load_searchers()` anymore.
//
// The IndexReader will pick up that change automatically, regardless
// of whether the update was done in a different process or not.
// If this behavior is not wanted, you can create your reader with
// the `ReloadPolicy::Manual`, and manually decide when to reload the index
// by calling `reader.reload()?`.
```
Tantivy 0.8.2 Tantivy 0.8.2
===================== =====================
Fixing build for x86_64 platforms. (#496) Fixing build for x86_64 platforms. (#496)

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tantivy" name = "tantivy"
version = "0.11.0" version = "0.9.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"] authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
@@ -10,14 +10,12 @@ homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy" repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md" readme = "README.md"
keywords = ["search", "information", "retrieval"] keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies] [dependencies]
base64 = "0.11.0" base64 = "0.10.0"
byteorder = "1.0" byteorder = "1.0"
crc32fast = "1.2.0" lazy_static = "1"
once_cell = "1.0" regex = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.1" tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true} memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true} lz4 = {version="1.20", optional=true}
@@ -25,6 +23,8 @@ snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true} atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0" tempfile = "3.0"
log = "0.4" log = "0.4"
combine = "3"
tempdir = "0.3"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
@@ -34,32 +34,30 @@ itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]} levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true} notify = {version="4", optional=true}
bit-set = "0.5" bit-set = "0.5"
uuid = { version = "0.8", features = ["v4", "serde"] } uuid = { version = "0.7.2", features = ["v4", "serde"] }
crossbeam = "0.7" crossbeam = "0.5"
futures = "0.1" futures = "0.1"
futures-cpupool = "0.1" futures-cpupool = "0.1"
owning_ref = "0.4" owning_ref = "0.4"
stable_deref_trait = "1.0.0" stable_deref_trait = "1.0.0"
rust-stemmers = "1.1" rust-stemmers = "1.1"
downcast-rs = { version="1.0" } downcast-rs = { version="1.0" }
tantivy-query-grammar = { path="./query-grammar" } bitpacking = "0.6"
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2" census = "0.2"
fnv = "1.0.6" fnv = "1.0.6"
owned-read = "0.4" owned-read = "0.4"
failure = "0.1" failure = "0.1"
htmlescape = "0.3.1" htmlescape = "0.3.1"
fail = "0.3" fail = "0.2"
scoped-pool = "1.0" scoped-pool = "1.0"
murmurhash32 = "0.2" murmurhash32 = "0.2"
chrono = "0.4" chrono = "0.4"
smallvec = "0.6"
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]
winapi = "0.3" winapi = "0.2"
[dev-dependencies] [dev-dependencies]
rand = "0.7" rand = "0.6"
maplit = "1" maplit = "1"
matches = "0.1.8" matches = "0.1.8"
time = "0.1.42" time = "0.1.42"
@@ -74,31 +72,13 @@ debug-assertions = true
overflow-checks = true overflow-checks = true
[features] [features]
default = ["mmap"] # by default no-fail is disabled. We manually enable it when running test.
default = ["mmap", "no_fail"]
mmap = ["atomicwrites", "fs2", "memmap", "notify"] mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"] lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"] no_fail = ["fail/no_fail"]
unstable = [] # useful for benches. unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"] wasm-bindgen = ["uuid/wasm-bindgen"]
[workspace]
members = ["query-grammar"]
[badges] [badges]
travis-ci = { repository = "tantivy-search/tantivy" } travis-ci = { repository = "tantivy-search/tantivy" }
[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
# in a different binary.
#
# We do that because, fail rely on a global definition of
# failpoints behavior and hence, it is incompatible with
# multithreading.
[[test]]
name = "failpoints"
path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"]

View File

@@ -1,3 +0,0 @@
test:
echo "Run test only... No examples."
cargo test --tests --lib

102
README.md
View File

@@ -4,7 +4,6 @@
[![Join the chat at https://gitter.im/tantivy-search/tantivy](https://badges.gitter.im/tantivy-search/tantivy.svg)](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Join the chat at https://gitter.im/tantivy-search/tantivy](https://badges.gitter.im/tantivy-search/tantivy.svg)](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Build status](https://ci.appveyor.com/api/projects/status/r7nb13kj23u8m9pj/branch/master?svg=true)](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master) [![Build status](https://ci.appveyor.com/api/projects/status/r7nb13kj23u8m9pj/branch/master?svg=true)](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
[![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton)
![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png) ![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
@@ -21,9 +20,9 @@
[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton) [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
**Tantivy** is a **full text search engine library** written in Rust. **Tantivy** is a **full text search engine library** written in rust.
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine. to build such a search engine.
@@ -31,7 +30,7 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
# Benchmark # Benchmark
Tantivy is typically faster than Lucene, but the results depend on Tantivy is typically faster than Lucene, but the results will depend on
the nature of the queries in your workload. the nature of the queries in your workload.
The following [benchmark](https://tantivy-search.github.io/bench/) break downs The following [benchmark](https://tantivy-search.github.io/bench/) break downs
@@ -40,19 +39,19 @@ performance for different type of queries / collection.
# Features # Features
- Full-text search - Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) - Configurable tokenizer. (stemming available for 17 latin languages. Third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:) - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools - Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as Lucene) - BM25 scoring (the same as lucene)
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`) - Natural query language `(michael AND jackson) OR "king of pop"`
- Phrase queries search (e.g. `"michael jackson"`) - Phrase queries search (`"michael jackson"`)
- Incremental indexing - Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop) - Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- Mmap directory - Mmap directory
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set - SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene) - Single valued and multivalued u64 and i64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields - `&[u8]` fast fields
- Text, i64, u64, f64, dates, and hierarchical facet fields - Text, i64, u64, dates and hierarchical facet fields
- LZ4 compressed document store - LZ4 compressed document store
- Range queries - Range queries
- Faceted search - Faceted search
@@ -61,77 +60,50 @@ performance for different type of queries / collection.
# Non-features # Non-features
- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a - Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
library upon which one could build a distributed search. Serializable/mergeable collector state for instance, library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
are within the scope of Tantivy. are within the scope of tantivy.
# Supported OS and compiler # Supported OS and compiler
Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows. Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
# Getting started # Getting started
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html) - [tantivy's simple search example](http://fulmicoton.com/tantivy-examples/simple_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine, - [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
index documents, and search via the CLI or a small server with a REST API. `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
It walks you through getting a wikipedia search engine up and running in a few minutes. index documents and search via the CLI or a small server with a REST API.
- [Reference doc for the last released version](https://docs.rs/tantivy/) It will walk you through getting a wikipedia search engine up and running in a few minutes.
- [reference doc]
- [For the last released version](https://docs.rs/tantivy/)
- [For the last master branch](https://tantivy-search.github.io/tantivy/tantivy/index.html)
# How can I support this project? # Compiling
There are many ways to support this project. ## Development
- Use Tantivy and tell us about your experience on [Gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com) Tantivy compiles on stable rust but requires `Rust >= 1.27`.
- Report bugs To check out and run tests, you can simply run :
- Write a blog post
- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
- Talk about Tantivy around you
- Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
# Contributing code
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
## Clone and build locally
Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run:
```bash
git clone https://github.com/tantivy-search/tantivy.git git clone https://github.com/tantivy-search/tantivy.git
cd tantivy cd tantivy
cargo build cargo build
```
## Run tests ## Running tests
Some tests will not run with just `cargo test` because of `fail-rs`. Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`. To run the tests exhaustively, run `./run-tests.sh`.
## Debug # How can I support this project ?
You might find it useful to step through the programme with a debugger. There are many ways to support this project.
### A failing test - If you use tantivy, tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
- Report bugs
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`: - Write a blog post
- Complete documentation
```bash - Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY - Talk about tantivy around you
``` - Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
```bash
$gdb run --test-threads 1 --test $NAME_OF_TEST
```
### An example
By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME
$ gdb run
```

View File

@@ -18,5 +18,5 @@ install:
build: false build: false
test_script: test_script:
- REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap - REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap -- --test-threads 1
- REM SET RUST_BACKTRACE=1 & cargo build --examples - REM SET RUST_BACKTRACE=1 & cargo build --examples

View File

@@ -7,7 +7,7 @@ set -ex
main() { main() {
if [ ! -z $CODECOV ]; then if [ ! -z $CODECOV ]; then
echo "Codecov" echo "Codecov"
cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
else else
echo "Build" echo "Build"
cross build --target $TARGET cross build --target $TARGET
@@ -15,8 +15,7 @@ main() {
return return
fi fi
echo "Test" echo "Test"
cross test --target $TARGET --no-default-features --features mmap cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
cross test --target $TARGET --no-default-features --features mmap query-grammar
fi fi
for example in $(ls examples/*.rs) for example in $(ls examples/*.rs)
do do

View File

@@ -5,23 +5,28 @@
// //
// We will : // We will :
// - define our schema // - define our schema
// - create an index in a directory // = create an index in a directory
// - index a few documents into our index // - index few documents in our index
// - search for the best document matching a basic query // - search for the best document matchings "sea whale"
// - retrieve the best document's original content. // - retrieve the best document original content.
extern crate tempdir;
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy}; use tantivy::Index;
use tempfile::TempDir; use tantivy::ReloadPolicy;
use tempdir::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the // Let's create a temporary directory for the
// sake of this example // sake of this example
let index_path = TempDir::new()?; let index_path = TempDir::new("tantivy_example_dir")?;
// # Defining the schema // # Defining the schema
// //
@@ -30,7 +35,7 @@ fn main() -> tantivy::Result<()> {
// and for each field, its type and "the way it should // and for each field, its type and "the way it should
// be indexed". // be indexed".
// First we need to define a schema ... // first we need to define a schema ...
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
// Our first field is title. // Our first field is title.
@@ -45,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// //
// `STORED` means that the field will also be saved // `STORED` means that the field will also be saved
// in a compressed, row-oriented key-value store. // in a compressed, row-oriented key-value store.
// This store is useful for reconstructing the // This store is useful to reconstruct the
// documents that were selected during the search phase. // documents that were selected during the search phase.
schema_builder.add_text_field("title", TEXT | STORED); schema_builder.add_text_field("title", TEXT | STORED);
@@ -54,7 +59,8 @@ fn main() -> tantivy::Result<()> {
// need to be able to be able to retrieve it // need to be able to be able to retrieve it
// for our application. // for our application.
// //
// We can make our index lighter by omitting the `STORED` flag. // We can make our index lighter and
// by omitting `STORED` flag.
schema_builder.add_text_field("body", TEXT); schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -67,7 +73,7 @@ fn main() -> tantivy::Result<()> {
// with our schema in the directory. // with our schema in the directory.
let index = Index::create_in_dir(&index_path, schema.clone())?; let index = Index::create_in_dir(&index_path, schema.clone())?;
// To insert a document we will need an index writer. // To insert document we need an index writer.
// There must be only one writer at a time. // There must be only one writer at a time.
// This single `IndexWriter` is already // This single `IndexWriter` is already
// multithreaded. // multithreaded.
@@ -145,8 +151,8 @@ fn main() -> tantivy::Result<()> {
// At this point our documents are not searchable. // At this point our documents are not searchable.
// //
// //
// We need to call `.commit()` explicitly to force the // We need to call .commit() explicitly to force the
// `index_writer` to finish processing the documents in the queue, // index_writer to finish processing the documents in the queue,
// flush the current index to the disk, and advertise // flush the current index to the disk, and advertise
// the existence of new documents. // the existence of new documents.
// //
@@ -158,14 +164,14 @@ fn main() -> tantivy::Result<()> {
// persistently indexed. // persistently indexed.
// //
// In the scenario of a crash or a power failure, // In the scenario of a crash or a power failure,
// tantivy behaves as if it has rolled back to its last // tantivy behaves as if has rolled back to its last
// commit. // commit.
// # Searching // # Searching
// //
// ### Searcher // ### Searcher
// //
// A reader is required first in order to search an index. // A reader is required to get search the index.
// It acts as a `Searcher` pool that reloads itself, // It acts as a `Searcher` pool that reloads itself,
// depending on a `ReloadPolicy`. // depending on a `ReloadPolicy`.
// //
@@ -181,7 +187,7 @@ fn main() -> tantivy::Result<()> {
// We now need to acquire a searcher. // We now need to acquire a searcher.
// //
// A searcher points to a snapshotted, immutable version of the index. // A searcher points to snapshotted, immutable version of the index.
// //
// Some search experience might require more than // Some search experience might require more than
// one query. Using the same searcher ensures that all of these queries will run on the // one query. Using the same searcher ensures that all of these queries will run on the
@@ -201,7 +207,7 @@ fn main() -> tantivy::Result<()> {
// in both title and body. // in both title and body.
let query_parser = QueryParser::for_index(&index, vec![title, body]); let query_parser = QueryParser::for_index(&index, vec![title, body]);
// `QueryParser` may fail if the query is not in the right // QueryParser may fail if the query is not in the right
// format. For user facing applications, this can be a problem. // format. For user facing applications, this can be a problem.
// A ticket has been opened regarding this problem. // A ticket has been opened regarding this problem.
let query = query_parser.parse_query("sea whale")?; let query = query_parser.parse_query("sea whale")?;
@@ -217,7 +223,7 @@ fn main() -> tantivy::Result<()> {
// //
// We are not interested in all of the documents but // We are not interested in all of the documents but
// only in the top 10. Keeping track of our top 10 best documents // only in the top 10. Keeping track of our top 10 best documents
// is the role of the `TopDocs` collector. // is the role of the TopDocs.
// We can now perform our query. // We can now perform our query.
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?; let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

View File

@@ -7,14 +7,19 @@
// Of course, you can have a look at the tantivy's built-in collectors // Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples. // such as the `CountCollector` for more examples.
extern crate tempdir;
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector}; use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader; use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::Field; use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT}; use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, SegmentReader, TantivyError}; use tantivy::Index;
use tantivy::SegmentReader;
#[derive(Default)] #[derive(Default)]
struct Stats { struct Stats {
@@ -70,18 +75,9 @@ impl Collector for StatsCollector {
fn for_segment( fn for_segment(
&self, &self,
_segment_local_id: u32, _segment_local_id: u32,
segment_reader: &SegmentReader, segment: &SegmentReader,
) -> tantivy::Result<StatsSegmentCollector> { ) -> tantivy::Result<StatsSegmentCollector> {
let fast_field_reader = segment_reader let fast_field_reader = segment.fast_field_reader(self.field)?;
.fast_fields()
.u64(self.field)
.ok_or_else(|| {
let field_name = segment_reader.schema().get_field_name(self.field);
TantivyError::SchemaError(format!(
"Field {:?} is not a u64 fast field.",
field_name
))
})?;
Ok(StatsSegmentCollector { Ok(StatsSegmentCollector {
fast_field_reader, fast_field_reader,
stats: Stats::default(), stats: Stats::default(),

View File

@@ -2,11 +2,14 @@
// //
// In this example, we'll see how to define a tokenizer pipeline // In this example, we'll see how to define a tokenizer pipeline
// by aligning a bunch of `TokenFilter`. // by aligning a bunch of `TokenFilter`.
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer; use tantivy::tokenizer::NgramTokenizer;
use tantivy::{doc, Index}; use tantivy::Index;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// # Defining the schema // # Defining the schema

View File

@@ -8,10 +8,13 @@
// //
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::query::TermQuery; use tantivy::query::TermQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index, IndexReader}; use tantivy::Index;
use tantivy::IndexReader;
// A simple helper function to fetch a single document // A simple helper function to fetch a single document
// given its id from our index. // given its id from our index.

View File

@@ -10,18 +10,21 @@
// - search for the best document matchings "sea whale" // - search for the best document matchings "sea whale"
// - retrieve the best document original content. // - retrieve the best document original content.
extern crate tempdir;
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::FacetCollector; use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery; use tantivy::query::AllQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index}; use tantivy::Index;
use tempfile::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the // Let's create a temporary directory for the
// sake of this example // sake of this example
let index_path = TempDir::new()?; let index_path = TempDir::new("tantivy_facet_example_dir")?;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
@@ -73,3 +76,5 @@ fn main() -> tantivy::Result<()> {
Ok(()) Ok(())
} }
use tempdir::TempDir;

View File

@@ -2,10 +2,14 @@
// //
// Below is an example of creating an indexed integer field in your schema // Below is an example of creating an indexed integer field in your schema
// You can use RangeQuery to get a Count of all occurrences in a given range. // You can use RangeQuery to get a Count of all occurrences in a given range.
#[macro_use]
extern crate tantivy;
use tantivy::collector::Count; use tantivy::collector::Count;
use tantivy::query::RangeQuery; use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED}; use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, Result}; use tantivy::Index;
use tantivy::Result;
fn run() -> Result<()> { fn run() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field // For the sake of simplicity, this schema will only have 1 field

View File

@@ -9,8 +9,11 @@
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, DocId, DocSet, Index, Postings}; use tantivy::Index;
use tantivy::{DocId, DocSet, Postings};
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the // We first create a schema for the sake of the

View File

@@ -1,100 +0,0 @@
// # Indexing from different threads.
//
// It is fairly common to have to index from different threads.
// Tantivy forbids to create more than one `IndexWriter` at a time.
//
// This `IndexWriter` itself has its own multithreaded layer, so managing your own
// indexing threads will not help. However, it can still be useful for some applications.
//
// For instance, if preparing documents to send to tantivy before indexing is the bottleneck of
// your application, it is reasonable to have multiple threads.
//
// Another very common reason to want to index from multiple threads, is implementing a webserver
// with CRUD capabilities. The server framework will most likely handle request from
// different threads.
//
// The recommended way to address both of these use case is to wrap your `IndexWriter` into a
// `Arc<RwLock<IndexWriter>>`.
//
// While this is counterintuitive, adding and deleting documents do not require mutability
// over the `IndexWriter`, so several threads will be able to do this operation concurrently.
//
// The example below does not represent an actual real-life use case (who would spawn thread to
// index a single document?), but aims at demonstrating the mechanism that makes indexing
// from several threads possible.
// ---
// Importing tantivy...
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp};
/// Demonstrates concurrent indexing through an `Arc<RwLock<IndexWriter>>`:
/// `add_document` only needs a read lock (the writer is internally
/// multithreaded), while `commit` takes the write lock.
fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    // 50 MB heap budget for the writer's indexing threads.
    let index_writer: Arc<RwLock<IndexWriter>> = Arc::new(RwLock::new(index.writer(50_000_000)?));
    // # First indexing thread.
    let index_writer_clone_1 = index_writer.clone();
    thread::spawn(move || {
        // we index 100 times the document... for the sake of the example.
        for i in 0..100 {
            let opstamp = index_writer_clone_1
                .read().unwrap() //< A read lock is sufficient here.
                .add_document(
                    doc!(
                    title => "Of Mice and Men",
                    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                    bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                    over the yellow sands in the sunlight before reaching the narrow pool. On one \
                    side of the river the golden foothill slopes curve up to the strong and rocky \
                    Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                    fresh and green with every spring, carrying in their lower leaf junctures the \
                    debris of the winters flooding; and sycamores with mottled, white, recumbent \
                    limbs and branches that arch over the pool"
                ));
            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(20));
        }
    });
    // # Second indexing thread.
    let index_writer_clone_2 = index_writer.clone();
    // For convenience, tantivy also comes with a macro to
    // reduce the boilerplate above.
    thread::spawn(move || {
        // we index 100 times the document... for the sake of the example.
        for i in 0..100 {
            // A read lock is sufficient here.
            let opstamp = {
                let index_writer_rlock = index_writer_clone_2.read().unwrap();
                index_writer_rlock.add_document(doc!(
                    title => "Manufacturing consent",
                    body => "Some great book description..."
                ))
            };
            println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(10));
        }
    });
    // # In the main thread, we commit 10 times, once every 500ms.
    // NOTE(review): the two spawned threads are detached (handles dropped, never
    // joined); main returns after ~5s of commits, which appears intentional for
    // this demo — confirm no documents are expected after the last commit.
    for _ in 0..10 {
        let opstamp: Opstamp = {
            // Committing or rollbacking on the other hand requires write lock. This will block other threads.
            let mut index_writer_wlock = index_writer.write().unwrap();
            index_writer_wlock.commit().unwrap()
        };
        println!("committed with opstamp {}", opstamp);
        thread::sleep(Duration::from_millis(500));
    }
    Ok(())
}

View File

@@ -4,19 +4,23 @@
// your hit result. // your hit result.
// Snippet are an extracted of a target document, and returned in HTML format. // Snippet are an extracted of a target document, and returned in HTML format.
// The keyword searched by the user are highlighted with a `<b>` tag. // The keyword searched by the user are highlighted with a `<b>` tag.
extern crate tempdir;
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index, Snippet, SnippetGenerator}; use tantivy::Index;
use tempfile::TempDir; use tantivy::{Snippet, SnippetGenerator};
use tempdir::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the // Let's create a temporary directory for the
// sake of this example // sake of this example
let index_path = TempDir::new()?; let index_path = TempDir::new("tantivy_example_dir")?;
// # Defining the schema // # Defining the schema
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();

View File

@@ -9,13 +9,17 @@
// - add a few stop words // - add a few stop words
// - index few documents in our index // - index few documents in our index
extern crate tempdir;
// --- // ---
// Importing tantivy... // Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::tokenizer::*; use tantivy::tokenizer::*;
use tantivy::{doc, Index}; use tantivy::Index;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search` // this example assumes you understand the content in `basic_search`

View File

@@ -1,4 +1,4 @@
use tantivy; extern crate tantivy;
use tantivy::schema::*; use tantivy::schema::*;
// # Document from json // # Document from json

View File

@@ -1,16 +0,0 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
combine = ">=3.6.0,<4.0.0"

View File

@@ -1,17 +0,0 @@
#![recursion_limit = "100"]
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;
pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
/// Opaque error returned when a query string cannot be parsed.
pub struct Error;

/// Parses a user query string into a `UserInputAST`.
///
/// Any input left unconsumed by the grammar is discarded; every parse
/// failure collapses into the unit `Error` type.
pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
    match parse_to_ast().parse(query) {
        Ok((ast, _remaining)) => Ok(ast),
        Err(_) => Err(Error),
    }
}

View File

@@ -1,380 +0,0 @@
use super::user_input_ast::*;
use crate::Occur;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;
use combine::*;
parser! {
    // Parses a field prefix: one letter followed by alphanumerics or '_',
    // terminated by ':' (the ':' is consumed but not included in the result).
    fn field[I]()(I) -> String
    where [I: Stream<Item = char>] {
        (
            letter(),
            many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
        ).skip(char(':')).map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
    }
}
parser! {
    // Parses a bare search term. The first character additionally excludes
    // '-' and '`' (so a leading '-' stays available as the NOT prefix), while
    // subsequent characters allow them. The reserved operator words
    // OR / AND / NOT are rejected so they are not swallowed as terms.
    fn word[I]()(I) -> String
    where [I: Stream<Item = char>] {
        (
            satisfy(|c: char| !c.is_whitespace() && !['-', '`', ':', '{', '}', '"', '[', ']', '(',')'].contains(&c) ),
            many(satisfy(|c: char| !c.is_whitespace() && ![':', '{', '}', '"', '[', ']', '(',')'].contains(&c)))
        )
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
        .and_then(|s: String|
            match s.as_str() {
                "OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),
                "AND" => Err(StreamErrorFor::<I>::unexpected_static_message("AND")),
                "NOT" => Err(StreamErrorFor::<I>::unexpected_static_message("NOT")),
                _ => Ok(s)
            })
    }
}
parser! {
    // Parses a term literal, with or without a field prefix:
    //   `title:hello`, `hello`, `"a phrase"`, `weight:-3.5`.
    fn literal[I]()(I) -> UserInputLeaf
    where [I: Stream<Item = char>]
    {
        // A value is either a double-quoted phrase or a single word.
        let term_val = || {
            let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
            phrase.or(word())
        };
        // Negative numbers are only accepted after a field prefix
        // (a leading '-' otherwise means NOT).
        let term_val_with_field = negative_number().or(term_val());
        let term_query =
            (field(), term_val_with_field)
            .map(|(field_name, phrase)| UserInputLiteral {
                field_name: Some(field_name),
                phrase,
            });
        let term_default_field = term_val().map(|phrase| UserInputLiteral {
            field_name: None,
            phrase,
        });
        // `attempt` lets us backtrack if the field prefix fails to match.
        attempt(term_query)
            .or(term_default_field)
            .map(UserInputLeaf::from)
    }
}
parser! {
    // Parses a negative integer or decimal (e.g. "-3", "-3.5") and returns it
    // as its string representation, sign and decimal point included.
    fn negative_number[I]()(I) -> String
    where [I: Stream<Item = char>]
    {
        (char('-'), many1(satisfy(char::is_numeric)),
         optional((char('.'), many1(satisfy(char::is_numeric)))))
        .map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
            // Reassemble the optional fractional part.
            if let Some(('.', s3)) = s3 {
                format!("{}{}.{}", s1, s2, s3)
            } else {
                format!("{}{}", s1, s2)
            }
        })
    }
}
parser! {
    // Consumes one or more whitespace characters (mandatory whitespace),
    // unlike combine's `spaces()` which also matches the empty string.
    fn spaces1[I]()(I) -> ()
    where [I: Stream<Item = char>] {
        skip_many1(space())
    }
}
parser! {
    /// Function that parses a range out of a Stream
    /// Supports ranges like:
    /// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
    /// [a TO *], [a TO c], [abc TO bcd}
    fn range[I]()(I) -> UserInputLeaf
    where [I: Stream<Item = char>] {
        // A bound value: a word, a negative number, or '*' meaning unbounded.
        let range_term_val = || {
            word().or(negative_number()).or(char('*').with(value("*".to_string())))
        };
        // check for unbounded range in the form of <5, <=10, >5, >=5
        // (two-character operators must be attempted before their one-char prefixes)
        let elastic_unbounded_range = (choice([attempt(string(">=")),
            attempt(string("<=")),
            attempt(string("<")),
            attempt(string(">"))])
            .skip(spaces()),
            range_term_val()).
            map(|(comparison_sign, bound): (&str, String)|
                match comparison_sign {
                    ">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
                    "<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
                    "<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
                    ">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
                    // default case
                    _ => (UserInputBound::Unbounded, UserInputBound::Unbounded)
                });
        // '{' is exclusive, '[' inclusive, '*' unbounded.
        let lower_bound = (one_of("{[".chars()), range_term_val())
            .map(|(boundary_char, lower_bound): (char, String)|
                if lower_bound == "*" {
                    UserInputBound::Unbounded
                } else if boundary_char == '{' {
                    UserInputBound::Exclusive(lower_bound)
                } else {
                    UserInputBound::Inclusive(lower_bound)
                });
        // '}' is exclusive, ']' inclusive, '*' unbounded.
        let upper_bound = (range_term_val(), one_of("}]".chars()))
            .map(|(higher_bound, boundary_char): (String, char)|
                if higher_bound == "*" {
                    UserInputBound::Unbounded
                } else if boundary_char == '}' {
                    UserInputBound::Exclusive(higher_bound)
                } else {
                    UserInputBound::Inclusive(higher_bound)
                });
        // return only lower and upper
        let lower_to_upper = (lower_bound.
            skip((spaces(),
                string("TO"),
                spaces())),
            upper_bound);
        (optional(field()).skip(spaces()),
            // try elastic first, if it matches, the range is unbounded
            attempt(elastic_unbounded_range).or(lower_to_upper))
        .map(|(field, (lower, upper))|
            // Construct the leaf from extracted field (optional)
            // and bounds
            UserInputLeaf::Range {
                field,
                lower,
                upper
            })
    }
}
/// Wraps the AST node in a `MustNot` unary clause (the `-` / `NOT` prefix).
fn negate(ast: UserInputAST) -> UserInputAST {
    ast.unary(Occur::MustNot)
}
/// Wraps the AST node in a `Must` unary clause (the `+` prefix).
fn must(ast: UserInputAST) -> UserInputAST {
    ast.unary(Occur::Must)
}
parser! {
    // Parses a single query leaf: a prefixed sub-leaf (-x, +x), a
    // parenthesized sub-query, the match-all '*', a NOT clause, a range,
    // or a plain literal. Order matters: ranges are tried (with backtracking)
    // before literals so "[1 TO 5]" is not consumed as words.
    fn leaf[I]()(I) -> UserInputAST
    where [I: Stream<Item = char>] {
        char('-').with(leaf()).map(negate)
            .or(char('+').with(leaf()).map(must))
            .or(char('(').with(ast()).skip(char(')')))
            .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
            .or(attempt(string("NOT").skip(spaces1()).with(leaf()).map(negate)))
            .or(attempt(range().map(UserInputAST::from)))
            .or(literal().map(UserInputAST::from))
    }
}
/// Binary boolean operator written between two leaves (`a AND b`, `a OR b`).
#[derive(Clone, Copy)]
enum BinaryOperand {
    Or,
    And,
}
parser! {
    // Parses the keywords "AND" / "OR" into a `BinaryOperand`.
    fn binary_operand[I]()(I) -> BinaryOperand
    where [I: Stream<Item = char>]
    {
        string("AND").with(value(BinaryOperand::And))
            .or(string("OR").with(value(BinaryOperand::Or)))
    }
}
/// Folds a chain `left OP1 x1 OP2 x2 …` into a single AST, giving AND a
/// higher precedence than OR.
///
/// The chain is accumulated as a disjunctive normal form: a list of
/// conjunctions. `AND` extends the current conjunction; `OR` opens a new one.
fn aggregate_binary_expressions(
    left: UserInputAST,
    others: Vec<(BinaryOperand, UserInputAST)>,
) -> UserInputAST {
    let mut disjuncts: Vec<Vec<UserInputAST>> = vec![vec![left]];
    for (operator, operand_ast) in others {
        match operator {
            BinaryOperand::And => {
                // Append to the conjunction currently being built.
                if let Some(current) = disjuncts.last_mut() {
                    current.push(operand_ast);
                }
            }
            // Start a fresh conjunction.
            BinaryOperand::Or => disjuncts.push(vec![operand_ast]),
        }
    }
    if disjuncts.len() == 1 {
        // Pure AND chain: no OR wrapper needed. `pop` is safe — len == 1.
        UserInputAST::and(disjuncts.pop().unwrap())
    } else {
        let conjunctions = disjuncts.into_iter().map(UserInputAST::and).collect();
        UserInputAST::or(conjunctions)
    }
}
parser! {
    // Parses a full query expression: either a leaf chain joined by explicit
    // AND/OR operators (folded with AND-precedence), or a sequence of
    // whitespace-separated leaves collected into a `Clause`.
    // Mixing the two forms is a parse error (see the tests).
    pub fn ast[I]()(I) -> UserInputAST
    where [I: Stream<Item = char>]
    {
        let operand_leaf = (binary_operand().skip(spaces()), leaf().skip(spaces()));
        let boolean_expr = (leaf().skip(spaces().silent()), many1(operand_leaf)).map(
            |(left, right)| aggregate_binary_expressions(left,right));
        let whitespace_separated_leaves = many1(leaf().skip(spaces().silent()))
            .map(|subqueries: Vec<UserInputAST>|
                // A single leaf is returned unwrapped.
                if subqueries.len() == 1 {
                    subqueries.into_iter().next().unwrap()
                } else {
                    UserInputAST::Clause(subqueries.into_iter().collect())
                });
        let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
        // Tolerate leading and trailing whitespace.
        spaces().with(expr).skip(spaces())
    }
}
parser! {
    // Entry point: parses an entire query string to EOF.
    // An empty (or all-whitespace) input yields the empty query rather
    // than an error.
    pub fn parse_to_ast[I]()(I) -> UserInputAST
    where [I: Stream<Item = char>]
    {
        spaces().with(optional(ast()).skip(eof())).map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
    }
}
#[cfg(test)]
mod test {
use super::*;
// Parses `query` and asserts that the Debug rendering of the resulting
// AST equals `expected`.
fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
    let query = parse_to_ast().parse(query).unwrap().0;
    let query_str = format!("{:?}", query);
    assert_eq!(query_str, expected);
}
// Asserts that `query` fails to parse at all.
fn test_is_parse_err(query: &str) {
    assert!(parse_to_ast().parse(query).is_err());
}
#[test]
fn test_parse_empty_to_ast() {
test_parse_query_to_ast_helper("", "<emptyclause>");
}
#[test]
fn test_parse_query_to_ast_hyphen() {
    // Hyphenated terms must survive as a single token, quoted or not.
    test_parse_query_to_ast_helper("\"www-form-encoded\"", "\"www-form-encoded\"");
    // Unquoted form: an interior '-' is part of the word even though a
    // leading '-' would have been parsed as a NOT prefix.
    // (The original repeated this exact assertion twice; the duplicate
    // added no coverage and is removed.)
    test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
}
#[test]
fn test_parse_query_to_ast_not_op() {
assert_eq!(
format!("{:?}", parse_to_ast().parse("NOT")),
"Err(UnexpectedParse)"
);
test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
}
#[test]
fn test_parse_query_to_ast_binary_op() {
test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
assert_eq!(
format!("{:?}", parse_to_ast().parse("a OR b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("a AND b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
"Err(UnexpectedParse)"
);
}
#[test]
fn test_parse_elastic_query_ranges() {
test_parse_query_to_ast_helper("title: >a", "title:{\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title:>=a", "title:[\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title: <a", "title:{\"*\" TO \"a\"}");
test_parse_query_to_ast_helper("title:<=a", "title:{\"*\" TO \"a\"]");
test_parse_query_to_ast_helper("title:<=bsd", "title:{\"*\" TO \"bsd\"]");
test_parse_query_to_ast_helper("weight: >70", "weight:{\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight:>=70", "weight:[\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <70", "weight:{\"*\" TO \"70\"}");
test_parse_query_to_ast_helper("weight:<=70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: >60.7", "weight:{\"60.7\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <= 70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
}
#[test]
fn test_range_parser() {
// testing the range() parser separately
let res = range().parse("title: <hello").unwrap().0;
let expected = UserInputLeaf::Range {
field: Some("title".to_string()),
lower: UserInputBound::Unbounded,
upper: UserInputBound::Exclusive("hello".to_string()),
};
let res2 = range().parse("title:{* TO hello}").unwrap().0;
assert_eq!(res, expected);
assert_eq!(res2, expected);
let expected_weight = UserInputLeaf::Range {
field: Some("weight".to_string()),
lower: UserInputBound::Inclusive("71.2".to_string()),
upper: UserInputBound::Unbounded,
};
let res3 = range().parse("weight: >=71.2").unwrap().0;
let res4 = range().parse("weight:[71.2 TO *}").unwrap().0;
assert_eq!(res3, expected_weight);
assert_eq!(res4, expected_weight);
}
#[test]
fn test_parse_query_to_triming_spaces() {
test_parse_query_to_ast_helper(" abc", "\"abc\"");
test_parse_query_to_ast_helper("abc ", "\"abc\"");
test_parse_query_to_ast_helper("( a OR abc)", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("a OR abc ", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc )", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc) ", "(?(\"a\") ?(\"abc\"))");
}
#[test]
fn test_parse_query_to_ast() {
test_parse_query_to_ast_helper("abc", "\"abc\"");
test_parse_query_to_ast_helper("a b", "(\"a\" \"b\")");
test_parse_query_to_ast_helper("+(a b)", "+((\"a\" \"b\"))");
test_parse_query_to_ast_helper("+d", "+(\"d\")");
test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:{\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
test_is_parse_err("abc + ");
}
}

View File

@@ -1,2 +1,2 @@
#!/bin/bash #!/bin/bash
cargo test cargo test --no-default-features --features mmap -- --test-threads 1

View File

@@ -1,19 +1,21 @@
use super::Collector; use super::Collector;
use crate::collector::SegmentCollector; use collector::SegmentCollector;
use crate::DocId; use DocId;
use crate::Result; use Result;
use crate::Score; use Score;
use crate::SegmentLocalId; use SegmentLocalId;
use crate::SegmentReader; use SegmentReader;
/// `CountCollector` collector only counts how many /// `CountCollector` collector only counts how many
/// documents match the query. /// documents match the query.
/// ///
/// ```rust /// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result};
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
@@ -92,8 +94,8 @@ impl SegmentCollector for SegmentCountCollector {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{Count, SegmentCountCollector}; use super::{Count, SegmentCountCollector};
use crate::collector::Collector; use collector::Collector;
use crate::collector::SegmentCollector; use collector::SegmentCollector;
#[test] #[test]
fn test_count_collect_does_not_requires_scoring() { fn test_count_collect_does_not_requires_scoring() {
@@ -123,4 +125,5 @@ mod tests {
assert_eq!(count_collector.harvest(), 2); assert_eq!(count_collector.harvest(), 2);
} }
} }
} }

View File

@@ -1,126 +0,0 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector};
use crate::Result;
use crate::{DocAddress, DocId, Score, SegmentReader};
/// Top-K collector that ranks documents by a user-supplied scorer instead of
/// the query score, delegating heap maintenance to the inner `TopCollector`.
pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
    // Factory producing one per-segment scorer for each segment searched.
    custom_scorer: TCustomScorer,
    // Keeps the best `limit` (score, doc) pairs.
    collector: TopCollector<TScore>,
}
impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
where
    TScore: Clone + PartialOrd,
{
    /// Builds a collector retaining the `limit` highest custom scores.
    pub fn new(
        custom_scorer: TCustomScorer,
        limit: usize,
    ) -> CustomScoreTopCollector<TCustomScorer, TScore> {
        CustomScoreTopCollector {
            custom_scorer,
            collector: TopCollector::with_limit(limit),
        }
    }
}
/// A custom segment scorer makes it possible to define any kind of score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
pub trait CustomSegmentScorer<TScore>: 'static {
/// Computes the score of a specific `doc`.
fn score(&self, doc: DocId) -> TScore;
}
/// `CustomScorer` makes it possible to define any kind of score.
///
/// The `CustomerScorer` itself does not make much of the computation itself.
/// Instead, it helps constructing `Self::Child` instances that will compute
/// the score at a segment scale.
pub trait CustomScorer<TScore>: Sync {
/// Type of the associated [`CustomSegmentScorer`](./trait.CustomSegmentScorer.html).
type Child: CustomSegmentScorer<TScore>;
/// Builds a child scorer for a specific segment. The child scorer is associated to
/// a specific segment.
fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
where
    TCustomScorer: CustomScorer<TScore>,
    TScore: 'static + PartialOrd + Clone + Send + Sync,
{
    type Fruit = Vec<(TScore, DocAddress)>;
    type Child = CustomScoreTopSegmentCollector<TCustomScorer::Child, TScore>;
    // Pairs a segment-local scorer with a segment-local top collector.
    fn for_segment(
        &self,
        segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> Result<Self::Child> {
        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
        let segment_collector = self
            .collector
            .for_segment(segment_local_id, segment_reader)?;
        Ok(CustomScoreTopSegmentCollector {
            segment_collector,
            segment_scorer,
        })
    }
    // The query score is ignored entirely, so scoring can be skipped.
    fn requires_scoring(&self) -> bool {
        false
    }
    // Merging the per-segment top-K lists is delegated to the inner collector.
    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
        self.collector.merge_fruits(segment_fruits)
    }
}
/// Per-segment counterpart of `CustomScoreTopCollector`: replaces each
/// document's query score with the custom score before collection.
pub struct CustomScoreTopSegmentCollector<T, TScore>
where
    TScore: 'static + PartialOrd + Clone + Send + Sync + Sized,
    T: CustomSegmentScorer<TScore>,
{
    segment_collector: TopSegmentCollector<TScore>,
    segment_scorer: T,
}
impl<T, TScore> SegmentCollector for CustomScoreTopSegmentCollector<T, TScore>
where
    TScore: 'static + PartialOrd + Clone + Send + Sync,
    T: 'static + CustomSegmentScorer<TScore>,
{
    type Fruit = Vec<(TScore, DocAddress)>;
    fn collect(&mut self, doc: DocId, _score: Score) {
        // The query `_score` is discarded; only the custom score matters.
        let score = self.segment_scorer.score(doc);
        self.segment_collector.collect(doc, score);
    }
    fn harvest(self) -> Vec<(TScore, DocAddress)> {
        self.segment_collector.harvest()
    }
}
// Blanket impl: any closure `Fn(&SegmentReader) -> T` acts as a
// `CustomScorer`, so callers can pass a plain closure instead of a type.
impl<F, TScore, T> CustomScorer<TScore> for F
where
    F: 'static + Send + Sync + Fn(&SegmentReader) -> T,
    T: CustomSegmentScorer<TScore>,
{
    type Child = T;
    fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
        // Closure construction cannot fail; wrap the result in Ok.
        Ok((self)(segment_reader))
    }
}
// Blanket impl: any closure `Fn(DocId) -> TScore` is usable as a
// per-segment scorer.
impl<F, TScore> CustomSegmentScorer<TScore> for F
where
    F: 'static + Sync + Send + Fn(DocId) -> TScore,
{
    fn score(&self, doc: DocId) -> TScore {
        (self)(doc)
    }
}

View File

@@ -1,15 +1,9 @@
use crate::collector::Collector; use collector::Collector;
use crate::collector::SegmentCollector; use collector::SegmentCollector;
use crate::docset::SkipResult; use docset::SkipResult;
use crate::fastfield::FacetReader; use fastfield::FacetReader;
use crate::schema::Facet; use schema::Facet;
use crate::schema::Field; use schema::Field;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::btree_map; use std::collections::btree_map;
use std::collections::BTreeMap; use std::collections::BTreeMap;
@@ -18,6 +12,11 @@ use std::collections::BinaryHeap;
use std::collections::Bound; use std::collections::Bound;
use std::iter::Peekable; use std::iter::Peekable;
use std::{u64, usize}; use std::{u64, usize};
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
struct Hit<'a> { struct Hit<'a> {
count: u64, count: u64,
@@ -27,13 +26,13 @@ struct Hit<'a> {
impl<'a> Eq for Hit<'a> {} impl<'a> Eq for Hit<'a> {}
impl<'a> PartialEq<Hit<'a>> for Hit<'a> { impl<'a> PartialEq<Hit<'a>> for Hit<'a> {
fn eq(&self, other: &Hit<'_>) -> bool { fn eq(&self, other: &Hit) -> bool {
self.count == other.count self.count == other.count
} }
} }
impl<'a> PartialOrd<Hit<'a>> for Hit<'a> { impl<'a> PartialOrd<Hit<'a>> for Hit<'a> {
fn partial_cmp(&self, other: &Hit<'_>) -> Option<Ordering> { fn partial_cmp(&self, other: &Hit) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
@@ -81,10 +80,12 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ///
/// ///
/// ```rust /// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector; /// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery; /// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result};
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
@@ -263,10 +264,7 @@ impl Collector for FacetCollector {
_: SegmentLocalId, _: SegmentLocalId,
reader: &SegmentReader, reader: &SegmentReader,
) -> Result<FacetSegmentCollector> { ) -> Result<FacetSegmentCollector> {
let field_name = reader.schema().get_field_name(self.field); let facet_reader = reader.facet_reader(self.field)?;
let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
})?;
let mut collapse_mapping = Vec::new(); let mut collapse_mapping = Vec::new();
let mut counts = Vec::new(); let mut counts = Vec::new();
@@ -396,7 +394,7 @@ impl<'a> Iterator for FacetChildIterator<'a> {
} }
impl FacetCounts { impl FacetCounts {
pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_> pub fn get<T>(&self, facet_from: T) -> FacetChildIterator
where where
Facet: From<T>, Facet: From<T>,
{ {
@@ -410,8 +408,7 @@ impl FacetCounts {
let facet_after = Facet::from_encoded_string(facet_after_bytes); let facet_after = Facet::from_encoded_string(facet_after_bytes);
Bound::Excluded(facet_after) Bound::Excluded(facet_after)
}; };
let underlying: btree_map::Range<'_, _, _> = let underlying: btree_map::Range<_, _> = self.facet_counts.range((left_bound, right_bound));
self.facet_counts.range((left_bound, right_bound));
FacetChildIterator { underlying } FacetChildIterator { underlying }
} }
@@ -452,12 +449,12 @@ impl FacetCounts {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{FacetCollector, FacetCounts}; use super::{FacetCollector, FacetCounts};
use crate::core::Index; use core::Index;
use crate::query::AllQuery; use query::AllQuery;
use crate::schema::{Document, Facet, Field, Schema};
use rand::distributions::Uniform; use rand::distributions::Uniform;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use schema::{Document, Facet, Field, Schema};
use std::iter; use std::iter;
#[test] #[test]
@@ -515,7 +512,7 @@ mod tests {
#[should_panic(expected = "Tried to add a facet which is a descendant of \ #[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")] an already added facet.")]
fn test_misused_facet_collector() { fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe")); facet_collector.add_facet(Facet::from("/country/europe"));
} }
@@ -546,7 +543,7 @@ mod tests {
#[test] #[test]
fn test_non_used_facet_collector() { fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope")); facet_collector.add_facet(Facet::from("/countryeurope"));
} }
@@ -599,18 +596,19 @@ mod tests {
); );
} }
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use crate::collector::FacetCollector; use collector::FacetCollector;
use crate::query::AllQuery; use query::AllQuery;
use crate::schema::{Facet, Schema}; use rand::{thread_rng, Rng};
use crate::Index; use schema::Facet;
use rand::seq::SliceRandom; use schema::Schema;
use rand::thread_rng;
use test::Bencher; use test::Bencher;
use Index;
#[bench] #[bench]
fn bench_facet_collector(b: &mut Bencher) { fn bench_facet_collector(b: &mut Bencher) {
@@ -627,7 +625,7 @@ mod bench {
} }
} }
// 40425 docs // 40425 docs
docs[..].shuffle(&mut thread_rng()); thread_rng().shuffle(&mut docs[..]);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs { for doc in docs {
@@ -636,7 +634,7 @@ mod bench {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
b.iter(|| { b.iter(|| {
let searcher = reader.searcher(); let searcher = index.searcher();
let facet_collector = FacetCollector::for_field(facet_field); let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap(); searcher.search(&AllQuery, &facet_collector).unwrap();
}); });

View File

@@ -82,7 +82,6 @@ mod tests {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST); let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST); let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
let text_field = schema_builder.add_text_field("text", STRING); let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -95,7 +94,6 @@ mod tests {
index_writer.add_document(doc!( index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64, num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64, num_field_u64 => (i % 2u64) as u64,
num_field_f64 => (i % 4u64) as f64,
text_field => "text" text_field => "text"
)); ));
} }
@@ -106,11 +104,10 @@ mod tests {
let searcher = index.reader().searcher(); let searcher = index.reader().searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64); let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64); let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
{ {
// perform the query // perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64); let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64);
let mut query_parser = QueryParser::for_index(index, vec![text_field]); let mut query_parser = QueryParser::for_index(index, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap(); let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap(); query.search(&searcher, &mut facet_collectors).unwrap();
@@ -120,8 +117,6 @@ mod tests {
assert_eq!(ffvf_u64.counters[&1], 5); assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4); assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3); assert_eq!(ffvf_i64.counters[&1], 3);
assert_eq!(ffvf_f64.counters[&0.0], 3);
assert_eq!(ffvf_f64.counters[&2.0], 2);
} }
} }

View File

@@ -35,6 +35,7 @@ The resulting `Fruit` will then be a typed tuple with each collector's original
in their respective position. in their respective position.
```rust ```rust
# extern crate tantivy;
# use tantivy::schema::*; # use tantivy::schema::*;
# use tantivy::*; # use tantivy::*;
# use tantivy::query::*; # use tantivy::query::*;
@@ -65,7 +66,7 @@ let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
The `Collector` trait is implemented for up to 4 collectors. The `Collector` trait is implemented for up to 4 collectors.
If you have more than 4 collectors, you can either group them into If you have more than 4 collectors, you can either group them into
tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html). tuples of tuples `(a,(b,(c,d)))`, or rely on `MultiCollector`'s.
# Combining several collectors dynamically # Combining several collectors dynamically
@@ -84,12 +85,12 @@ See the `custom_collector` example.
*/ */
use crate::DocId; use downcast_rs;
use crate::Result; use DocId;
use crate::Score; use Result;
use crate::SegmentLocalId; use Score;
use crate::SegmentReader; use SegmentLocalId;
use downcast_rs::impl_downcast; use SegmentReader;
mod count_collector; mod count_collector;
pub use self::count_collector::Count; pub use self::count_collector::Count;
@@ -102,11 +103,8 @@ mod top_collector;
mod top_score_collector; mod top_score_collector;
pub use self::top_score_collector::TopDocs; pub use self::top_score_collector::TopDocs;
mod custom_score_top_collector; mod top_field_collector;
pub use self::custom_score_top_collector::{CustomScorer, CustomSegmentScorer}; pub use self::top_field_collector::TopDocsByField;
mod tweak_score_top_collector;
pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
mod facet_collector; mod facet_collector;
pub use self::facet_collector::FacetCollector; pub use self::facet_collector::FacetCollector;

View File

@@ -1,30 +1,29 @@
use super::Collector; use super::Collector;
use super::SegmentCollector; use super::SegmentCollector;
use crate::collector::Fruit; use collector::Fruit;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::ops::Deref; use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
pub struct MultiFruit { pub struct MultiFruit {
sub_fruits: Vec<Option<Box<dyn Fruit>>>, sub_fruits: Vec<Option<Box<Fruit>>>,
} }
pub struct CollectorWrapper<TCollector: Collector>(TCollector); pub struct CollectorWrapper<TCollector: Collector>(TCollector);
impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> { impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
type Fruit = Box<dyn Fruit>; type Fruit = Box<Fruit>;
type Child = Box<dyn BoxableSegmentCollector>; type Child = Box<BoxableSegmentCollector>;
fn for_segment( fn for_segment(
&self, &self,
segment_local_id: u32, segment_local_id: u32,
reader: &SegmentReader, reader: &SegmentReader,
) -> Result<Box<dyn BoxableSegmentCollector>> { ) -> Result<Box<BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?; let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child))) Ok(Box::new(SegmentCollectorWrapper(child)))
} }
@@ -33,7 +32,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
self.0.requires_scoring() self.0.requires_scoring()
} }
fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<dyn Fruit>> { fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> {
let typed_fruit: Vec<TCollector::Fruit> = children let typed_fruit: Vec<TCollector::Fruit> = children
.into_iter() .into_iter()
.map(|untyped_fruit| { .map(|untyped_fruit| {
@@ -50,21 +49,21 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
} }
} }
impl SegmentCollector for Box<dyn BoxableSegmentCollector> { impl SegmentCollector for Box<BoxableSegmentCollector> {
type Fruit = Box<dyn Fruit>; type Fruit = Box<Fruit>;
fn collect(&mut self, doc: u32, score: f32) { fn collect(&mut self, doc: u32, score: f32) {
self.as_mut().collect(doc, score); self.as_mut().collect(doc, score);
} }
fn harvest(self) -> Box<dyn Fruit> { fn harvest(self) -> Box<Fruit> {
BoxableSegmentCollector::harvest_from_box(self) BoxableSegmentCollector::harvest_from_box(self)
} }
} }
pub trait BoxableSegmentCollector { pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: f32); fn collect(&mut self, doc: u32, score: f32);
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>; fn harvest_from_box(self: Box<Self>) -> Box<Fruit>;
} }
pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector); pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector);
@@ -76,7 +75,7 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
self.0.collect(doc, score); self.0.collect(doc, score);
} }
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit> { fn harvest_from_box(self: Box<Self>) -> Box<Fruit> {
Box::new(self.0.harvest()) Box::new(self.0.harvest())
} }
} }
@@ -105,10 +104,12 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors). /// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors).
/// ///
/// ```rust /// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{Count, TopDocs, MultiCollector}; /// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result};
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
@@ -155,9 +156,8 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[derive(Default)] #[derive(Default)]
pub struct MultiCollector<'a> { pub struct MultiCollector<'a> {
collector_wrappers: Vec< collector_wrappers:
Box<dyn Collector<Child = Box<dyn BoxableSegmentCollector>, Fruit = Box<dyn Fruit>> + 'a>, Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>,
>,
} }
impl<'a> MultiCollector<'a> { impl<'a> MultiCollector<'a> {
@@ -199,14 +199,11 @@ impl<'a> Collector for MultiCollector<'a> {
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
self.collector_wrappers self.collector_wrappers.iter().any(|c| c.requires_scoring())
.iter()
.map(Deref::deref)
.any(Collector::requires_scoring)
} }
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> { fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len()) let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len())
.map(|_| Vec::with_capacity(segments_multifruits.len())) .map(|_| Vec::with_capacity(segments_multifruits.len()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
for segment_multifruit in segments_multifruits { for segment_multifruit in segments_multifruits {
@@ -229,7 +226,7 @@ impl<'a> Collector for MultiCollector<'a> {
} }
pub struct MultiCollectorChild { pub struct MultiCollectorChild {
children: Vec<Box<dyn BoxableSegmentCollector>>, children: Vec<Box<BoxableSegmentCollector>>,
} }
impl SegmentCollector for MultiCollectorChild { impl SegmentCollector for MultiCollectorChild {
@@ -256,12 +253,12 @@ impl SegmentCollector for MultiCollectorChild {
mod tests { mod tests {
use super::*; use super::*;
use crate::collector::{Count, TopDocs}; use collector::{Count, TopDocs};
use crate::query::TermQuery; use query::TermQuery;
use crate::schema::IndexRecordOption; use schema::IndexRecordOption;
use crate::schema::{Schema, TEXT}; use schema::{Schema, TEXT};
use crate::Index; use Index;
use crate::Term; use Term;
#[test] #[test]
fn test_multi_collector() { fn test_multi_collector() {

View File

@@ -1,20 +1,12 @@
use super::*; use super::*;
use crate::core::SegmentReader; use core::SegmentReader;
use crate::fastfield::BytesFastFieldReader; use fastfield::BytesFastFieldReader;
use crate::fastfield::FastFieldReader; use fastfield::FastFieldReader;
use crate::schema::Field; use schema::Field;
use crate::DocAddress; use DocAddress;
use crate::DocId; use DocId;
use crate::Score; use Score;
use crate::SegmentLocalId; use SegmentLocalId;
pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
compute_score: true,
};
pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
compute_score: true,
};
/// Stores all of the doc ids. /// Stores all of the doc ids.
/// This collector is only used for tests. /// This collector is only used for tests.
@@ -22,9 +14,7 @@ pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
/// ///
/// actise, as it does not store /// actise, as it does not store
/// the segment ordinals /// the segment ordinals
pub struct TestCollector { pub struct TestCollector;
pub compute_score: bool,
}
pub struct TestSegmentCollector { pub struct TestSegmentCollector {
segment_id: SegmentLocalId, segment_id: SegmentLocalId,
@@ -42,6 +32,7 @@ impl TestFruit {
pub fn docs(&self) -> &[DocAddress] { pub fn docs(&self) -> &[DocAddress] {
&self.docs[..] &self.docs[..]
} }
pub fn scores(&self) -> &[Score] { pub fn scores(&self) -> &[Score] {
&self.scores[..] &self.scores[..]
} }
@@ -63,7 +54,7 @@ impl Collector for TestCollector {
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
self.compute_score true
} }
fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> { fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> {
@@ -123,15 +114,11 @@ impl Collector for FastFieldTestCollector {
fn for_segment( fn for_segment(
&self, &self,
_: SegmentLocalId, _: SegmentLocalId,
segment_reader: &SegmentReader, reader: &SegmentReader,
) -> Result<FastFieldSegmentCollector> { ) -> Result<FastFieldSegmentCollector> {
let reader = segment_reader
.fast_fields()
.u64(self.field)
.expect("Requested field is not a fast field.");
Ok(FastFieldSegmentCollector { Ok(FastFieldSegmentCollector {
vals: Vec::new(), vals: Vec::new(),
reader, reader: reader.fast_field_reader(self.field)?,
}) })
} }
@@ -183,14 +170,11 @@ impl Collector for BytesFastFieldTestCollector {
fn for_segment( fn for_segment(
&self, &self,
_segment_local_id: u32, _segment_local_id: u32,
segment_reader: &SegmentReader, segment: &SegmentReader,
) -> Result<BytesFastFieldSegmentCollector> { ) -> Result<BytesFastFieldSegmentCollector> {
Ok(BytesFastFieldSegmentCollector { Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(), vals: Vec::new(),
reader: segment_reader reader: segment.bytes_fast_field_reader(self.field)?,
.fast_fields()
.bytes(self.field)
.expect("Field is not a bytes fast field."),
}) })
} }
@@ -207,7 +191,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>; type Fruit = Vec<u8>;
fn collect(&mut self, doc: u32, _score: f32) { fn collect(&mut self, doc: u32, _score: f32) {
let data = self.reader.get_bytes(doc); let data = self.reader.get_val(doc);
self.vals.extend(data); self.vals.extend(data);
} }

View File

@@ -1,20 +1,17 @@
use crate::DocAddress;
use crate::DocId;
use crate::Result;
use crate::SegmentLocalId;
use crate::SegmentReader;
use serde::export::PhantomData; use serde::export::PhantomData;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::BinaryHeap; use std::collections::BinaryHeap;
use DocAddress;
use DocId;
use Result;
use SegmentLocalId;
use SegmentReader;
/// Contains a feature (field, score, etc.) of a document along with the document address. /// Contains a feature (field, score, etc.) of a document along with the document address.
/// ///
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed. /// default Rust heap is a max heap, whereas a min heap is needed.
/// ///
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
/// address is used.
///
/// WARNING: equality is not what you would expect here. /// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, and regardless of whether `doc` /// Two elements are equal if their feature is equal, and regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this /// is equal. This should be perfectly fine for this usage, but let's make sure this
@@ -24,37 +21,29 @@ struct ComparableDoc<T, D> {
doc: D, doc: D,
} }
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> { impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> { impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
#[inline] #[inline]
fn cmp(&self, other: &Self) -> Ordering { fn cmp(&self, other: &Self) -> Ordering {
// Reversed to make BinaryHeap work as a min-heap other
let by_feature = other
.feature .feature
.partial_cmp(&self.feature) .partial_cmp(&self.feature)
.unwrap_or(Ordering::Equal); .unwrap_or_else(|| Ordering::Equal)
let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
// In case of a tie on the feature, we sort by ascending
// `DocAddress` in order to ensure a stable sorting of the
// documents.
by_feature.then_with(lazy_by_doc_address)
} }
} }
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> { impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal self.cmp(other) == Ordering::Equal
} }
} }
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {} impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
pub(crate) struct TopCollector<T> { pub(crate) struct TopCollector<T> {
limit: usize, limit: usize,
@@ -109,11 +98,11 @@ where
.collect()) .collect())
} }
pub(crate) fn for_segment<F: PartialOrd>( pub(crate) fn for_segment(
&self, &self,
segment_id: SegmentLocalId, segment_id: SegmentLocalId,
_: &SegmentReader, _: &SegmentReader,
) -> Result<TopSegmentCollector<F>> { ) -> Result<TopSegmentCollector<T>> {
Ok(TopSegmentCollector::new(segment_id, self.limit)) Ok(TopSegmentCollector::new(segment_id, self.limit))
} }
} }
@@ -188,8 +177,9 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::TopSegmentCollector; use super::{TopCollector, TopSegmentCollector};
use crate::DocAddress; use DocAddress;
use Score;
#[test] #[test]
fn test_top_collector_not_at_capacity() { fn test_top_collector_not_at_capacity() {
@@ -227,92 +217,8 @@ mod tests {
} }
#[test] #[test]
fn test_top_segment_collector_stable_ordering_for_equal_feature() { #[should_panic]
// given that the documents are collected in ascending doc id order, fn test_top_0() {
// when harvesting we have to guarantee stable sorting in case of a tie let _collector: TopCollector<Score> = TopCollector::with_limit(0);
// on the score
let doc_ids_collection = [4, 5, 6];
let score = 3.14;
let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
for id in &doc_ids_collection {
top_collector_limit_2.collect(*id, score);
}
let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
for id in &doc_ids_collection {
top_collector_limit_3.collect(*id, score);
}
assert_eq!(
top_collector_limit_2.harvest(),
top_collector_limit_3.harvest()[..2].to_vec(),
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::TopSegmentCollector;
use test::Bencher;
#[bench]
fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 400);
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
let mut score = 1.0;
for i in 0..100 {
score += 1.0;
top_collector.collect(i, score);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
} }
} }

View File

@@ -0,0 +1,257 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use Result;
use SegmentLocalId;
use SegmentReader;
/// The Top Field Collector keeps track of the K documents
/// sorted by a fast field in the index
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, Field, FAST, TEXT};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
///
/// # fn main() -> tantivy::Result<()> {
/// #   let mut schema_builder = Schema::builder();
/// #   let title = schema_builder.add_text_field("title", TEXT);
/// #   let rating = schema_builder.add_u64_field("rating", FAST);
/// #   let schema = schema_builder.build();
/// #   let index = Index::create_in_ram(schema);
/// #   let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// #   index_writer.add_document(doc!(
/// #       title => "The Name of the Wind",
/// #       rating => 92u64,
/// #   ));
/// #   index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// #   index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// #   index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// #   index_writer.commit()?;
/// #   let reader = index.reader()?;
/// #   let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// #   let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// #   assert_eq!(top_docs,
/// #              vec![(97u64, DocAddress(0u32, 1)),
/// #                   (80u64, DocAddress(0u32, 3))]);
/// #   Ok(())
/// # }
/// #
/// /// Searches the document matching the given query, and
/// /// collects the top 10 documents, ordered by the `field`
/// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
///                          query: &Query,
///                          sort_by_field: Field)
///     -> Result<Vec<(u64, DocAddress)>> {
///
///     // This is where we build our collector!
///     let top_docs_by_rating = TopDocs::with_limit(2).order_by_field(sort_by_field);
///
///     // ... and here are our documents. Note this is a simple vec.
///     // The `u64` in the pair is the value of our fast field for each document.
///     searcher.search(query, &top_docs_by_rating)
/// }
/// ```
pub struct TopDocsByField<T> {
    // Generic top-K accumulator; the ordering feature is the fast field value.
    collector: TopCollector<T>,
    // Fast field whose per-document value is used to rank the documents.
    field: Field,
}
impl<T: FastValue + PartialOrd + Clone> TopDocsByField<T> {
    /// Creates a top field collector, with a number of documents equal to "limit".
    ///
    /// The given field name must be a fast field, otherwise the collector will
    /// return an error while collecting results.
    ///
    /// This constructor is crate-private. Clients are supposed to
    /// build a `TopDocsByField` object using the `TopDocs` API.
    ///
    /// e.g.:
    /// `TopDocs::with_limit(2).order_by_field(sort_by_field)`
    ///
    /// # Panics
    /// The method panics if limit is 0
    pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> {
        TopDocsByField {
            // `TopCollector::with_limit` is where the `limit == 0` panic originates.
            collector: TopCollector::with_limit(limit),
            field,
        }
    }
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByField<T> {
    type Fruit = Vec<(T, DocAddress)>;

    type Child = TopFieldSegmentCollector<T>;

    /// Builds the per-segment collector: a top-K accumulator paired with the
    /// fast field reader for `self.field` on this segment. Fails if the field
    /// is not a fast field on this segment.
    fn for_segment(
        &self,
        segment_local_id: SegmentLocalId,
        reader: &SegmentReader,
    ) -> Result<TopFieldSegmentCollector<T>> {
        let segment_top_collector = self.collector.for_segment(segment_local_id, reader)?;
        let fast_field_reader = reader.fast_field_reader(self.field)?;
        Ok(TopFieldSegmentCollector {
            collector: segment_top_collector,
            reader: fast_field_reader,
        })
    }

    /// Ordering relies on the fast field value, never on the score.
    fn requires_scoring(&self) -> bool {
        false
    }

    /// Merging the per-segment top-K lists is delegated to the inner collector.
    fn merge_fruits(
        &self,
        segment_fruits: Vec<Vec<(T, DocAddress)>>,
    ) -> Result<Vec<(T, DocAddress)>> {
        self.collector.merge_fruits(segment_fruits)
    }
}
/// Per-segment collector for [`TopDocsByField`]: collects the top-K documents
/// of one segment, ranked by their fast field value.
pub struct TopFieldSegmentCollector<T: FastValue + PartialOrd> {
    // Segment-local top-K accumulator.
    collector: TopSegmentCollector<T>,
    // Fast field reader used to fetch the ranking value for each doc.
    reader: FastFieldReader<T>,
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
    for TopFieldSegmentCollector<T>
{
    type Fruit = Vec<(T, DocAddress)>;

    /// Looks up the fast field value of `doc` and feeds it to the top-K
    /// accumulator. The score is ignored: ranking is by field value only.
    fn collect(&mut self, doc: u32, _score: f32) {
        self.collector.collect(doc, self.reader.get(doc));
    }

    /// Consumes the collector and returns the segment's top documents.
    fn harvest(self) -> Vec<(T, DocAddress)> {
        self.collector.harvest()
    }
}
#[cfg(test)]
mod tests {
use super::TopDocsByField;
use collector::Collector;
use collector::TopDocs;
use query::Query;
use query::QueryParser;
use schema::Field;
use schema::IntOptions;
use schema::{Schema, FAST, TEXT};
use DocAddress;
use Index;
use IndexWriter;
use TantivyError;
const TITLE: &str = "title";
const SIZE: &str = "size";
#[test]
fn test_top_collector_not_at_capacity() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, query) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
index_writer.add_document(doc!(
title => "growler of beer",
size => 64u64,
));
index_writer.add_document(doc!(
title => "pint of beer",
size => 16u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
top_docs,
vec![
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))
]
);
}
#[test]
#[should_panic]
fn test_field_does_not_exist() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
.expect("should panic");
}
#[test]
fn test_field_not_fast_field() {
    // `size` is declared with default options, i.e. it is NOT a fast field.
    let mut builder = Schema::builder();
    let title_field = builder.add_text_field(TITLE, TEXT);
    let size_field = builder.add_u64_field(SIZE, IntOptions::default());
    let (index, _) = index("beer", title_field, builder.build(), |writer| {
        writer.add_document(doc!(
            title_field => "bottle of beer",
            size_field => 12u64,
        ));
    });
    let searcher = index.reader().unwrap().searcher();
    let collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size_field);
    // Building the per-segment collector must surface a fast-field error.
    let err = collector
        .for_segment(0, searcher.segment_reader(0))
        .map(|_| ())
        .unwrap_err();
    assert_matches!(err, TantivyError::FastFieldError(_));
}
/// Builds a fresh in-RAM index for the given schema, lets `doc_adder`
/// populate it through a single-threaded writer, commits, and returns
/// the index together with `query` parsed against `query_field`.
fn index(
    query: &str,
    query_field: Field,
    schema: Schema,
    mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<Query>) {
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    doc_adder(&mut writer);
    writer.commit().unwrap();
    let parsed_query = QueryParser::for_index(&index, vec![query_field])
        .parse_query(query)
        .unwrap();
    (index, parsed_query)
}
}

View File

@@ -1,35 +1,32 @@
use super::Collector; use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector; use collector::top_collector::TopCollector;
use crate::collector::top_collector::TopCollector; use collector::top_collector::TopSegmentCollector;
use crate::collector::top_collector::TopSegmentCollector; use collector::SegmentCollector;
use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector; use collector::TopDocsByField;
use crate::collector::{ use fastfield::FastValue;
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector, use schema::Field;
}; use DocAddress;
use crate::schema::Field; use DocId;
use crate::DocAddress; use Result;
use crate::DocId; use Score;
use crate::Result; use SegmentLocalId;
use crate::Score; use SegmentReader;
use crate::SegmentLocalId;
use crate::SegmentReader;
use std::fmt;
/// The `TopDocs` collector keeps track of the top `K` documents /// The Top Score Collector keeps track of the K documents
/// sorted by their score. /// sorted by their score.
/// ///
/// The implementation is based on a `BinaryHeap`. /// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents /// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`. /// is `O(n log K)`.
/// ///
/// This collector guarantees a stable sorting in case of a tie on the
/// document score. As such, it is suitable to implement pagination.
///
/// ```rust /// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::DocAddress;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index, Result};
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
@@ -69,12 +66,6 @@ use std::fmt;
/// ``` /// ```
pub struct TopDocs(TopCollector<Score>); pub struct TopDocs(TopCollector<Score>);
impl fmt::Debug for TopDocs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TopDocs({})", self.0.limit())
}
}
impl TopDocs { impl TopDocs {
/// Creates a top score collector, with a number of documents equal to "limit". /// Creates a top score collector, with a number of documents equal to "limit".
/// ///
@@ -86,306 +77,13 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field. /// Set top-K to rank documents by a given fast field.
/// ///
/// ```rust /// (By default, `TopDocs` collects the top-K documents sorted by
/// # use tantivy::schema::{Schema, FAST, TEXT}; /// the similarity score.)
/// # use tantivy::{doc, Index, Result, DocAddress}; pub fn order_by_field<T: PartialOrd + FastValue + Clone>(
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("title", TEXT);
/// # let rating = schema_builder.add_u64_field("rating", FAST);
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit()?;
/// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs,
/// # vec![(97u64, DocAddress(0u32, 1)),
/// # (80u64, DocAddress(0u32, 3))]);
/// # Ok(())
/// # }
///
///
/// /// Searches the document matching the given query, and
/// /// collects the top 10 documents, order by the u64-`field`
/// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query,
/// sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
///
/// // This is where we build our topdocs collector
/// //
/// // Note the generics parameter that needs to match the
/// // type `sort_by_field`.
/// let top_docs_by_rating = TopDocs
/// ::with_limit(10)
/// .order_by_u64_field(sort_by_field);
///
/// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for
/// // each documents.
/// //
/// // The vec is sorted decreasingly by `sort_by_field`, and has a
/// // length of 10, or less if not enough documents matched the
/// // query.
/// let resulting_docs: Vec<(u64, DocAddress)> =
/// searcher.search(query, &top_docs_by_rating)?;
///
/// Ok(resulting_docs)
/// }
/// ```
///
/// # Panics
///
/// May panic if the field requested is not a fast field.
///
pub fn order_by_u64_field(
self, self,
field: Field, field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> { ) -> TopDocsByField<T> {
self.custom_score(move |segment_reader: &SegmentReader| { TopDocsByField::new(field, self.0.limit())
let ff_reader = segment_reader
.fast_fields()
.u64(field)
.expect("Field requested is not a i64/u64 fast field.");
//TODO error message missmatch actual behavior for i64
move |doc: DocId| ff_reader.get(doc)
})
}
/// Ranks the documents using a custom score.
///
/// This method offers a convenient way to tweak or replace
/// the documents score. As suggested by the prototype you can
/// manually define your own [`ScoreTweaker`](./trait.ScoreTweaker.html)
/// and pass it as an argument, but there is a much simpler way to
/// tweak your score: you can use a closure as in the following
/// example.
///
/// # Example
///
/// Typically, you will want to rely on one or more fast fields,
/// to alter the original relevance `Score`.
///
/// For instance, in the following, we assume that we are implementing
/// an e-commerce website that has a fast field called `popularity`
/// that rates whether a product is typically often bought by users.
///
/// In the following example will will tweak our ranking a bit by
/// boosting popular products a notch.
///
/// In more serious application, this tweaking could involved running a
/// learning-to-rank model over various features
///
/// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId, Score};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
/// # schema_builder.add_text_field("product_name", TEXT);
/// # schema_builder.add_u64_field("popularity", FAST);
/// # schema_builder.build()
/// # }
/// #
/// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap();
/// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
///
/// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs
/// ::with_limit(10)
/// .tweak_score(move |segment_reader: &SegmentReader| {
/// // The argument is a function that returns our scoring
/// // function.
/// //
/// // The point of this "mother" function is to gather all
/// // of the segment level information we need for scoring.
/// // Typically, fast_fields.
/// //
/// // In our case, we will get a reader for the popularity
/// // fast field.
/// let popularity_reader =
/// segment_reader.fast_fields().u64(popularity).unwrap();
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
/// popularity_boost_score * original_score
/// }
/// });
/// # let reader = index.reader()?;
/// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<(Score, DocAddress)> =
/// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ```
///
/// # See also
/// [custom_score(...)](#method.custom_score).
pub fn tweak_score<TScore, TScoreSegmentTweaker, TScoreTweaker>(
self,
score_tweaker: TScoreTweaker,
) -> impl Collector<Fruit = Vec<(TScore, DocAddress)>>
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
{
TweakedScoreTopCollector::new(score_tweaker, self.0.limit())
}
/// Ranks the documents using a custom score.
///
/// This method offers a convenient way to use a different score.
///
/// As suggested by the prototype you can manually define your
/// own [`CustomScorer`](./trait.CustomScorer.html)
/// and pass it as an argument, but there is a much simpler way to
/// tweak your score: you can use a closure as in the following
/// example.
///
/// # Limitation
///
/// This method only makes it possible to compute the score from a given
/// `DocId`, fastfield values for the doc and any information you could
/// have precomputed beforehands. It does not make it possible for instance
/// to compute something like TfIdf as it does not have access to the list of query
/// terms present in the document, nor the term frequencies for the different terms.
///
/// It can be used if your search engine relies on a learning-to-rank model for instance,
/// which does not rely on the term frequencies or positions as features.
///
/// # Example
///
/// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
/// # schema_builder.add_text_field("product_name", TEXT);
/// # schema_builder.add_u64_field("popularity", FAST);
/// # schema_builder.add_u64_field("boosted", FAST);
/// # schema_builder.build()
/// # }
/// #
/// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap();
/// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// let boosted: Field = index.schema().get_field("boosted").unwrap();
/// # index_writer.add_document(doc!(boosted=>1u64, product_name => "The Diary of Muadib", popularity => 1u64));
/// # index_writer.add_document(doc!(boosted=>0u64, product_name => "A Dairy Cow", popularity => 10u64));
/// # index_writer.add_document(doc!(boosted=>0u64, product_name => "The Diary of a Young Girl", popularity => 15u64));
/// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
///
/// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs
/// ::with_limit(10)
/// .custom_score(move |segment_reader: &SegmentReader| {
/// // The argument is a function that returns our scoring
/// // function.
/// //
/// // The point of this "mother" function is to gather all
/// // of the segment level information we need for scoring.
/// // Typically, fast_fields.
/// //
/// // In our case, we will get a reader for the popularity
/// // fast field and a boosted field.
/// //
/// // We want to get boosted items score, and when we get
/// // a tie, return the item with the highest popularity.
/// //
/// // Note that this is implemented by using a `(u64, u64)`
/// // as a score.
/// let popularity_reader =
/// segment_reader.fast_fields().u64(popularity).unwrap();
/// let boosted_reader =
/// segment_reader.fast_fields().u64(boosted).unwrap();
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get(doc);
/// let boosted: u64 = boosted_reader.get(doc);
/// // Score do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order
/// // for free.
/// (boosted, popularity)
/// }
/// });
/// # let reader = index.reader()?;
/// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<((u64, u64), DocAddress)> =
/// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ```
///
/// # See also
/// [tweak_score(...)](#method.tweak_score).
pub fn custom_score<TScore, TCustomSegmentScorer, TCustomScorer>(
self,
custom_score: TCustomScorer,
) -> impl Collector<Fruit = Vec<(TScore, DocAddress)>>
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
{
CustomScoreTopCollector::new(custom_score, self.0.limit())
} }
} }
@@ -430,14 +128,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::TopDocs; use super::TopDocs;
use crate::collector::Collector; use query::QueryParser;
use crate::query::{AllQuery, Query, QueryParser}; use schema::Schema;
use crate::schema::{Field, Schema, FAST, STORED, TEXT}; use schema::TEXT;
use crate::DocAddress; use DocAddress;
use crate::Index; use Index;
use crate::IndexWriter; use Score;
use crate::Score;
use itertools::Itertools;
fn make_index() -> Index { fn make_index() -> Index {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -498,125 +194,10 @@ mod tests {
); );
} }
#[test]
fn test_top_collector_stable_sorting() {
let index = make_index();
// using AllQuery to get a constant score
let searcher = index.reader().unwrap().searcher();
let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
// precondition for the test to be meaningful: we did get documents
// with the same score
assert!(page_1.iter().map(|result| result.0).all_equal());
assert!(page_2.iter().map(|result| result.0).all_equal());
// sanity check since we're relying on make_index()
assert_eq!(page_1.len(), 2);
assert_eq!(page_2.len(), 3);
assert_eq!(page_1, &page_2[..page_1.len()]);
}
#[test] #[test]
#[should_panic] #[should_panic]
fn test_top_0() { fn test_top_0() {
TopDocs::with_limit(0); TopDocs::with_limit(0);
} }
const TITLE: &str = "title";
const SIZE: &str = "size";
#[test]
fn test_top_field_collector_not_at_capacity() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, query) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
index_writer.add_document(doc!(
title => "growler of beer",
size => 64u64,
));
index_writer.add_document(doc!(
title => "pint of beer",
size => 16u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
top_docs,
vec![
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))
]
);
}
#[test]
#[should_panic]
fn test_field_does_not_exist() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field::from_field_id(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
.expect("should panic");
}
#[test]
#[should_panic(expected = "Field requested is not a i64/u64 fast field")]
fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, STORED);
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
assert!(top_collector.for_segment(0, segment).is_ok());
}
fn index(
query: &str,
query_field: Field,
schema: Schema,
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<dyn Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer);
index_writer.commit().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]);
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
} }

View File

@@ -1,129 +0,0 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector};
use crate::DocAddress;
use crate::{DocId, Result, Score, SegmentReader};
pub(crate) struct TweakedScoreTopCollector<TScoreTweaker, TScore = Score> {
score_tweaker: TScoreTweaker,
collector: TopCollector<TScore>,
}
impl<TScoreTweaker, TScore> TweakedScoreTopCollector<TScoreTweaker, TScore>
where
TScore: Clone + PartialOrd,
{
pub fn new(
score_tweaker: TScoreTweaker,
limit: usize,
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
TweakedScoreTopCollector {
score_tweaker,
collector: TopCollector::with_limit(limit),
}
}
}
/// A `ScoreSegmentTweaker` makes it possible to modify the default score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
pub trait ScoreSegmentTweaker<TScore>: 'static {
/// Tweak the given `score` for the document `doc`.
fn score(&self, doc: DocId, score: Score) -> TScore;
}
/// `ScoreTweaker` makes it possible to tweak the score
/// emitted by the scorer into another one.
///
/// The `ScoreTweaker` itself does not make much of the computation itself.
/// Instead, it helps constructing `Self::Child` instances that will compute
/// the score at a segment scale.
pub trait ScoreTweaker<TScore>: Sync {
/// Type of the associated [`ScoreSegmentTweaker`](./trait.ScoreSegmentTweaker.html).
type Child: ScoreSegmentTweaker<TScore>;
/// Builds a child tweaker for a specific segment. The child scorer is associated to
/// a specific segment.
fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where
TScoreTweaker: ScoreTweaker<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;
type Child = TopTweakedScoreSegmentCollector<TScoreTweaker::Child, TScore>;
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> Result<Self::Child> {
let segment_scorer = self.score_tweaker.segment_tweaker(segment_reader)?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(TopTweakedScoreSegmentCollector {
segment_collector,
segment_scorer,
})
}
fn requires_scoring(&self) -> bool {
true
}
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct TopTweakedScoreSegmentCollector<TSegmentScoreTweaker, TScore>
where
TScore: 'static + PartialOrd + Clone + Send + Sync + Sized,
TSegmentScoreTweaker: ScoreSegmentTweaker<TScore>,
{
segment_collector: TopSegmentCollector<TScore>,
segment_scorer: TSegmentScoreTweaker,
}
impl<TSegmentScoreTweaker, TScore> SegmentCollector
for TopTweakedScoreSegmentCollector<TSegmentScoreTweaker, TScore>
where
TScore: 'static + PartialOrd + Clone + Send + Sync,
TSegmentScoreTweaker: 'static + ScoreSegmentTweaker<TScore>,
{
type Fruit = Vec<(TScore, DocAddress)>;
fn collect(&mut self, doc: DocId, score: Score) {
let score = self.segment_scorer.score(doc, score);
self.segment_collector.collect(doc, score);
}
fn harvest(self) -> Vec<(TScore, DocAddress)> {
self.segment_collector.harvest()
}
}
impl<F, TScore, TSegmentScoreTweaker> ScoreTweaker<TScore> for F
where
F: 'static + Send + Sync + Fn(&SegmentReader) -> TSegmentScoreTweaker,
TSegmentScoreTweaker: ScoreSegmentTweaker<TScore>,
{
type Child = TSegmentScoreTweaker;
fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
Ok((self)(segment_reader))
}
}
impl<F, TScore> ScoreSegmentTweaker<TScore> for F
where
F: 'static + Sync + Send + Fn(DocId, Score) -> TScore,
{
fn score(&self, doc: DocId, score: Score) -> TScore {
(self)(doc, score)
}
}

View File

@@ -5,7 +5,7 @@ use std::u64;
pub(crate) struct TinySet(u64); pub(crate) struct TinySet(u64);
impl fmt::Debug for TinySet { impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.into_iter().collect::<Vec<u32>>().fmt(f) self.into_iter().collect::<Vec<u32>>().fmt(f)
} }
} }
@@ -204,12 +204,12 @@ mod tests {
use super::BitSet; use super::BitSet;
use super::TinySet; use super::TinySet;
use crate::docset::DocSet; use docset::DocSet;
use crate::query::BitSetDocSet; use query::BitSetDocSet;
use crate::tests;
use crate::tests::generate_nonunique_unsorted;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::collections::HashSet; use std::collections::HashSet;
use tests;
use tests::generate_nonunique_unsorted;
#[test] #[test]
fn test_tiny_set() { fn test_tiny_set() {

View File

@@ -1,11 +1,11 @@
use crate::common::BinarySerializable; use common::BinarySerializable;
use crate::common::CountingWriter; use common::CountingWriter;
use crate::common::VInt; use common::VInt;
use crate::directory::ReadOnlySource; use directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr}; use directory::WritePtr;
use crate::schema::Field; use schema::Field;
use crate::space_usage::FieldUsage; use space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage; use space_usage::PerFieldSpaceUsage;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::Write; use std::io::Write;
use std::io::{self, Read}; use std::io::{self, Read};
@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
offsets: HashMap<FileAddr, u64>, offsets: HashMap<FileAddr, u64>,
} }
impl<W: TerminatingWrite + Write> CompositeWrite<W> { impl<W: Write> CompositeWrite<W> {
/// Crate a new API writer that writes a composite file /// Crate a new API writer that writes a composite file
/// in a given write. /// in a given write.
pub fn wrap(w: W) -> CompositeWrite<W> { pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,7 +91,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_len = (self.write.written_bytes() - footer_offset) as u32; let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?; footer_len.serialize(&mut self.write)?;
self.write.terminate() self.write.flush()?;
Ok(())
} }
} }
@@ -184,10 +185,10 @@ impl CompositeFile {
mod test { mod test {
use super::{CompositeFile, CompositeWrite}; use super::{CompositeFile, CompositeWrite};
use crate::common::BinarySerializable; use common::BinarySerializable;
use crate::common::VInt; use common::VInt;
use crate::directory::{Directory, RAMDirectory}; use directory::{Directory, RAMDirectory};
use crate::schema::Field; use schema::Field;
use std::io::Write; use std::io::Write;
use std::path::Path; use std::path::Path;
@@ -199,13 +200,13 @@ mod test {
let w = directory.open_write(path).unwrap(); let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w); let mut composite_write = CompositeWrite::wrap(w);
{ {
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32)); let mut write_0 = composite_write.for_field(Field(0u32));
VInt(32431123u64).serialize(&mut write_0).unwrap(); VInt(32431123u64).serialize(&mut write_0).unwrap();
write_0.flush().unwrap(); write_0.flush().unwrap();
} }
{ {
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32)); let mut write_4 = composite_write.for_field(Field(4u32));
VInt(2).serialize(&mut write_4).unwrap(); VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap(); write_4.flush().unwrap();
} }
@@ -215,18 +216,14 @@ mod test {
let r = directory.open_read(path).unwrap(); let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r).unwrap(); let composite_file = CompositeFile::open(&r).unwrap();
{ {
let file0 = composite_file let file0 = composite_file.open_read(Field(0u32)).unwrap();
.open_read(Field::from_field_id(0u32))
.unwrap();
let mut file0_buf = file0.as_slice(); let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0; let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0); assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64); assert_eq!(payload_0, 32431123u64);
} }
{ {
let file4 = composite_file let file4 = composite_file.open_read(Field(4u32)).unwrap();
.open_read(Field::from_field_id(4u32))
.unwrap();
let mut file4_buf = file4.as_slice(); let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0; let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0); assert_eq!(file4_buf.len(), 0);
@@ -234,4 +231,5 @@ mod test {
} }
} }
} }
} }

View File

@@ -1,5 +1,3 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -44,13 +42,6 @@ impl<W: Write> Write for CountingWriter<W> {
} }
} }
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token)
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {

View File

@@ -13,6 +13,7 @@ pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt}; pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use byteorder::LittleEndian as Endianness; pub use byteorder::LittleEndian as Endianness;
/// Segment's max doc must be `< MAX_DOC_LIMIT`. /// Segment's max doc must be `< MAX_DOC_LIMIT`.
/// ///
/// We do not allow segments with more than /// We do not allow segments with more than
@@ -99,53 +100,16 @@ pub fn u64_to_i64(val: u64) -> i64 {
(val ^ HIGHEST_BIT) as i64 (val ^ HIGHEST_BIT) as i64
} }
/// Maps a `f64` to `u64`
///
/// For simplicity, tantivy internally handles `f64` as `u64`.
/// The mapping is defined by this function.
///
/// Maps `f64` to `u64` so that lexical order is preserved.
///
/// This is more suited than simply casting (`val as u64`)
/// which would truncate the result
///
/// # See also
/// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
#[inline(always)]
pub fn f64_to_u64(val: f64) -> u64 {
let bits = val.to_bits();
if val.is_sign_positive() {
bits ^ HIGHEST_BIT
} else {
!bits
}
}
/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
#[inline(always)]
pub fn u64_to_f64(val: u64) -> f64 {
f64::from_bits(if val & HIGHEST_BIT != 0 {
val ^ HIGHEST_BIT
} else {
!val
})
}
#[cfg(test)] #[cfg(test)]
pub(crate) mod test { pub(crate) mod test {
pub use super::serialize::test::fixed_size_test; pub use super::serialize::test::fixed_size_test;
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; use super::{compute_num_bits, i64_to_u64, u64_to_i64};
use std::f64;
fn test_i64_converter_helper(val: i64) { fn test_i64_converter_helper(val: i64) {
assert_eq!(u64_to_i64(i64_to_u64(val)), val); assert_eq!(u64_to_i64(i64_to_u64(val)), val);
} }
fn test_f64_converter_helper(val: f64) {
assert_eq!(u64_to_f64(f64_to_u64(val)), val);
}
#[test] #[test]
fn test_i64_converter() { fn test_i64_converter() {
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value()); assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
@@ -158,29 +122,6 @@ pub(crate) mod test {
} }
} }
#[test]
fn test_f64_converter() {
test_f64_converter_helper(f64::INFINITY);
test_f64_converter_helper(f64::NEG_INFINITY);
test_f64_converter_helper(0.0);
test_f64_converter_helper(-0.0);
test_f64_converter_helper(1.0);
test_f64_converter_helper(-1.0);
}
#[test]
fn test_f64_order() {
assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY))
.contains(&f64_to_u64(f64::NAN))); //nan is not a number
assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); //same exponent, different mantissa
assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); //same mantissa, different exponent
assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); //different exponent and mantissa
assert!(f64_to_u64(1.0) > f64_to_u64(-1.0)); // pos > neg
assert!(f64_to_u64(-1.5) < f64_to_u64(-1.0));
assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));
assert!(f64_to_u64(-2.0) < f64_to_u64(-1.5));
}
#[test] #[test]
fn test_compute_num_bits() { fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8); assert_eq!(compute_num_bits(1), 1u8);

View File

@@ -1,6 +1,6 @@
use crate::common::Endianness;
use crate::common::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt}; use byteorder::{ReadBytesExt, WriteBytesExt};
use common::Endianness;
use common::VInt;
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::io::Read; use std::io::Read;
@@ -102,19 +102,6 @@ impl FixedSize for i64 {
const SIZE_IN_BYTES: usize = 8; const SIZE_IN_BYTES: usize = 8;
} }
impl BinarySerializable for f64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_f64::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_f64::<Endianness>()
}
}
impl FixedSize for f64 {
const SIZE_IN_BYTES: usize = 8;
}
impl BinarySerializable for u8 { impl BinarySerializable for u8 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u8(*self) writer.write_u8(*self)
@@ -149,7 +136,7 @@ impl BinarySerializable for String {
pub mod test { pub mod test {
use super::*; use super::*;
use crate::common::VInt; use common::VInt;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() { pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
@@ -185,11 +172,6 @@ pub mod test {
fixed_size_test::<i64>(); fixed_size_test::<i64>();
} }
#[test]
fn test_serialize_f64() {
fixed_size_test::<f64>();
}
#[test] #[test]
fn test_serialize_u64() { fn test_serialize_u64() {
fixed_size_test::<u64>(); fixed_size_test::<u64>();
@@ -199,7 +181,10 @@ pub mod test {
fn test_serialize_string() { fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1); assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4); assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8); assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
} }
#[test] #[test]

View File

@@ -30,16 +30,16 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
let val = u64::from(val); let val = u64::from(val);
const STOP_BIT: u64 = 128u64; const STOP_BIT: u64 = 128u64;
match val { match val {
0..=STOP_1 => (val | STOP_BIT, 1), 0...STOP_1 => (val | STOP_BIT, 1),
START_2..=STOP_2 => ( START_2...STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)), (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
2, 2,
), ),
START_3..=STOP_3 => ( START_3...STOP_3 => (
(val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)), (val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)),
3, 3,
), ),
START_4..=STOP_4 => ( START_4...STOP_4 => (
(val & MASK_1) (val & MASK_1)
| ((val & MASK_2) << 1) | ((val & MASK_2) << 1)
| ((val & MASK_3) << 2) | ((val & MASK_3) << 2)
@@ -171,8 +171,8 @@ mod tests {
use super::serialize_vint_u32; use super::serialize_vint_u32;
use super::VInt; use super::VInt;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use common::BinarySerializable;
fn aux_test_vint(val: u64) { fn aux_test_vint(val: u64) {
let mut v = [14u8; 10]; let mut v = [14u8; 10];

View File

@@ -1,6 +1,6 @@
use crate::Result;
use crossbeam::channel; use crossbeam::channel;
use scoped_pool::{Pool, ThreadConfig}; use scoped_pool::{Pool, ThreadConfig};
use Result;
/// Search executor whether search request are single thread or multithread. /// Search executor whether search request are single thread or multithread.
/// ///

View File

@@ -1,44 +1,44 @@
use super::segment::create_segment; use super::segment::create_segment;
use super::segment::Segment; use super::segment::Segment;
use crate::core::Executor; use core::Executor;
use crate::core::IndexMeta; use core::IndexMeta;
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
use crate::core::SegmentMetaInventory; use core::META_FILEPATH;
use crate::core::META_FILEPATH; use directory::ManagedDirectory;
use crate::directory::ManagedDirectory;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use crate::directory::MmapDirectory; use directory::MmapDirectory;
use crate::directory::INDEX_WRITER_LOCK; use directory::INDEX_WRITER_LOCK;
use crate::directory::{Directory, RAMDirectory}; use directory::{Directory, RAMDirectory};
use crate::error::DataCorruption; use error::DataCorruption;
use crate::error::TantivyError; use error::TantivyError;
use crate::indexer::index_writer::HEAP_SIZE_MIN; use indexer::index_writer::open_index_writer;
use crate::indexer::segment_updater::save_new_metas; use indexer::index_writer::HEAP_SIZE_MIN;
use crate::reader::IndexReader; use indexer::segment_updater::save_new_metas;
use crate::reader::IndexReaderBuilder;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::TokenizerManager;
use crate::IndexWriter;
use crate::Result;
use num_cpus; use num_cpus;
use reader::IndexReader;
use reader::IndexReaderBuilder;
use schema::Field;
use schema::FieldType;
use schema::Schema;
use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt; use std::fmt;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use std::path::{Path, PathBuf}; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use tokenizer::BoxedTokenizer;
use tokenizer::TokenizerManager;
use IndexWriter;
use Result;
fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> { fn load_metas(directory: &Directory) -> Result<IndexMeta> {
let meta_data = directory.atomic_read(&META_FILEPATH)?; let meta_data = directory.atomic_read(&META_FILEPATH)?;
let meta_string = String::from_utf8_lossy(&meta_data); let meta_string = String::from_utf8_lossy(&meta_data);
IndexMeta::deserialize(&meta_string, &inventory) serde_json::from_str(&meta_string)
.map_err(|e| { .map_err(|e| {
DataCorruption::new( DataCorruption::new(
META_FILEPATH.to_path_buf(), META_FILEPATH.clone(),
format!("Meta file cannot be deserialized. {:?}.", e), format!("Meta file cannot be deserialized. {:?}.", e),
) )
}) })
@@ -52,7 +52,6 @@ pub struct Index {
schema: Schema, schema: Schema,
executor: Arc<Executor>, executor: Arc<Executor>,
tokenizers: TokenizerManager, tokenizers: TokenizerManager,
inventory: SegmentMetaInventory,
} }
impl Index { impl Index {
@@ -149,23 +148,19 @@ impl Index {
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> { fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
save_new_metas(schema.clone(), directory.borrow_mut())?; save_new_metas(schema.clone(), directory.borrow_mut())?;
let metas = IndexMeta::with_schema(schema); let metas = IndexMeta::with_schema(schema);
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default()) Index::create_from_metas(directory, &metas)
} }
/// Creates a new index given a directory and an `IndexMeta`. /// Creates a new index given a directory and an `IndexMeta`.
fn create_from_metas( fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> {
directory: ManagedDirectory,
metas: &IndexMeta,
inventory: SegmentMetaInventory,
) -> Result<Index> {
let schema = metas.schema.clone(); let schema = metas.schema.clone();
Ok(Index { let index = Index {
directory, directory,
schema, schema,
tokenizers: TokenizerManager::default(), tokenizers: TokenizerManager::default(),
executor: Arc::new(Executor::single_thread()), executor: Arc::new(Executor::single_thread()),
inventory, };
}) Ok(index)
} }
/// Accessor for the tokenizer manager. /// Accessor for the tokenizer manager.
@@ -174,11 +169,11 @@ impl Index {
} }
/// Helper to access the tokenizer associated to a specific field. /// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> { pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<BoxedTokenizer>> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers(); let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type { let tokenizer_name_opt: Option<Box<BoxedTokenizer>> = match field_type {
FieldType::Str(text_options) => text_options FieldType::Str(text_options) => text_options
.get_indexing_options() .get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string()) .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -217,35 +212,16 @@ impl Index {
Index::open(mmap_directory) Index::open(mmap_directory)
} }
/// Returns the list of the segment metas tracked by the index.
///
/// Such segments can of course be part of the index,
/// but also they could be segments being currently built or in the middle of a merge
/// operation.
pub fn list_all_segment_metas(&self) -> Vec<SegmentMeta> {
self.inventory.all()
}
/// Creates a new segment_meta (Advanced user only).
///
/// As long as the `SegmentMeta` lives, the files associated with the
/// `SegmentMeta` are guaranteed to not be garbage collected, regardless of
/// whether the segment is recorded as part of the index or not.
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
self.inventory.new_segment_meta(segment_id, max_doc)
}
/// Open the index using the provided directory /// Open the index using the provided directory
pub fn open<D: Directory>(directory: D) -> Result<Index> { pub fn open<D: Directory>(directory: D) -> Result<Index> {
let directory = ManagedDirectory::wrap(directory)?; let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default(); let metas = load_metas(&directory)?;
let metas = load_metas(&directory, &inventory)?; Index::create_from_metas(directory, &metas)
Index::create_from_metas(directory, &metas, inventory)
} }
/// Reads the index meta file from the directory. /// Reads the index meta file from the directory.
pub fn load_metas(&self) -> Result<IndexMeta> { pub fn load_metas(&self) -> Result<IndexMeta> {
load_metas(self.directory(), &self.inventory) load_metas(self.directory())
} }
/// Open a new index writer. Attempts to acquire a lockfile. /// Open a new index writer. Attempts to acquire a lockfile.
@@ -289,7 +265,7 @@ impl Index {
) )
})?; })?;
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads; let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
IndexWriter::new( open_index_writer(
self, self,
num_threads, num_threads,
heap_size_in_bytes_per_thread, heap_size_in_bytes_per_thread,
@@ -339,9 +315,7 @@ impl Index {
/// Creates a new segment. /// Creates a new segment.
pub fn new_segment(&self) -> Segment { pub fn new_segment(&self) -> Segment {
let segment_meta = self let segment_meta = SegmentMeta::new(SegmentId::generate_random(), 0);
.inventory
.new_segment_meta(SegmentId::generate_random(), 0);
self.segment(segment_meta) self.segment(segment_meta)
} }
@@ -366,33 +340,28 @@ impl Index {
Ok(self Ok(self
.searchable_segment_metas()? .searchable_segment_metas()?
.iter() .iter()
.map(SegmentMeta::id) .map(|segment_meta| segment_meta.id())
.collect()) .collect())
} }
/// Returns the set of corrupted files
pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into)
}
} }
impl fmt::Debug for Index { impl fmt::Debug for Index {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Index({:?})", self.directory) write!(f, "Index({:?})", self.directory)
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::RAMDirectory; use directory::RAMDirectory;
use crate::schema::Field; use schema::Field;
use crate::schema::{Schema, INDEXED, TEXT}; use schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use Index;
use IndexReader;
use IndexWriter;
use ReloadPolicy;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -479,13 +448,13 @@ mod tests {
use super::*; use super::*;
use std::path::PathBuf; use std::path::PathBuf;
use tempfile::TempDir; use tempdir::TempDir;
#[test] #[test]
fn test_index_on_commit_reload_policy_mmap() { fn test_index_on_commit_reload_policy_mmap() {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap(); let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -524,7 +493,7 @@ mod tests {
fn test_index_on_commit_reload_policy_different_directories() { fn test_index_on_commit_reload_policy_different_directories() {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap(); let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let read_index = Index::open_in_dir(&tempdir_path).unwrap(); let read_index = Index::open_in_dir(&tempdir_path).unwrap();
@@ -568,37 +537,4 @@ mod tests {
} }
assert_eq!(count, 2); assert_eq!(count, 2);
} }
// This test will not pass on windows, because windows
// prevent deleting files that are MMapped.
#[cfg(not(target_os = "windows"))]
#[test]
fn garbage_collect_works_as_intended() {
let directory = RAMDirectory::create();
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let index = Index::create(directory.clone(), schema).unwrap();
let mut writer = index.writer_with_num_threads(8, 24_000_000).unwrap();
for i in 0u64..8_000u64 {
writer.add_document(doc!(field => i));
}
writer.commit().unwrap();
let mem_right_after_commit = directory.total_mem_usage();
thread::sleep(Duration::from_millis(1_000));
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 8_000);
writer.wait_merging_threads().unwrap();
let mem_right_after_merge_finished = directory.total_mem_usage();
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit);
}
} }

View File

@@ -1,184 +1,7 @@
use super::SegmentComponent; use core::SegmentMeta;
use crate::core::SegmentId; use schema::Schema;
use crate::schema::Schema;
use crate::Opstamp;
use census::{Inventory, TrackedObject};
use serde;
use serde_json; use serde_json;
use std::collections::HashSet;
use std::fmt; use std::fmt;
use std::path::PathBuf;
#[derive(Clone, Debug, Serialize, Deserialize)]
struct DeleteMeta {
num_deleted_docs: u32,
opstamp: Opstamp,
}
#[derive(Clone, Default)]
pub struct SegmentMetaInventory {
inventory: Inventory<InnerSegmentMeta>,
}
impl SegmentMetaInventory {
/// Lists all living `SegmentMeta` object at the time of the call.
pub fn all(&self) -> Vec<SegmentMeta> {
self.inventory
.list()
.into_iter()
.map(SegmentMeta::from)
.collect::<Vec<_>>()
}
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
let inner = InnerSegmentMeta {
segment_id,
max_doc,
deletes: None,
};
SegmentMeta::from(self.inventory.track(inner))
}
}
/// `SegmentMeta` contains simple meta information about a segment.
///
/// For instance the number of docs it contains,
/// how many are deleted, etc.
#[derive(Clone)]
pub struct SegmentMeta {
tracked: TrackedObject<InnerSegmentMeta>,
}
impl fmt::Debug for SegmentMeta {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
self.tracked.fmt(f)
}
}
impl serde::Serialize for SegmentMeta {
fn serialize<S>(
&self,
serializer: S,
) -> Result<<S as serde::Serializer>::Ok, <S as serde::Serializer>::Error>
where
S: serde::Serializer,
{
self.tracked.serialize(serializer)
}
}
impl From<TrackedObject<InnerSegmentMeta>> for SegmentMeta {
fn from(tracked: TrackedObject<InnerSegmentMeta>) -> SegmentMeta {
SegmentMeta { tracked }
}
}
impl SegmentMeta {
// Creates a new `SegmentMeta` object.
/// Returns the segment id.
pub fn id(&self) -> SegmentId {
self.tracked.segment_id
}
/// Returns the number of deleted documents.
pub fn num_deleted_docs(&self) -> u32 {
self.tracked
.deletes
.as_ref()
.map(|delete_meta| delete_meta.num_deleted_docs)
.unwrap_or(0u32)
}
/// Returns the list of files that
/// are required for the segment meta.
///
/// This is useful as the way tantivy removes files
/// is by removing all files that have been created by tantivy
/// and are not used by any segment anymore.
pub fn list_files(&self) -> HashSet<PathBuf> {
SegmentComponent::iterator()
.map(|component| self.relative_path(*component))
.collect::<HashSet<PathBuf>>()
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(&*match component {
SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
SegmentComponent::TERMS => ".term".to_string(),
SegmentComponent::STORE => ".store".to_string(),
SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
});
PathBuf::from(path)
}
/// Return the highest doc id + 1
///
/// If there are no deletes, then num_docs = max_docs
/// and all the doc ids contains in this segment
/// are exactly (0..max_doc).
pub fn max_doc(&self) -> u32 {
self.tracked.max_doc
}
/// Return the number of documents in the segment.
pub fn num_docs(&self) -> u32 {
self.max_doc() - self.num_deleted_docs()
}
/// Returns the `Opstamp` of the last delete operation
/// taken in account in this segment.
pub fn delete_opstamp(&self) -> Option<Opstamp> {
self.tracked
.deletes
.as_ref()
.map(|delete_meta| delete_meta.opstamp)
}
/// Returns true iff the segment meta contains
/// delete information.
pub fn has_deletes(&self) -> bool {
self.num_deleted_docs() > 0
}
#[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
let delete_meta = DeleteMeta {
num_deleted_docs,
opstamp,
};
let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
segment_id: inner_meta.segment_id,
max_doc: inner_meta.max_doc,
deletes: Some(delete_meta),
});
SegmentMeta { tracked }
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct InnerSegmentMeta {
segment_id: SegmentId,
max_doc: u32,
deletes: Option<DeleteMeta>,
}
impl InnerSegmentMeta {
pub fn track(self, inventory: &SegmentMetaInventory) -> SegmentMeta {
SegmentMeta {
tracked: inventory.inventory.track(self),
}
}
}
/// Meta information about the `Index`. /// Meta information about the `Index`.
/// ///
@@ -188,53 +11,16 @@ impl InnerSegmentMeta {
/// * the index `docstamp` /// * the index `docstamp`
/// * the schema /// * the schema
/// ///
#[derive(Clone, Serialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct IndexMeta { pub struct IndexMeta {
/// List of `SegmentMeta` informations associated to each finalized segment of the index.
pub segments: Vec<SegmentMeta>, pub segments: Vec<SegmentMeta>,
/// Index `Schema`
pub schema: Schema, pub schema: Schema,
/// Opstamp associated to the last `commit` operation. pub opstamp: u64,
pub opstamp: Opstamp,
#[serde(skip_serializing_if = "Option::is_none")]
/// Payload associated to the last commit.
///
/// Upon commit, clients can optionally add a small `Striing` payload to their commit
/// to help identify this commit.
/// This payload is entirely unused by tantivy.
pub payload: Option<String>,
}
#[derive(Deserialize)]
struct UntrackedIndexMeta {
pub segments: Vec<InnerSegmentMeta>,
pub schema: Schema,
pub opstamp: Opstamp,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub payload: Option<String>, pub payload: Option<String>,
} }
impl UntrackedIndexMeta {
pub fn track(self, inventory: &SegmentMetaInventory) -> IndexMeta {
IndexMeta {
segments: self
.segments
.into_iter()
.map(|inner_seg_meta| inner_seg_meta.track(inventory))
.collect::<Vec<SegmentMeta>>(),
schema: self.schema,
opstamp: self.opstamp,
payload: self.payload,
}
}
}
impl IndexMeta { impl IndexMeta {
/// Create an `IndexMeta` object representing a brand new `Index`
/// with the given index.
///
/// This new index does not contains any segments.
/// Opstamp will the value `0u64`.
pub fn with_schema(schema: Schema) -> IndexMeta { pub fn with_schema(schema: Schema) -> IndexMeta {
IndexMeta { IndexMeta {
segments: vec![], segments: vec![],
@@ -243,18 +29,10 @@ impl IndexMeta {
payload: None, payload: None,
} }
} }
pub(crate) fn deserialize(
meta_json: &str,
inventory: &SegmentMetaInventory,
) -> serde_json::Result<IndexMeta> {
let untracked_meta_json: UntrackedIndexMeta = serde_json::from_str(meta_json)?;
Ok(untracked_meta_json.track(inventory))
}
} }
impl fmt::Debug for IndexMeta { impl fmt::Debug for IndexMeta {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!( write!(
f, f,
"{}", "{}",
@@ -268,7 +46,7 @@ impl fmt::Debug for IndexMeta {
mod tests { mod tests {
use super::IndexMeta; use super::IndexMeta;
use crate::schema::{Schema, TEXT}; use schema::{Schema, TEXT};
use serde_json; use serde_json;
#[test] #[test]

View File

@@ -1,13 +1,13 @@
use crate::common::BinarySerializable; use common::BinarySerializable;
use crate::directory::ReadOnlySource; use directory::ReadOnlySource;
use crate::positions::PositionReader;
use crate::postings::TermInfo;
use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::FieldType;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::termdict::TermDictionary;
use owned_read::OwnedRead; use owned_read::OwnedRead;
use positions::PositionReader;
use postings::TermInfo;
use postings::{BlockSegmentPostings, SegmentPostings};
use schema::FieldType;
use schema::IndexRecordOption;
use schema::Term;
use termdict::TermDictionary;
/// The inverted index reader is in charge of accessing /// The inverted index reader is in charge of accessing
/// the inverted index associated to a specific field. /// the inverted index associated to a specific field.
@@ -32,7 +32,7 @@ pub struct InvertedIndexReader {
} }
impl InvertedIndexReader { impl InvertedIndexReader {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symetry
pub(crate) fn new( pub(crate) fn new(
termdict: TermDictionary, termdict: TermDictionary,
postings_source: ReadOnlySource, postings_source: ReadOnlySource,

View File

@@ -6,29 +6,33 @@ pub mod searcher;
mod segment; mod segment;
mod segment_component; mod segment_component;
mod segment_id; mod segment_id;
mod segment_meta;
mod segment_reader; mod segment_reader;
pub use self::executor::Executor; pub use self::executor::Executor;
pub use self::index::Index; pub use self::index::Index;
pub use self::index_meta::{IndexMeta, SegmentMeta, SegmentMetaInventory}; pub use self::index_meta::IndexMeta;
pub use self::inverted_index_reader::InvertedIndexReader; pub use self::inverted_index_reader::InvertedIndexReader;
pub use self::searcher::Searcher; pub use self::searcher::Searcher;
pub use self::segment::Segment; pub use self::segment::Segment;
pub use self::segment::SerializableSegment; pub use self::segment::SerializableSegment;
pub use self::segment_component::SegmentComponent; pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId; pub use self::segment_id::SegmentId;
pub use self::segment_meta::SegmentMeta;
pub use self::segment_reader::SegmentReader; pub use self::segment_reader::SegmentReader;
use once_cell::sync::Lazy; use std::path::PathBuf;
use std::path::Path;
/// The meta file contains all the information about the list of segments and the schema lazy_static! {
/// of the index.
pub static META_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new("meta.json"));
/// The managed file contains a list of files that were created by the tantivy /// The meta file contains all the information about the list of segments and the schema
/// and will therefore be garbage collected when they are deemed useless by tantivy. /// of the index.
/// pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json");
/// Removing this file is safe, but will prevent the garbage collection of all of the file that
/// are currently in the directory /// The managed file contains a list of files that were created by the tantivy
pub static MANAGED_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new(".managed.json")); /// and will therefore be garbage collected when they are deemed useless by tantivy.
///
/// Removing this file is safe, but will prevent the garbage collection of all of the file that
/// are currently in the directory
pub static ref MANAGED_FILEPATH: PathBuf = PathBuf::from(".managed.json");
}

View File

@@ -1,26 +1,26 @@
use crate::collector::Collector; use collector::Collector;
use crate::collector::SegmentCollector; use collector::SegmentCollector;
use crate::core::Executor; use core::Executor;
use crate::core::InvertedIndexReader; use core::InvertedIndexReader;
use crate::core::SegmentReader; use core::SegmentReader;
use crate::query::Query; use query::Query;
use crate::query::Scorer; use query::Scorer;
use crate::query::Weight; use query::Weight;
use crate::schema::Document; use schema::Document;
use crate::schema::Schema; use schema::Schema;
use crate::schema::{Field, Term}; use schema::{Field, Term};
use crate::space_usage::SearcherSpaceUsage; use space_usage::SearcherSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermMerger;
use crate::DocAddress;
use crate::Index;
use crate::Result;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use store::StoreReader;
use termdict::TermMerger;
use DocAddress;
use Index;
use Result;
fn collect_segment<C: Collector>( fn collect_segment<C: Collector>(
collector: &C, collector: &C,
weight: &dyn Weight, weight: &Weight,
segment_ord: u32, segment_ord: u32,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> Result<C::Fruit> { ) -> Result<C::Fruit> {
@@ -28,7 +28,7 @@ fn collect_segment<C: Collector>(
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?; let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
if let Some(delete_bitset) = segment_reader.delete_bitset() { if let Some(delete_bitset) = segment_reader.delete_bitset() {
scorer.for_each(&mut |doc, score| { scorer.for_each(&mut |doc, score| {
if delete_bitset.is_alive(doc) { if !delete_bitset.is_deleted(doc) {
segment_collector.collect(doc, score); segment_collector.collect(doc, score);
} }
}); });
@@ -59,7 +59,7 @@ impl Searcher {
) -> Searcher { ) -> Searcher {
let store_readers = segment_readers let store_readers = segment_readers
.iter() .iter()
.map(SegmentReader::get_store_reader) .map(|segment_reader| segment_reader.get_store_reader())
.collect(); .collect();
Searcher { Searcher {
schema, schema,
@@ -132,7 +132,7 @@ impl Searcher {
/// ///
/// Finally, the Collector merges each of the child collectors into itself for result usability /// Finally, the Collector merges each of the child collectors into itself for result usability
/// by the caller. /// by the caller.
pub fn search<C: Collector>(&self, query: &dyn Query, collector: &C) -> Result<C::Fruit> { pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> {
let executor = self.index.search_executor(); let executor = self.index.search_executor();
self.search_with_executor(query, collector, executor) self.search_with_executor(query, collector, executor)
} }
@@ -151,7 +151,7 @@ impl Searcher {
/// hurt it. It will however, decrease the average response time. /// hurt it. It will however, decrease the average response time.
pub fn search_with_executor<C: Collector>( pub fn search_with_executor<C: Collector>(
&self, &self,
query: &dyn Query, query: &Query,
collector: &C, collector: &C,
executor: &Executor, executor: &Executor,
) -> Result<C::Fruit> { ) -> Result<C::Fruit> {
@@ -203,7 +203,7 @@ impl FieldSearcher {
/// Returns a Stream over all of the sorted unique terms of /// Returns a Stream over all of the sorted unique terms of
/// for the given field. /// for the given field.
pub fn terms(&self) -> TermMerger<'_> { pub fn terms(&self) -> TermMerger {
let term_streamers: Vec<_> = self let term_streamers: Vec<_> = self
.inv_index_readers .inv_index_readers
.iter() .iter()
@@ -214,11 +214,11 @@ impl FieldSearcher {
} }
impl fmt::Debug for Searcher { impl fmt::Debug for Searcher {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let segment_ids = self let segment_ids = self
.segment_readers .segment_readers
.iter() .iter()
.map(SegmentReader::segment_id) .map(|segment_reader| segment_reader.segment_id())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
write!(f, "Searcher({:?})", segment_ids) write!(f, "Searcher({:?})", segment_ids)
} }

View File

@@ -1,17 +1,16 @@
use super::SegmentComponent; use super::SegmentComponent;
use crate::core::Index; use core::Index;
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use directory::Directory;
use crate::directory::{ReadOnlySource, WritePtr}; use directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use schema::Schema;
use crate::Opstamp;
use crate::Result;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::result; use std::result;
use Result;
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
@@ -21,7 +20,7 @@ pub struct Segment {
} }
impl fmt::Debug for Segment { impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Segment({:?})", self.id().uuid_string()) write!(f, "Segment({:?})", self.id().uuid_string())
} }
} }
@@ -51,7 +50,7 @@ impl Segment {
} }
#[doc(hidden)] #[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: u64) -> Segment {
Segment { Segment {
index: self.index, index: self.index,
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp), meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),

View File

@@ -2,10 +2,6 @@ use std::cmp::{Ord, Ordering};
use std::fmt; use std::fmt;
use uuid::Uuid; use uuid::Uuid;
#[cfg(test)]
use once_cell::sync::Lazy;
use std::error::Error;
use std::str::FromStr;
#[cfg(test)] #[cfg(test)]
use std::sync::atomic; use std::sync::atomic;
@@ -21,10 +17,10 @@ use std::sync::atomic;
pub struct SegmentId(Uuid); pub struct SegmentId(Uuid);
#[cfg(test)] #[cfg(test)]
static AUTO_INC_COUNTER: Lazy<atomic::AtomicUsize> = Lazy::new(|| atomic::AtomicUsize::default()); lazy_static! {
static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default();
#[cfg(test)] static ref ZERO_ARRAY: [u8; 8] = [0u8; 8];
const ZERO_ARRAY: [u8; 8] = [0u8; 8]; }
// During tests, we generate the segment id in a autoincrement manner // During tests, we generate the segment id in a autoincrement manner
// for consistency of segment id between run. // for consistency of segment id between run.
@@ -34,7 +30,7 @@ const ZERO_ARRAY: [u8; 8] = [0u8; 8];
#[cfg(test)] #[cfg(test)]
fn create_uuid() -> Uuid { fn create_uuid() -> Uuid {
let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst); let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY).unwrap() Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*ZERO_ARRAY).unwrap()
} }
#[cfg(not(test))] #[cfg(not(test))]
@@ -54,55 +50,19 @@ impl SegmentId {
/// and the rest is random. /// and the rest is random.
/// ///
/// Picking the first 8 chars is ok to identify /// Picking the first 8 chars is ok to identify
/// segments in a display message (e.g. a5c4dfcb). /// segments in a display message.
pub fn short_uuid_string(&self) -> String { pub fn short_uuid_string(&self) -> String {
(&self.0.to_simple_ref().to_string()[..8]).to_string() (&self.0.to_simple_ref().to_string()[..8]).to_string()
} }
/// Returns a segment uuid string. /// Returns a segment uuid string.
///
/// It consists in 32 lowercase hexadecimal chars
/// (e.g. a5c4dfcbdfe645089129e308e26d5523)
pub fn uuid_string(&self) -> String { pub fn uuid_string(&self) -> String {
self.0.to_simple_ref().to_string() self.0.to_simple_ref().to_string()
} }
/// Build a `SegmentId` string from the full uuid string.
///
/// E.g. "a5c4dfcbdfe645089129e308e26d5523"
pub fn from_uuid_string(uuid_string: &str) -> Result<SegmentId, SegmentIdParseError> {
FromStr::from_str(uuid_string)
}
}
/// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::Error);
impl Error for SegmentIdParseError {}
impl fmt::Debug for SegmentIdParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for SegmentIdParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl FromStr for SegmentId {
type Err = SegmentIdParseError;
fn from_str(uuid_string: &str) -> Result<Self, SegmentIdParseError> {
let uuid = Uuid::parse_str(uuid_string).map_err(SegmentIdParseError)?;
Ok(SegmentId(uuid))
}
} }
impl fmt::Debug for SegmentId { impl fmt::Debug for SegmentId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Seg({:?})", self.short_uuid_string()) write!(f, "Seg({:?})", self.short_uuid_string())
} }
} }
@@ -118,18 +78,3 @@ impl Ord for SegmentId {
self.0.as_bytes().cmp(other.0.as_bytes()) self.0.as_bytes().cmp(other.0.as_bytes())
} }
} }
#[cfg(test)]
mod tests {
use super::SegmentId;
#[test]
fn test_to_uuid_string() {
let full_uuid = "a5c4dfcbdfe645089129e308e26d5523";
let segment_id = SegmentId::from_uuid_string(full_uuid).unwrap();
assert_eq!(segment_id.uuid_string(), full_uuid);
assert_eq!(segment_id.short_uuid_string(), "a5c4dfcb");
// one extra char
assert!(SegmentId::from_uuid_string("a5c4dfcbdfe645089129e308e26d5523b").is_err());
}
}

174
src/core/segment_meta.rs Normal file
View File

@@ -0,0 +1,174 @@
use super::SegmentComponent;
use census::{Inventory, TrackedObject};
use core::SegmentId;
use serde;
use std::collections::HashSet;
use std::fmt;
use std::path::PathBuf;
// Global inventory (from the `census` crate) tracking every live
// `SegmentMeta`. Tracking lets `SegmentMeta::all()` enumerate the segments
// still referenced somewhere in the process, which tantivy's garbage
// collection uses to decide which segment files are safe to delete.
lazy_static! {
    static ref INVENTORY: Inventory<InnerSegmentMeta> = { Inventory::new() };
}
/// Delete-related metadata attached to a segment: the number of deleted
/// documents and the opstamp of the last delete operation reflected here.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct DeleteMeta {
    // count of documents marked deleted in this segment
    num_deleted_docs: u32,
    // opstamp of the last delete operation taken into account
    opstamp: u64,
}
/// `SegmentMeta` contains simple meta information about a segment.
///
/// For instance the number of docs it contains,
/// how many are deleted, etc.
#[derive(Clone)]
pub struct SegmentMeta {
    // Handle registered in the global `INVENTORY`; while any clone of this
    // handle is alive, the segment is considered "in use".
    tracked: TrackedObject<InnerSegmentMeta>,
}
impl fmt::Debug for SegmentMeta {
    /// Forwards formatting to the tracked inner metadata; the tracking
    /// wrapper itself carries no extra information worth printing.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        fmt::Debug::fmt(&self.tracked, f)
    }
}
impl serde::Serialize for SegmentMeta {
    /// Serializes the inner `InnerSegmentMeta` directly: the inventory
    /// tracking wrapper is transparent on the wire.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.tracked.serialize(serializer)
    }
}
impl<'a> serde::Deserialize<'a> for SegmentMeta {
    /// Deserializes an `InnerSegmentMeta` and registers it in the global
    /// `INVENTORY`, so a deserialized `SegmentMeta` is tracked exactly like
    /// a freshly created one.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        InnerSegmentMeta::deserialize(deserializer).map(|inner| SegmentMeta {
            tracked: INVENTORY.track(inner),
        })
    }
}
impl SegmentMeta {
    /// Lists all living `SegmentMeta` object at the time of the call.
    pub fn all() -> Vec<SegmentMeta> {
        INVENTORY
            .list()
            .into_iter()
            .map(|inner| SegmentMeta { tracked: inner })
            .collect::<Vec<_>>()
    }

    /// Creates a new `SegmentMeta` object, with no delete information,
    /// and registers it in the global inventory.
    #[doc(hidden)]
    pub fn new(segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
        let inner = InnerSegmentMeta {
            segment_id,
            max_doc,
            deletes: None,
        };
        SegmentMeta {
            tracked: INVENTORY.track(inner),
        }
    }

    /// Returns the segment id.
    pub fn id(&self) -> SegmentId {
        self.tracked.segment_id
    }

    /// Returns the number of deleted documents.
    ///
    /// Returns 0 when the segment carries no delete metadata.
    pub fn num_deleted_docs(&self) -> u32 {
        self.tracked
            .deletes
            .as_ref()
            .map(|delete_meta| delete_meta.num_deleted_docs)
            .unwrap_or(0u32)
    }

    /// Returns the list of files that
    /// are required for the segment meta.
    ///
    /// This is useful as the way tantivy removes files
    /// is by removing all files that have been created by tantivy
    /// and are not used by any segment anymore.
    pub fn list_files(&self) -> HashSet<PathBuf> {
        SegmentComponent::iterator()
            .map(|component| self.relative_path(*component))
            .collect::<HashSet<PathBuf>>()
    }

    /// Returns the relative path of a component of our segment.
    ///
    /// It just joins the segment id with the extension
    /// associated to a segment component.
    pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
        let mut path = self.id().uuid_string();
        // Only the delete component needs a dynamically built extension
        // (it embeds the delete opstamp); every other component has a static
        // extension, so we append `&'static str`s directly instead of
        // allocating an intermediate `String` per call.
        match component {
            SegmentComponent::POSTINGS => path.push_str(".idx"),
            SegmentComponent::POSITIONS => path.push_str(".pos"),
            SegmentComponent::POSITIONSSKIP => path.push_str(".posidx"),
            SegmentComponent::TERMS => path.push_str(".term"),
            SegmentComponent::STORE => path.push_str(".store"),
            SegmentComponent::FASTFIELDS => path.push_str(".fast"),
            SegmentComponent::FIELDNORMS => path.push_str(".fieldnorm"),
            SegmentComponent::DELETE => {
                path.push_str(&format!(".{}.del", self.delete_opstamp().unwrap_or(0)))
            }
        }
        PathBuf::from(path)
    }

    /// Return the highest doc id + 1
    ///
    /// If there are no deletes, then num_docs = max_docs
    /// and all the doc ids contains in this segment
    /// are exactly (0..max_doc).
    pub fn max_doc(&self) -> u32 {
        self.tracked.max_doc
    }

    /// Return the number of documents in the segment.
    pub fn num_docs(&self) -> u32 {
        self.max_doc() - self.num_deleted_docs()
    }

    /// Returns the opstamp of the last delete operation
    /// taken in account in this segment.
    pub fn delete_opstamp(&self) -> Option<u64> {
        self.tracked
            .deletes
            .as_ref()
            .map(|delete_meta| delete_meta.opstamp)
    }

    /// Returns true iff the segment meta contains
    /// delete information.
    pub fn has_deletes(&self) -> bool {
        self.num_deleted_docs() > 0
    }

    /// Returns a copy of this `SegmentMeta` carrying the given delete
    /// information. The segment id and max_doc are preserved; the returned
    /// meta is tracked in the inventory like the original.
    #[doc(hidden)]
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: u64) -> SegmentMeta {
        let delete_meta = DeleteMeta {
            num_deleted_docs,
            opstamp,
        };
        let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
            segment_id: inner_meta.segment_id,
            max_doc: inner_meta.max_doc,
            deletes: Some(delete_meta),
        });
        SegmentMeta { tracked }
    }
}
/// The actual segment metadata payload stored inside the inventory-tracked
/// wrapper (`SegmentMeta`). This is what gets serialized to `meta.json`.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct InnerSegmentMeta {
    // unique id of the segment
    segment_id: SegmentId,
    // highest doc id + 1 in the segment
    max_doc: u32,
    // delete information, `None` when the segment has no deletes recorded
    deletes: Option<DeleteMeta>,
}

View File

@@ -1,27 +1,30 @@
use crate::common::CompositeFile; use common::CompositeFile;
use crate::common::HasLen; use common::HasLen;
use crate::core::InvertedIndexReader; use core::InvertedIndexReader;
use crate::core::Segment; use core::Segment;
use crate::core::SegmentComponent; use core::SegmentComponent;
use crate::core::SegmentId; use core::SegmentId;
use crate::directory::ReadOnlySource; use directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet; use error::TantivyError;
use crate::fastfield::FacetReader; use fastfield::DeleteBitSet;
use crate::fastfield::FastFieldReaders; use fastfield::FacetReader;
use crate::fieldnorm::FieldNormReader; use fastfield::FastFieldReader;
use crate::schema::Field; use fastfield::{self, FastFieldNotAvailableError};
use crate::schema::FieldType; use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader};
use crate::schema::Schema; use fieldnorm::FieldNormReader;
use crate::space_usage::SegmentSpaceUsage; use schema::Cardinality;
use crate::store::StoreReader; use schema::Field;
use crate::termdict::TermDictionary; use schema::FieldType;
use crate::DocId; use schema::Schema;
use crate::Result; use space_usage::SegmentSpaceUsage;
use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use store::StoreReader;
use termdict::TermDictionary;
use DocId;
use Result;
/// Entry point to access all of the datastructures of the `Segment` /// Entry point to access all of the datastructures of the `Segment`
/// ///
@@ -48,7 +51,7 @@ pub struct SegmentReader {
postings_composite: CompositeFile, postings_composite: CompositeFile,
positions_composite: CompositeFile, positions_composite: CompositeFile,
positions_idx_composite: CompositeFile, positions_idx_composite: CompositeFile,
fast_fields_readers: Arc<FastFieldReaders>, fast_fields_composite: CompositeFile,
fieldnorms_composite: CompositeFile, fieldnorms_composite: CompositeFile,
store_source: ReadOnlySource, store_source: ReadOnlySource,
@@ -102,21 +105,93 @@ impl SegmentReader {
/// ///
/// # Panics /// # Panics
/// May panic if the index is corrupted. /// May panic if the index is corrupted.
pub fn fast_fields(&self) -> &FastFieldReaders { pub fn fast_field_reader<Item: FastValue>(
&self.fast_fields_readers &self,
field: Field,
) -> fastfield::Result<FastFieldReader<Item>> {
let field_entry = self.schema.get_field_entry(field);
if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue)
{
self.fast_fields_composite
.open_read(field)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
.map(FastFieldReader::open)
} else {
Err(FastFieldNotAvailableError::new(field_entry))
}
}
pub(crate) fn fast_field_reader_with_idx<Item: FastValue>(
&self,
field: Field,
idx: usize,
) -> fastfield::Result<FastFieldReader<Item>> {
if let Some(ff_source) = self.fast_fields_composite.open_read_with_idx(field, idx) {
Ok(FastFieldReader::open(ff_source))
} else {
let field_entry = self.schema.get_field_entry(field);
Err(FastFieldNotAvailableError::new(field_entry))
}
}
/// Accessor to the `MultiValueIntFastFieldReader` associated to a given `Field`.
/// May panick if the field is not a multivalued fastfield of the type `Item`.
pub fn multi_fast_field_reader<Item: FastValue>(
&self,
field: Field,
) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
let field_entry = self.schema.get_field_entry(field);
if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues)
{
let idx_reader = self.fast_field_reader_with_idx(field, 0)?;
let vals_reader = self.fast_field_reader_with_idx(field, 1)?;
Ok(MultiValueIntFastFieldReader::open(idx_reader, vals_reader))
} else {
Err(FastFieldNotAvailableError::new(field_entry))
}
}
/// Accessor to the `BytesFastFieldReader` associated to a given `Field`.
pub fn bytes_fast_field_reader(&self, field: Field) -> fastfield::Result<BytesFastFieldReader> {
let field_entry = self.schema.get_field_entry(field);
match *field_entry.field_type() {
FieldType::Bytes => {}
_ => return Err(FastFieldNotAvailableError::new(field_entry)),
}
let idx_reader = self
.fast_fields_composite
.open_read_with_idx(field, 0)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
.map(FastFieldReader::open)?;
let values = self
.fast_fields_composite
.open_read_with_idx(field, 1)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
Ok(BytesFastFieldReader::open(idx_reader, values))
} }
/// Accessor to the `FacetReader` associated to a given `Field`. /// Accessor to the `FacetReader` associated to a given `Field`.
pub fn facet_reader(&self, field: Field) -> Option<FacetReader> { pub fn facet_reader(&self, field: Field) -> Result<FacetReader> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
if field_entry.field_type() != &FieldType::HierarchicalFacet { if field_entry.field_type() != &FieldType::HierarchicalFacet {
return None; return Err(TantivyError::InvalidArgument(format!(
"The field {:?} is not a \
hierarchical facet.",
field_entry
)));
} }
let term_ords_reader = self.fast_fields().u64s(field)?; let term_ords_reader = self.multi_fast_field_reader(field)?;
let termdict_source = self.termdict_composite.open_read(field)?; let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"The field \"{}\" is a hierarchical \
but this segment does not seem to have the field term \
dictionary.",
field_entry.name()
))
})?;
let termdict = TermDictionary::from_source(&termdict_source); let termdict = TermDictionary::from_source(&termdict_source);
let facet_reader = FacetReader::new(term_ords_reader, termdict); let facet_reader = FacetReader::new(term_ords_reader, termdict);
Some(facet_reader) Ok(facet_reader)
} }
/// Accessor to the segment's `Field norms`'s reader. /// Accessor to the segment's `Field norms`'s reader.
@@ -172,12 +247,8 @@ impl SegmentReader {
} }
}; };
let schema = segment.schema();
let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?; let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?; let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
let fast_field_readers =
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?; let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?; let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
@@ -189,13 +260,14 @@ impl SegmentReader {
None None
}; };
let schema = segment.schema();
Ok(SegmentReader { Ok(SegmentReader {
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())), inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
max_doc: segment.meta().max_doc(), max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(), num_docs: segment.meta().num_docs(),
termdict_composite, termdict_composite,
postings_composite, postings_composite,
fast_fields_readers: fast_field_readers, fast_fields_composite,
fieldnorms_composite, fieldnorms_composite,
segment_id: segment.id(), segment_id: segment.id(),
store_source, store_source,
@@ -244,9 +316,10 @@ impl SegmentReader {
let postings_source = postings_source_opt.unwrap(); let postings_source = postings_source_opt.unwrap();
let termdict_source = self.termdict_composite.open_read(field).expect( let termdict_source = self
"Failed to open field term dictionary in composite file. Is the field indexed?", .termdict_composite
); .open_read(field)
.expect("Failed to open field term dictionary in composite file. Is the field indexed");
let positions_source = self let positions_source = self
.positions_composite .positions_composite
@@ -296,7 +369,7 @@ impl SegmentReader {
} }
/// Returns an iterator that will iterate over the alive document ids /// Returns an iterator that will iterate over the alive document ids
pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> { pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator {
SegmentReaderAliveDocsIterator::new(&self) SegmentReaderAliveDocsIterator::new(&self)
} }
@@ -308,19 +381,19 @@ impl SegmentReader {
self.postings_composite.space_usage(), self.postings_composite.space_usage(),
self.positions_composite.space_usage(), self.positions_composite.space_usage(),
self.positions_idx_composite.space_usage(), self.positions_idx_composite.space_usage(),
self.fast_fields_readers.space_usage(), self.fast_fields_composite.space_usage(),
self.fieldnorms_composite.space_usage(), self.fieldnorms_composite.space_usage(),
self.get_store_reader().space_usage(), self.get_store_reader().space_usage(),
self.delete_bitset_opt self.delete_bitset_opt
.as_ref() .as_ref()
.map(DeleteBitSet::space_usage) .map(|x| x.space_usage())
.unwrap_or(0), .unwrap_or(0),
) )
} }
} }
impl fmt::Debug for SegmentReader { impl fmt::Debug for SegmentReader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "SegmentReader({:?})", self.segment_id) write!(f, "SegmentReader({:?})", self.segment_id)
} }
} }
@@ -373,9 +446,9 @@ impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use crate::core::Index; use core::Index;
use crate::schema::{Schema, Term, STORED, TEXT}; use schema::{Schema, Term, STORED, TEXT};
use crate::DocId; use DocId;
#[test] #[test]
fn test_alive_docs_iterator() { fn test_alive_docs_iterator() {

View File

@@ -1,9 +1,9 @@
use crate::directory::directory_lock::Lock; use directory::directory_lock::Lock;
use crate::directory::error::LockError; use directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchCallback; use directory::WatchCallback;
use crate::directory::WatchHandle; use directory::WatchHandle;
use crate::directory::{ReadOnlySource, WritePtr}; use directory::{ReadOnlySource, WritePtr};
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -48,14 +48,14 @@ impl RetryPolicy {
/// ///
/// It is transparently associated to a lock file, that gets deleted /// It is transparently associated to a lock file, that gets deleted
/// on `Drop.` The lock is released automatically on `Drop`. /// on `Drop.` The lock is released automatically on `Drop`.
pub struct DirectoryLock(Box<dyn Send + Sync + 'static>); pub struct DirectoryLock(Box<Drop + Send + 'static>);
struct DirectoryLockGuard { struct DirectoryLockGuard {
directory: Box<dyn Directory>, directory: Box<Directory>,
path: PathBuf, path: PathBuf,
} }
impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock { impl<T: Drop + Send + 'static> From<Box<T>> for DirectoryLock {
fn from(underlying: Box<T>) -> Self { fn from(underlying: Box<T>) -> Self {
DirectoryLock(underlying) DirectoryLock(underlying)
} }
@@ -76,7 +76,7 @@ enum TryAcquireLockError {
fn try_acquire_lock( fn try_acquire_lock(
filepath: &Path, filepath: &Path,
directory: &mut dyn Directory, directory: &mut Directory,
) -> Result<DirectoryLock, TryAcquireLockError> { ) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e { let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists, OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
@@ -118,8 +118,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// ///
/// Specifically, subsequent writes or flushes should /// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object. /// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files create with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>; fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file /// Removes a file
@@ -159,8 +157,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// atomic_write. /// atomic_write.
/// ///
/// This should only be used for small files. /// This should only be used for small files.
///
/// You should only use this to read files create with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>; fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data. /// Atomically replace the content of a file with data.
@@ -208,20 +204,30 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// Internally, tantivy only uses this API to detect new commits to implement the /// Internally, tantivy only uses this API to detect new commits to implement the
/// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the /// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the
/// `OnCommit` `ReloadPolicy` to work properly. /// `OnCommit` `ReloadPolicy` to work properly.
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle>; fn watch(&self, watch_callback: WatchCallback) -> WatchHandle;
/// Ensure that all volatile files reach are persisted (in directory where that makes sense.)
///
/// In order to make Near Real Time efficient, tantivy introduced the notion of soft_commit vs
/// commit. Commit will call `.flush()`, while softcommit won't.
///
/// `meta.json` should be the last file to be flushed.
fn flush(&self) -> io::Result<()> {
Ok(())
}
} }
/// DirectoryClone /// DirectoryClone
pub trait DirectoryClone { pub trait DirectoryClone {
/// Clones the directory and boxes the clone /// Clones the directory and boxes the clone
fn box_clone(&self) -> Box<dyn Directory>; fn box_clone(&self) -> Box<Directory>;
} }
impl<T> DirectoryClone for T impl<T> DirectoryClone for T
where where
T: 'static + Directory + Clone, T: 'static + Directory + Clone,
{ {
fn box_clone(&self) -> Box<dyn Directory> { fn box_clone(&self) -> Box<Directory> {
Box::new(self.clone()) Box::new(self.clone())
} }
} }

View File

@@ -1,4 +1,3 @@
use once_cell::sync::Lazy;
use std::path::PathBuf; use std::path::PathBuf;
/// A directory lock. /// A directory lock.
@@ -29,27 +28,29 @@ pub struct Lock {
pub is_blocking: bool, pub is_blocking: bool,
} }
/// Only one process should be able to write tantivy's index at a time. lazy_static! {
/// This lock file, when present, is in charge of preventing other processes to open an IndexWriter. /// Only one process should be able to write tantivy's index at a time.
/// /// This lock file, when present, is in charge of preventing other processes to open an IndexWriter.
/// If the process is killed and this file remains, it is safe to remove it manually. ///
/// /// If the process is killed and this file remains, it is safe to remove it manually.
/// Failing to acquire this lock usually means a misuse of tantivy's API, ///
/// (creating more than one instance of the `IndexWriter`), are a spurious /// Failing to acquire this lock usually means a misuse of tantivy's API,
/// lock file remaining after a crash. In the latter case, removing the file after /// (creating more than one instance of the `IndexWriter`), are a spurious
/// checking no process running tantivy is running is safe. /// lock file remaining after a crash. In the latter case, removing the file after
pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock { /// checking no process running tantivy is running is safe.
filepath: PathBuf::from(".tantivy-writer.lock"), pub static ref INDEX_WRITER_LOCK: Lock = Lock {
is_blocking: false, filepath: PathBuf::from(".tantivy-writer.lock"),
}); is_blocking: false
/// The meta lock file is here to protect the segment files being opened by };
/// `IndexReader::reload()` from being garbage collected. /// The meta lock file is here to protect the segment files being opened by
/// It makes it possible for another process to safely consume /// `IndexReader::reload()` from being garbage collected.
/// our index in-writing. Ideally, we may have prefered `RWLock` semantics /// It makes it possible for another process to safely consume
/// here, but it is difficult to achieve on Windows. /// our index in-writing. Ideally, we may have prefered `RWLock` semantics
/// /// here, but it is difficult to achieve on Windows.
/// Opening segment readers is a very fast process. ///
pub static META_LOCK: Lazy<Lock> = Lazy::new(|| Lock { /// Opening segment readers is a very fast process.
filepath: PathBuf::from(".tantivy-meta.lock"), pub static ref META_LOCK: Lock = Lock {
is_blocking: true, filepath: PathBuf::from(".tantivy-meta.lock"),
}); is_blocking: true
};
}

View File

@@ -6,7 +6,7 @@ use std::path::PathBuf;
/// Error while trying to acquire a directory lock. /// Error while trying to acquire a directory lock.
#[derive(Debug, Fail)] #[derive(Debug, Fail)]
pub enum LockError { pub enum LockError {
/// Failed to acquired a lock as it is already held by another /// Failed to acquired a lock as it is already hold by another
/// client. /// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period. /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call. /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
@@ -33,7 +33,7 @@ impl Into<io::Error> for IOError {
} }
impl fmt::Display for IOError { impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.path { match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err), Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err), None => write!(f, "io error occurred: '{}'", self.err),
@@ -46,7 +46,7 @@ impl StdError for IOError {
"io error occurred" "io error occurred"
} }
fn cause(&self) -> Option<&dyn StdError> { fn cause(&self) -> Option<&StdError> {
Some(&self.err) Some(&self.err)
} }
} }
@@ -84,7 +84,7 @@ impl From<io::Error> for OpenDirectoryError {
} }
impl fmt::Display for OpenDirectoryError { impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
OpenDirectoryError::DoesNotExist(ref path) => { OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path) write!(f, "the underlying directory '{:?}' does not exist", path)
@@ -106,7 +106,7 @@ impl StdError for OpenDirectoryError {
"error occurred while opening a directory" "error occurred while opening a directory"
} }
fn cause(&self) -> Option<&dyn StdError> { fn cause(&self) -> Option<&StdError> {
None None
} }
} }
@@ -129,7 +129,7 @@ impl From<IOError> for OpenWriteError {
} }
impl fmt::Display for OpenWriteError { impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
OpenWriteError::FileAlreadyExists(ref path) => { OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path) write!(f, "the file '{:?}' already exists", path)
@@ -148,7 +148,7 @@ impl StdError for OpenWriteError {
"error occurred while opening a file for writing" "error occurred while opening a file for writing"
} }
fn cause(&self) -> Option<&dyn StdError> { fn cause(&self) -> Option<&StdError> {
match *self { match *self {
OpenWriteError::FileAlreadyExists(_) => None, OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err), OpenWriteError::IOError(ref err) => Some(err),
@@ -173,7 +173,7 @@ impl From<IOError> for OpenReadError {
} }
impl fmt::Display for OpenReadError { impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
OpenReadError::FileDoesNotExist(ref path) => { OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path) write!(f, "the file '{:?}' does not exist", path)
@@ -192,7 +192,7 @@ impl StdError for OpenReadError {
"error occurred while opening a file for reading" "error occurred while opening a file for reading"
} }
fn cause(&self) -> Option<&dyn StdError> { fn cause(&self) -> Option<&StdError> {
match *self { match *self {
OpenReadError::FileDoesNotExist(_) => None, OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err), OpenReadError::IOError(ref err) => Some(err),
@@ -217,7 +217,7 @@ impl From<IOError> for DeleteError {
} }
impl fmt::Display for DeleteError { impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
DeleteError::FileDoesNotExist(ref path) => { DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path) write!(f, "the file '{:?}' does not exist", path)
@@ -234,7 +234,7 @@ impl StdError for DeleteError {
"error occurred while deleting a file" "error occurred while deleting a file"
} }
fn cause(&self) -> Option<&dyn StdError> { fn cause(&self) -> Option<&StdError> {
match *self { match *self {
DeleteError::FileDoesNotExist(_) => None, DeleteError::FileDoesNotExist(_) => None,
DeleteError::IOError(ref err) => Some(err), DeleteError::IOError(ref err) => Some(err),

View File

@@ -1,213 +0,0 @@
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
// Size in bytes of the fixed trailer shared by all footer versions:
// five little-endian u32s (meta length, major, minor, patch, total footer
// length), as written by `Footer::to_bytes`.
const COMMON_FOOTER_SIZE: usize = 4 * 5;

/// Footer appended at the end of tantivy files, recording which tantivy
/// version wrote the file together with a version-specific payload.
#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
    // (major, minor, patch) of the tantivy crate that wrote the file
    pub tantivy_version: (u32, u32, u32),
    // human-readable description, e.g. "tantivy 0.10.0, index v1"
    pub meta: String,
    // version-specific footer payload
    pub versioned_footer: VersionedFooter,
}
impl Footer {
    /// Builds a footer stamped with the version of the tantivy crate being
    /// compiled (read from the `CARGO_PKG_VERSION_*` environment variables).
    pub fn new(versioned_footer: VersionedFooter) -> Self {
        let tantivy_version = (
            env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
            env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
            env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
        );
        Footer {
            tantivy_version,
            meta: format!(
                "tantivy {}.{}.{}, index v{}",
                tantivy_version.0,
                tantivy_version.1,
                tantivy_version.2,
                versioned_footer.version()
            ),
            versioned_footer,
        }
    }

    /// Serializes the footer: versioned payload, then the meta string, then
    /// the 20-byte common trailer (meta len, version triple, total length).
    pub fn to_bytes(&self) -> Vec<u8> {
        let mut res = self.versioned_footer.to_bytes();
        res.extend_from_slice(self.meta.as_bytes());
        let len = res.len();
        res.resize(len + COMMON_FOOTER_SIZE, 0);
        let mut common_footer = &mut res[len..];
        LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
        LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
        LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
        LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
        LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
        res
    }

    /// Parses a footer from the end of `data`.
    ///
    /// All length fields read from the file are validated before being used
    /// for slicing, so a truncated or corrupted file yields an
    /// `io::ErrorKind::UnexpectedEof` error instead of an index/underflow
    /// panic.
    pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
        let len = data.len();
        if len < COMMON_FOOTER_SIZE + 4 {
            // 4 bytes for index version, stored in versioned footer
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
                )
            );
        }
        let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
        // The recorded footer size must fit inside the file AND be large
        // enough to contain the common trailer plus the 4-byte version tag;
        // otherwise the slice indexing below would panic.
        if size > len || size < COMMON_FOOTER_SIZE + 4 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "File corrupted. The footer len is {}, while the entire file len is {}",
                    size, len
                ),
            ));
        }
        let footer = &data[len - size..];
        let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
        // The meta string and the common trailer must both fit inside the
        // footer, or `size - meta_len - 20` would underflow.
        if meta_len + COMMON_FOOTER_SIZE > size {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "File corrupted. The footer meta len is {}, while the footer len is {}",
                    meta_len, size
                ),
            ));
        }
        let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
        let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
        let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
        Ok(Footer {
            tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
            meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
            versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
        })
    }

    /// Splits `source` into its parsed footer and the remaining payload.
    pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
        let footer = Footer::from_bytes(source.as_slice())?;
        let reader = source.slice_to(source.as_slice().len() - footer.size());
        Ok((footer, reader))
    }

    /// Total number of bytes this footer occupies at the end of a file.
    pub fn size(&self) -> usize {
        self.versioned_footer.size() as usize + self.meta.len() + 20
    }
}
/// Version-specific part of the footer. `V0` carries a CRC32 checksum of
/// the file body.
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
    /// Footer written by a version of tantivy this build does not understand.
    UnknownVersion { version: u32, size: u32 },
    V0(u32), // crc
}

impl VersionedFooter {
    /// Serializes as `[version u32 LE][payload]`.
    ///
    /// # Panics
    /// Panics on `UnknownVersion`: an unsupported index must never be
    /// re-serialized.
    pub fn to_bytes(&self) -> Vec<u8> {
        match self {
            VersionedFooter::V0(crc) => {
                let mut res = Vec::with_capacity(8);
                res.extend_from_slice(&0u32.to_le_bytes());
                res.extend_from_slice(&crc.to_le_bytes());
                res
            }
            VersionedFooter::UnknownVersion { .. } => {
                panic!("Unsupported index should never get serialized");
            }
        }
    }

    /// Deserializes a versioned footer.
    ///
    /// # Errors
    /// Returns `UnexpectedEof` if `footer` is shorter than the 4-byte
    /// version tag (this used to be an `assert!`, i.e. a panic on corrupted
    /// input), or if a V0 footer is not exactly 8 bytes long.
    pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
        if footer.len() < 4 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "File corrupted. The versioned footer len is {}, while it should be at least 4",
                    footer.len()
                ),
            ));
        }
        let version = u32::from_le_bytes([footer[0], footer[1], footer[2], footer[3]]);
        match version {
            0 => {
                if footer.len() == 8 {
                    let crc = u32::from_le_bytes([footer[4], footer[5], footer[6], footer[7]]);
                    Ok(VersionedFooter::V0(crc))
                } else {
                    Err(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        format!(
                            "File corrupted. The versioned footer len is {}, while it should be 8",
                            footer.len()
                        ),
                    ))
                }
            }
            version => Ok(VersionedFooter::UnknownVersion {
                version,
                size: footer.len() as u32,
            }),
        }
    }

    /// Serialized size in bytes.
    pub fn size(&self) -> u32 {
        match self {
            VersionedFooter::V0(_) => 8,
            VersionedFooter::UnknownVersion { size, .. } => *size,
        }
    }

    /// Footer format version tag.
    pub fn version(&self) -> u32 {
        match self {
            VersionedFooter::V0(_) => 0,
            VersionedFooter::UnknownVersion { version, .. } => *version,
        }
    }

    /// CRC32 checksum, if this footer version carries one.
    pub fn crc(&self) -> Option<u32> {
        match self {
            VersionedFooter::V0(crc) => Some(*crc),
            VersionedFooter::UnknownVersion { .. } => None,
        }
    }
}
/// `Write` wrapper that computes a running CRC32 of every byte written and
/// appends a serialized `Footer` when the writer is terminated.
pub(crate) struct FooterProxy<W: TerminatingWrite> {
    /// always Some except after terminate call
    hasher: Option<Hasher>,
    /// always Some except after terminate call
    writer: Option<W>,
}
impl<W: TerminatingWrite> FooterProxy<W> {
    /// Wraps `writer`; a fresh CRC32 hasher starts accumulating from the
    /// first byte written.
    pub fn new(writer: W) -> Self {
        let hasher = Hasher::new();
        FooterProxy {
            writer: Some(writer),
            hasher: Some(hasher),
        }
    }
}
impl<W: TerminatingWrite> Write for FooterProxy<W> {
    /// Forwards to the inner writer and hashes exactly the bytes it accepted
    /// (which may be fewer than `buf.len()`).
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.writer.as_mut().unwrap().write(buf)?;
        let accepted = &buf[..written];
        self.hasher.as_mut().unwrap().update(accepted);
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        let inner = self.writer.as_mut().unwrap();
        inner.flush()
    }
}
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
    /// Finalizes the CRC, appends the serialized footer, then terminates the
    /// inner writer. Both the hasher and the writer slots are consumed, so
    /// any later use of this proxy panics by construction.
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        let hasher = self.hasher.take().unwrap();
        let footer_bytes = Footer::new(VersionedFooter::V0(hasher.finalize())).to_bytes();
        let mut inner = self.writer.take().unwrap();
        inner.write_all(&footer_bytes)?;
        inner.terminate()
    }
}
#[cfg(test)]
mod tests {
    use crate::directory::footer::{Footer, VersionedFooter};

    /// A serialized footer must deserialize back to an equal value.
    #[test]
    fn test_serialize_deserialize_footer() {
        let original = Footer::new(VersionedFooter::V0(123456));
        let serialized = original.to_bytes();
        let deserialized = Footer::from_bytes(&serialized).unwrap();
        assert_eq!(deserialized, original);
    }
}

View File

@@ -1,15 +1,11 @@
use crate::core::MANAGED_FILEPATH; use core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; use directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use directory::DirectoryLock;
use crate::directory::DirectoryLock; use directory::Lock;
use crate::directory::Lock; use directory::META_LOCK;
use crate::directory::META_LOCK; use directory::{ReadOnlySource, WritePtr};
use crate::directory::{ReadOnlySource, WritePtr}; use directory::{WatchCallback, WatchHandle};
use crate::directory::{WatchCallback, WatchHandle}; use error::DataCorruption;
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use crc32fast::Hasher;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
use std::io; use std::io;
@@ -18,6 +14,8 @@ use std::path::{Path, PathBuf};
use std::result; use std::result;
use std::sync::RwLockWriteGuard; use std::sync::RwLockWriteGuard;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use Directory;
use Result;
/// Returns true iff the file is "managed". /// Returns true iff the file is "managed".
/// Non-managed file are not subject to garbage collection. /// Non-managed file are not subject to garbage collection.
@@ -41,7 +39,7 @@ fn is_managed(path: &Path) -> bool {
/// useful anymore. /// useful anymore.
#[derive(Debug)] #[derive(Debug)]
pub struct ManagedDirectory { pub struct ManagedDirectory {
directory: Box<dyn Directory>, directory: Box<Directory>,
meta_informations: Arc<RwLock<MetaInformation>>, meta_informations: Arc<RwLock<MetaInformation>>,
} }
@@ -53,8 +51,8 @@ struct MetaInformation {
/// Saves the file containing the list of existing files /// Saves the file containing the list of existing files
/// that were created by tantivy. /// that were created by tantivy.
fn save_managed_paths( fn save_managed_paths(
directory: &mut dyn Directory, directory: &mut Directory,
wlock: &RwLockWriteGuard<'_, MetaInformation>, wlock: &RwLockWriteGuard<MetaInformation>,
) -> io::Result<()> { ) -> io::Result<()> {
let mut w = serde_json::to_vec(&wlock.managed_paths)?; let mut w = serde_json::to_vec(&wlock.managed_paths)?;
writeln!(&mut w)?; writeln!(&mut w)?;
@@ -71,7 +69,7 @@ impl ManagedDirectory {
let managed_files: HashSet<PathBuf> = serde_json::from_str(&managed_files_json) let managed_files: HashSet<PathBuf> = serde_json::from_str(&managed_files_json)
.map_err(|e| { .map_err(|e| {
DataCorruption::new( DataCorruption::new(
MANAGED_FILEPATH.to_path_buf(), MANAGED_FILEPATH.clone(),
format!("Managed file cannot be deserialized: {:?}. ", e), format!("Managed file cannot be deserialized: {:?}. ", e),
) )
})?; })?;
@@ -137,28 +135,28 @@ impl ManagedDirectory {
files_to_delete.push(managed_path.clone()); files_to_delete.push(managed_path.clone());
} }
} }
} else {
error!("Failed to acquire lock for GC");
} }
} }
let mut deleted_files = vec![]; let mut deleted_files = vec![];
for file_to_delete in files_to_delete { {
match self.delete(&file_to_delete) { for file_to_delete in files_to_delete {
Ok(_) => { match self.delete(&file_to_delete) {
info!("Deleted {:?}", file_to_delete); Ok(_) => {
deleted_files.push(file_to_delete); info!("Deleted {:?}", file_to_delete);
} deleted_files.push(file_to_delete);
Err(file_error) => { }
match file_error { Err(file_error) => {
DeleteError::FileDoesNotExist(_) => { match file_error {
deleted_files.push(file_to_delete); DeleteError::FileDoesNotExist(_) => {
} deleted_files.push(file_to_delete);
DeleteError::IOError(_) => { }
if !cfg!(target_os = "windows") { DeleteError::IOError(_) => {
// On windows, delete is expected to fail if the file if !cfg!(target_os = "windows") {
// is mmapped. // On windows, delete is expected to fail if the file
error!("Failed to delete {:?}", file_to_delete); // is mmapped.
error!("Failed to delete {:?}", file_to_delete);
}
} }
} }
} }
@@ -173,9 +171,11 @@ impl ManagedDirectory {
.meta_informations .meta_informations
.write() .write()
.expect("Managed directory wlock poisoned (2)."); .expect("Managed directory wlock poisoned (2).");
let managed_paths_write = &mut meta_informations_wlock.managed_paths; {
for delete_file in &deleted_files { let managed_paths_write = &mut meta_informations_wlock.managed_paths;
managed_paths_write.remove(delete_file); for delete_file in &deleted_files {
managed_paths_write.remove(delete_file);
}
} }
if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() { if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
error!("Failed to save the list of managed files."); error!("Failed to save the list of managed files.");
@@ -209,59 +209,17 @@ impl ManagedDirectory {
} }
Ok(()) Ok(())
} }
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
Ok(footer
.versioned_footer
.crc()
.map(|v| v == crc)
.unwrap_or(false))
}
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
hashset.insert(path);
}
}
Ok(hashset)
}
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?; self.directory.open_read(path)
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
} }
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new( self.directory.open_write(path)
self.directory
.open_write(path)?
.into_inner()
.map_err(|_| ())
.expect("buffer should be empty"),
))))
} }
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -285,7 +243,7 @@ impl Directory for ManagedDirectory {
self.directory.acquire_lock(lock) self.directory.acquire_lock(lock)
} }
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> { fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.directory.watch(watch_callback) self.directory.watch(watch_callback)
} }
} }
@@ -299,115 +257,101 @@ impl Clone for ManagedDirectory {
} }
} }
#[cfg(feature = "mmap")]
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite}; #[cfg(feature = "mmap")]
use std::collections::HashSet; mod mmap_specific {
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
#[test] use super::super::*;
fn test_managed_directory() { use std::path::Path;
let tempdir = TempDir::new().unwrap(); use tempdir::TempDir;
let tempdir_path = PathBuf::from(tempdir.path());
lazy_static! {
static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test");
static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
}
use directory::MmapDirectory;
use std::io::Write;
#[test]
fn test_managed_directory() {
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
{
let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
write_file.flush().unwrap();
}
{
managed_directory
.atomic_write(*TEST_PATH2, &vec![0u8, 1u8])
.unwrap();
}
{
assert!(managed_directory.exists(*TEST_PATH1));
assert!(managed_directory.exists(*TEST_PATH2));
}
{
let living_files: HashSet<PathBuf> =
[TEST_PATH1.to_owned()].into_iter().cloned().collect();
managed_directory.garbage_collect(|| living_files);
}
{
assert!(managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
}
}
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
{
assert!(managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
}
{
let living_files: HashSet<PathBuf> = HashSet::new();
managed_directory.garbage_collect(|| living_files);
}
{
assert!(!managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
}
}
}
#[test]
fn test_managed_directory_gc_while_mmapped() {
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("some_path_for_test_2");
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let write_file = managed_directory.open_write(test_path1).unwrap();
write_file.terminate().unwrap();
managed_directory managed_directory
.atomic_write(test_path2, &[0u8, 1u8]) .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
.unwrap(); .unwrap();
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(*TEST_PATH1));
assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
[test_path1.to_owned()].into_iter().cloned().collect(); managed_directory.garbage_collect(|| living_files.clone());
managed_directory.garbage_collect(|| living_files); if cfg!(target_os = "windows") {
assert!(managed_directory.exists(test_path1)); // On Windows, gc should try and fail the file as it is mmapped.
assert!(!managed_directory.exists(test_path2)); assert!(managed_directory.exists(*TEST_PATH1));
} // unmap should happen here.
{ drop(_mmap_read);
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); // The file should still be in the list of managed file and
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap(); // eventually be deleted once mmap is released.
assert!(managed_directory.exists(test_path1)); managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(*TEST_PATH1));
let living_files: HashSet<PathBuf> = HashSet::new(); } else {
managed_directory.garbage_collect(|| living_files); assert!(!managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(test_path1)); }
assert!(!managed_directory.exists(test_path2));
} }
} }
#[test]
fn test_managed_directory_gc_while_mmapped() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
managed_directory.garbage_collect(|| living_files.clone());
if cfg!(target_os = "windows") {
// On Windows, gc should try and fail the file as it is mmapped.
assert!(managed_directory.exists(test_path1));
// unmap should happen here.
drop(_mmap_read);
// The file should still be in the list of managed file and
// eventually be deleted once mmap is released.
managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1));
} else {
assert!(!managed_directory.exists(test_path1));
}
}
#[test]
fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2);
let mut file = OpenOptions::new()
.write(true)
.open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file);
let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2));
}
} }

View File

@@ -1,26 +1,23 @@
use fs2; extern crate fs2;
use notify; extern crate notify;
use self::fs2::FileExt; use self::fs2::FileExt;
use self::notify::RawEvent; use self::notify::RawEvent;
use self::notify::RecursiveMode; use self::notify::RecursiveMode;
use self::notify::Watcher; use self::notify::Watcher;
use crate::core::META_FILEPATH;
use crate::directory::error::LockError;
use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use atomicwrites; use atomicwrites;
use core::META_FILEPATH;
use directory::error::LockError;
use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use directory::read_only_source::BoxedData;
use directory::Directory;
use directory::DirectoryLock;
use directory::Lock;
use directory::ReadOnlySource;
use directory::WatchCallback;
use directory::WatchCallbackList;
use directory::WatchHandle;
use directory::WritePtr;
use memmap::Mmap; use memmap::Mmap;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::From; use std::convert::From;
@@ -37,7 +34,7 @@ use std::sync::Mutex;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
use std::thread; use std::thread;
use tempfile::TempDir; use tempdir::TempDir;
/// Create a default io error given a string. /// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error { pub(crate) fn make_io_err(msg: String) -> io::Error {
@@ -142,28 +139,42 @@ impl MmapCache {
} }
} }
struct WatcherWrapper { struct InnerWatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>, _watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>, watcher_router: WatchCallbackList,
}
impl InnerWatcherWrapper {
pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let mut watcher = notify::raw_watcher(tx)?;
watcher.watch(path, RecursiveMode::Recursive)?;
let inner = InnerWatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router: Default::default(),
};
Ok((inner, watcher_recv))
}
}
#[derive(Clone)]
pub(crate) struct WatcherWrapper {
inner: Arc<InnerWatcherWrapper>,
} }
impl WatcherWrapper { impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> { pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel(); let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
// We need to initialize the notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
let watcher = notify::raw_watcher(tx) _ => {
.and_then(|mut watcher| { panic!("Unknown error while starting watching directory {:?}", path);
watcher.watch(path, RecursiveMode::Recursive)?; }
Ok(watcher) })?;
}) let watcher_wrapper = WatcherWrapper {
.map_err(|err| match err { inner: Arc::new(inner),
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()), };
_ => { let watcher_wrapper_clone = watcher_wrapper.clone();
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
thread::Builder::new() thread::Builder::new()
.name("meta-file-watch-thread".to_string()) .name("meta-file-watch-thread".to_string())
.spawn(move || { .spawn(move || {
@@ -174,7 +185,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point. // We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() { if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH { if filename == *META_FILEPATH {
watcher_router_clone.broadcast(); watcher_wrapper_clone.inner.watcher_router.broadcast();
} }
} }
} }
@@ -187,15 +198,13 @@ impl WatcherWrapper {
} }
} }
} }
})?; })
Ok(WatcherWrapper { .expect("Failed to spawn thread to watch meta.json");
_watcher: Mutex::new(watcher), Ok(watcher_wrapper)
watcher_router,
})
} }
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle { pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.watcher_router.subscribe(watch_callback) self.inner.watcher_router.subscribe(watch_callback)
} }
} }
@@ -220,7 +229,7 @@ struct MmapDirectoryInner {
root_path: PathBuf, root_path: PathBuf,
mmap_cache: RwLock<MmapCache>, mmap_cache: RwLock<MmapCache>,
_temp_directory: Option<TempDir>, _temp_directory: Option<TempDir>,
watcher: RwLock<Option<WatcherWrapper>>, watcher: RwLock<WatcherWrapper>,
} }
impl MmapDirectoryInner { impl MmapDirectoryInner {
@@ -228,41 +237,24 @@ impl MmapDirectoryInner {
root_path: PathBuf, root_path: PathBuf,
temp_directory: Option<TempDir>, temp_directory: Option<TempDir>,
) -> Result<MmapDirectoryInner, OpenDirectoryError> { ) -> Result<MmapDirectoryInner, OpenDirectoryError> {
let watch_wrapper = WatcherWrapper::new(&root_path)?;
let mmap_directory_inner = MmapDirectoryInner { let mmap_directory_inner = MmapDirectoryInner {
root_path, root_path,
mmap_cache: Default::default(), mmap_cache: Default::default(),
_temp_directory: temp_directory, _temp_directory: temp_directory,
watcher: RwLock::new(None), watcher: RwLock::new(watch_wrapper),
}; };
Ok(mmap_directory_inner) Ok(mmap_directory_inner)
} }
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> { fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
// a lot of juggling here, to ensure we don't do anything that panics let mut wlock = self.watcher.write().unwrap();
// while the rwlock is held. That way we ensure that the rwlock cannot wlock.watch(watch_callback)
// be poisoned.
//
// The downside is that we might create a watch wrapper that is not useful.
let need_initialization = self.watcher.read().unwrap().is_none();
if need_initialization {
let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
let mut watch_wlock = self.watcher.write().unwrap();
// the watcher could have been initialized when we released the lock, and
// we do not want to lose the watched files that were set.
if watch_wlock.is_none() {
*watch_wlock = Some(watch_wrapper);
}
}
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
Ok(watch_wrapper.watch(watch_callback))
} else {
unreachable!("At this point, watch wrapper is supposed to be initialized");
}
} }
} }
impl fmt::Debug for MmapDirectory { impl fmt::Debug for MmapDirectory {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "MmapDirectory({:?})", self.inner.root_path) write!(f, "MmapDirectory({:?})", self.inner.root_path)
} }
} }
@@ -283,7 +275,7 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself. /// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RAMDirectory. /// For your unit tests, prefer the RAMDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> { pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?; let tempdir = TempDir::new("index").map_err(OpenDirectoryError::IoError)?;
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
MmapDirectory::new(tempdir_path, Some(tempdir)) MmapDirectory::new(tempdir_path, Some(tempdir))
} }
@@ -328,7 +320,7 @@ impl MmapDirectory {
#[cfg(windows)] #[cfg(windows)]
{ {
use std::os::windows::fs::OpenOptionsExt; use std::os::windows::fs::OpenOptionsExt;
use winapi::um::winbase; use winapi::winbase;
open_opts open_opts
.write(true) .write(true)
@@ -376,7 +368,7 @@ impl Drop for ReleaseLockFile {
/// This Write wraps a File, but has the specificity of /// This Write wraps a File, but has the specificity of
/// call `sync_all` on flush. /// call `sync_all` on flush.
struct SafeFileWriter(File); pub struct SafeFileWriter(File);
impl SafeFileWriter { impl SafeFileWriter {
fn new(file: File) -> SafeFileWriter { fn new(file: File) -> SafeFileWriter {
@@ -401,12 +393,6 @@ impl Seek for SafeFileWriter {
} }
} }
impl TerminatingWrite for SafeFileWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path); debug!("Open Read {:?}", path);
@@ -429,6 +415,7 @@ impl Directory for MmapDirectory {
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
/// removed before the file is deleted. /// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
debug!("Deleting file {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
match fs::remove_file(&full_path) { match fs::remove_file(&full_path) {
Ok(_) => self Ok(_) => self
@@ -526,7 +513,7 @@ impl Directory for MmapDirectory {
}))) })))
} }
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> { fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.inner.watch(watch_callback) self.inner.watch(watch_callback)
} }
} }
@@ -538,13 +525,13 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use schema::{Schema, SchemaBuilder, TEXT};
use crate::Index;
use crate::ReloadPolicy;
use std::fs; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use Index;
use ReloadPolicy;
#[test] #[test]
fn test_open_non_existant_path() { fn test_open_non_existant_path() {
@@ -637,7 +624,7 @@ mod tests {
fn test_watch_wrapper() { fn test_watch_wrapper() {
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let tmp_dir = tempfile::TempDir::new().unwrap(); let tmp_dir: TempDir = tempdir::TempDir::new("test_watch_wrapper").unwrap();
let tmp_dirpath = tmp_dir.path().to_owned(); let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap(); let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join("coucou"); let tmp_file = tmp_dirpath.join("coucou");

View File

@@ -9,11 +9,11 @@ mod mmap_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod footer;
mod managed_directory; mod managed_directory;
mod ram_directory; mod ram_directory;
mod read_only_source; mod read_only_source;
mod watch_event_router; mod watch_event_router;
mod nrt_directory;
/// Errors specific to the directory module. /// Errors specific to the directory module.
pub mod error; pub mod error;
@@ -25,49 +25,22 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList; pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle}; pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{BufWriter, Seek, Write};
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory; pub(crate) use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly /// Synonym of Seek + Write
pub struct AntiCallToken(()); pub trait SeekableWrite: Seek + Write {}
impl<T: Seek + Write> SeekableWrite for T {}
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write
/// and Seek. /// and Seek.
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>; pub type WritePtr = BufWriter<Box<SeekableWrite>>;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;

View File

@@ -0,0 +1,195 @@
use directory::Directory;
use std::path::{PathBuf, Path};
use directory::ReadOnlySource;
use directory::error::OpenReadError;
use directory::error::DeleteError;
use std::io::{BufWriter, Cursor};
use directory::SeekableWrite;
use directory::error::OpenWriteError;
use directory::WatchHandle;
use directory::ram_directory::InnerRamDirectory;
use std::sync::RwLock;
use std::sync::Arc;
use directory::WatchCallback;
use std::fmt;
use std::io;
use std::io::{Seek, Write};
use directory::DirectoryClone;
const BUFFER_LEN: usize = 1_000_000;
pub enum NRTWriter {
InRam {
buffer: Cursor<Vec<u8>>,
path: PathBuf,
nrt_directory: NRTDirectory
},
UnderlyingFile(BufWriter<Box<SeekableWrite>>)
}
impl NRTWriter {
pub fn new(path: PathBuf, nrt_directory: NRTDirectory) -> NRTWriter {
NRTWriter::InRam {
buffer: Cursor::new(Vec::with_capacity(BUFFER_LEN)),
path,
nrt_directory,
}
}
}
impl io::Seek for NRTWriter {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
match self {
NRTWriter::InRam { buffer, path, nrt_directory } => {
buffer.seek(pos)
}
NRTWriter::UnderlyingFile(file) => {
file.seek(pos)
}
}
}
}
impl io::Write for NRTWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.write_all(buf)?;
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
match self {
NRTWriter::InRam { buffer, path, nrt_directory } => {
let mut cache_wlock = nrt_directory.cache.write().unwrap();
cache_wlock.write(path.clone(), buffer.get_ref());
Ok(())
}
NRTWriter::UnderlyingFile(file) => {
file.flush()
}
}
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
// Working around the borrow checker.
let mut underlying_write_opt: Option<BufWriter<Box<SeekableWrite>>> = None;
if let NRTWriter::InRam { buffer, path, nrt_directory } = self {
if buffer.get_ref().len() + buf.len() > BUFFER_LEN {
// We can't keep this in RAM. Let's move it to the underlying directory.
underlying_write_opt = Some(nrt_directory.open_write(path)
.map_err(|open_err| {
io::Error::new(io::ErrorKind::Other, open_err)
})?);
}
}
if let Some(underlying_write) = underlying_write_opt {
*self = NRTWriter::UnderlyingFile(underlying_write);
}
match self {
NRTWriter::InRam { buffer, path, nrt_directory } => {
assert!(buffer.get_ref().len() + buf.len() <= BUFFER_LEN);
buffer.write_all(buf)
}
NRTWriter::UnderlyingFile(file) => {
file.write_all(buf)
}
}
}
}
pub struct NRTDirectory {
underlying: Box<Directory>,
cache: Arc<RwLock<InnerRamDirectory>>,
}
impl Clone for NRTDirectory {
fn clone(&self) -> Self {
NRTDirectory {
underlying: self.underlying.box_clone(),
cache: self.cache.clone()
}
}
}
impl NRTDirectory {
fn wrap(underlying: Box<Directory>) -> NRTDirectory {
NRTDirectory {
underlying,
cache: Default::default()
}
}
}
impl fmt::Debug for NRTDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NRTDirectory({:?})", self.underlying)
}
}
impl Directory for NRTDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
unimplemented!()
}
fn delete(&self, path: &Path) -> Result<(), DeleteError> {
// We explicitly release the lock, to prevent a panic on the underlying directory
// to poison the lock.
//
// File can only go from cache to underlying so the result does not lead to
// any inconsistency.
{
let mut cache_wlock = self.cache.write().unwrap();
if cache_wlock.exists(path) {
return cache_wlock.delete(path);
}
}
self.underlying.delete(path)
}
fn exists(&self, path: &Path) -> bool {
// We explicitly release the lock, to prevent a panic on the underlying directory
// to poison the lock.
//
// File can only go from cache to underlying so the result does not lead to
// any inconsistency.
{
let rlock_cache = self.cache.read().unwrap();
if rlock_cache.exists(path) {
return true;
}
}
self.underlying.exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<BufWriter<Box<SeekableWrite>>, OpenWriteError> {
let mut cache_wlock = self.cache.write().unwrap();
// TODO might poison our lock. I don't know have a sound solution yet.
let path_buf = path.to_owned();
if self.underlying.exists(path) {
return Err(OpenWriteError::FileAlreadyExists(path_buf));
}
let exists = cache_wlock.write(path_buf.clone(), &[]);
// force the creation of the file to mimic the MMap directory.
if exists {
Err(OpenWriteError::FileAlreadyExists(path_buf))
} else {
let vec_writer = NRTWriter::new(path_buf.clone(), self.clone());
Ok(BufWriter::new(Box::new(vec_writer)))
}
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
self.underlying.atomic_read(path)
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
self.underlying.atomic_write(path, data)
}
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.underlying.watch(watch_callback)
}
}

View File

@@ -1,10 +1,8 @@
use crate::core::META_FILEPATH; use core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use directory::WatchCallbackList;
use crate::directory::WatchCallbackList; use directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle}; use directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write}; use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
@@ -72,53 +70,43 @@ impl Write for VecWriter {
} }
} }
impl TerminatingWrite for VecWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[derive(Default)] #[derive(Default)]
struct InnerDirectory { pub(crate) struct InnerRamDirectory {
fs: HashMap<PathBuf, ReadOnlySource>, fs: HashMap<PathBuf, ReadOnlySource>,
watch_router: WatchCallbackList, watch_router: WatchCallbackList,
} }
impl InnerDirectory { impl InnerRamDirectory {
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool { pub fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
let data = ReadOnlySource::new(Vec::from(data)); let data = ReadOnlySource::new(Vec::from(data));
self.fs.insert(path, data).is_some() self.fs.insert(path, data).is_some()
} }
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> { pub fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.fs self.fs
.get(path) .get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path))) .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
.map(Clone::clone) .map(|el| el.clone())
} }
fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> { pub fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> {
match self.fs.remove(path) { match self.fs.remove(path) {
Some(_) => Ok(()), Some(_) => Ok(()),
None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))), None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
} }
} }
fn exists(&self, path: &Path) -> bool { pub fn exists(&self, path: &Path) -> bool {
self.fs.contains_key(path) self.fs.contains_key(path)
} }
fn watch(&mut self, watch_handle: WatchCallback) -> WatchHandle { pub fn watch(&mut self, watch_handle: WatchCallback) -> WatchHandle {
self.watch_router.subscribe(watch_handle) self.watch_router.subscribe(watch_handle)
} }
fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum()
}
} }
impl fmt::Debug for RAMDirectory { impl fmt::Debug for RAMDirectory {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RAMDirectory") write!(f, "RAMDirectory")
} }
} }
@@ -130,7 +118,7 @@ impl fmt::Debug for RAMDirectory {
/// ///
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub struct RAMDirectory { pub struct RAMDirectory {
fs: Arc<RwLock<InnerDirectory>>, fs: Arc<RwLock<InnerRamDirectory>>,
} }
impl RAMDirectory { impl RAMDirectory {
@@ -138,12 +126,6 @@ impl RAMDirectory {
pub fn create() -> RAMDirectory { pub fn create() -> RAMDirectory {
Self::default() Self::default()
} }
/// Returns the sum of the size of the different files
/// in the RAMDirectory.
pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage()
}
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
@@ -152,11 +134,6 @@ impl Directory for RAMDirectory {
} }
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| {
use crate::directory::error::IOError;
let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
Err(DeleteError::from(io_error))
});
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
@@ -184,7 +161,7 @@ impl Directory for RAMDirectory {
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string()) msg.unwrap_or("Undefined".to_string())
))); )));
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
@@ -200,7 +177,7 @@ impl Directory for RAMDirectory {
Ok(()) Ok(())
} }
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> { fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
Ok(self.fs.write().unwrap().watch(watch_callback)) self.fs.write().unwrap().watch(watch_callback)
} }
} }

View File

@@ -1,9 +1,9 @@
use crate::common::HasLen; use common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref}; use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>; pub type BoxedData = Box<Deref<Target = [u8]> + Send + Sync + 'static>;
/// Read object that represents files in tantivy. /// Read object that represents files in tantivy.
/// ///

View File

@@ -1,5 +1,5 @@
use super::*; use super::*;
use std::io::Write; use std::io::{Seek, SeekFrom, Write};
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
@@ -9,6 +9,10 @@ use std::thread;
use std::time; use std::time;
use std::time::Duration; use std::time::Duration;
lazy_static! {
static ref TEST_PATH: &'static Path = Path::new("some_path_for_test");
}
#[test] #[test]
fn test_ram_directory() { fn test_ram_directory() {
let mut ram_directory = RAMDirectory::create(); let mut ram_directory = RAMDirectory::create();
@@ -25,82 +29,98 @@ fn test_mmap_directory() {
#[test] #[test]
#[should_panic] #[should_panic]
fn ram_directory_panics_if_flush_forgotten() { fn ram_directory_panics_if_flush_forgotten() {
let test_path: &'static Path = Path::new("some_path_for_test");
let mut ram_directory = RAMDirectory::create(); let mut ram_directory = RAMDirectory::create();
let mut write_file = ram_directory.open_write(test_path).unwrap(); let mut write_file = ram_directory.open_write(*TEST_PATH).unwrap();
assert!(write_file.write_all(&[4]).is_ok()); assert!(write_file.write_all(&[4]).is_ok());
} }
fn test_simple(directory: &mut dyn Directory) { fn test_simple(directory: &mut Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
{ {
let mut write_file = directory.open_write(test_path).unwrap(); let mut write_file = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(test_path)); assert!(directory.exists(*TEST_PATH));
write_file.write_all(&[4]).unwrap(); write_file.write_all(&[4]).unwrap();
write_file.write_all(&[3]).unwrap(); write_file.write_all(&[3]).unwrap();
write_file.write_all(&[7, 3, 5]).unwrap(); write_file.write_all(&[7, 3, 5]).unwrap();
write_file.flush().unwrap(); write_file.flush().unwrap();
} }
{ {
let read_file = directory.open_read(test_path).unwrap(); let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file; let data: &[u8] = &*read_file;
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]); assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
} }
assert!(directory.delete(test_path).is_ok()); assert!(directory.delete(*TEST_PATH).is_ok());
assert!(!directory.exists(test_path)); assert!(!directory.exists(*TEST_PATH));
} }
fn test_rewrite_forbidden(directory: &mut dyn Directory) { fn test_seek(directory: &mut Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
{ {
directory.open_write(test_path).unwrap(); {
assert!(directory.exists(test_path)); let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[4, 3, 7, 3, 5]).unwrap();
write_file.seek(SeekFrom::Start(0)).unwrap();
write_file.write_all(&[3, 1]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[3u8, 1u8, 7u8, 3u8, 5u8]);
} }
{
assert!(directory.open_write(test_path).is_err()); assert!(directory.delete(*TEST_PATH).is_ok());
}
assert!(directory.delete(test_path).is_ok());
} }
fn test_write_create_the_file(directory: &mut dyn Directory) { fn test_rewrite_forbidden(directory: &mut Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
{ {
assert!(directory.open_read(test_path).is_err()); directory.open_write(*TEST_PATH).unwrap();
let _w = directory.open_write(test_path).unwrap(); assert!(directory.exists(*TEST_PATH));
assert!(directory.exists(test_path)); }
assert!(directory.open_read(test_path).is_ok()); {
assert!(directory.delete(test_path).is_ok()); assert!(directory.open_write(*TEST_PATH).is_err());
}
assert!(directory.delete(*TEST_PATH).is_ok());
}
fn test_write_create_the_file(directory: &mut Directory) {
{
assert!(directory.open_read(*TEST_PATH).is_err());
let _w = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
assert!(directory.open_read(*TEST_PATH).is_ok());
assert!(directory.delete(*TEST_PATH).is_ok());
} }
} }
fn test_directory_delete(directory: &mut dyn Directory) { fn test_directory_delete(directory: &mut Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); assert!(directory.open_read(*TEST_PATH).is_err());
assert!(directory.open_read(test_path).is_err()); let mut write_file = directory.open_write(*TEST_PATH).unwrap();
let mut write_file = directory.open_write(&test_path).unwrap();
write_file.write_all(&[1, 2, 3, 4]).unwrap(); write_file.write_all(&[1, 2, 3, 4]).unwrap();
write_file.flush().unwrap(); write_file.flush().unwrap();
{ {
let read_handle = directory.open_read(&test_path).unwrap(); let read_handle = directory.open_read(*TEST_PATH).unwrap();
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]); {
// Mapped files can't be deleted on Windows
if !cfg!(windows) {
assert!(directory.delete(&test_path).is_ok());
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]); assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
}
assert!(directory.delete(Path::new("SomeOtherPath")).is_err()); // Mapped files can't be deleted on Windows
if !cfg!(windows) {
assert!(directory.delete(*TEST_PATH).is_ok());
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
}
assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
}
} }
if cfg!(windows) { if cfg!(windows) {
assert!(directory.delete(&test_path).is_ok()); assert!(directory.delete(*TEST_PATH).is_ok());
} }
assert!(directory.open_read(&test_path).is_err()); assert!(directory.open_read(*TEST_PATH).is_err());
assert!(directory.delete(&test_path).is_err()); assert!(directory.delete(*TEST_PATH).is_err());
} }
fn test_directory(directory: &mut dyn Directory) { fn test_directory(directory: &mut Directory) {
test_simple(directory); test_simple(directory);
test_seek(directory);
test_rewrite_forbidden(directory); test_rewrite_forbidden(directory);
test_write_create_the_file(directory); test_write_create_the_file(directory);
test_directory_delete(directory); test_directory_delete(directory);
@@ -109,7 +129,7 @@ fn test_directory(directory: &mut dyn Directory) {
test_watch(directory); test_watch(directory);
} }
fn test_watch(directory: &mut dyn Directory) { fn test_watch(directory: &mut Directory) {
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let watch_callback = Box::new(move || { let watch_callback = Box::new(move || {
@@ -121,13 +141,13 @@ fn test_watch(directory: &mut dyn Directory) {
thread::sleep(Duration::new(0, 10_000)); thread::sleep(Duration::new(0, 10_000));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
let watch_handle = directory.watch(watch_callback).unwrap(); let watch_handle = directory.watch(watch_callback);
for i in 0..10 { for i in 0..10 {
assert_eq!(i, counter.load(Ordering::SeqCst)); assert_eq!(i, counter.load(Ordering::SeqCst));
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2") .atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok()); .is_ok());
for _ in 0..1_000 { for _ in 0..100 {
if counter.load(Ordering::SeqCst) > i { if counter.load(Ordering::SeqCst) > i {
break; break;
} }
@@ -143,7 +163,7 @@ fn test_watch(directory: &mut dyn Directory) {
assert_eq!(10, counter.load(Ordering::SeqCst)); assert_eq!(10, counter.load(Ordering::SeqCst));
} }
fn test_lock_non_blocking(directory: &mut dyn Directory) { fn test_lock_non_blocking(directory: &mut Directory) {
{ {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
@@ -168,7 +188,7 @@ fn test_lock_non_blocking(directory: &mut dyn Directory) {
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
} }
fn test_lock_blocking(directory: &mut dyn Directory) { fn test_lock_blocking(directory: &mut Directory) {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,

View File

@@ -3,7 +3,7 @@ use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
/// Type alias for callbacks registered when watching files of a `Directory`. /// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>; pub type WatchCallback = Box<Fn() -> () + Sync + Send>;
/// Helper struct to implement the watch method in `Directory` implementations. /// Helper struct to implement the watch method in `Directory` implementations.
/// ///
@@ -67,7 +67,7 @@ impl WatchCallbackList {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::WatchCallbackList; use directory::WatchCallbackList;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
@@ -152,4 +152,5 @@ mod tests {
thread::sleep(Duration::from_millis(WAIT_TIME)); thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
} }
} }

View File

@@ -1,9 +1,8 @@
use crate::common::BitSet; use common::BitSet;
use crate::fastfield::DeleteBitSet;
use crate::DocId;
use std::borrow::Borrow; use std::borrow::Borrow;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::cmp::Ordering; use std::cmp::Ordering;
use DocId;
/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`. /// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
#[derive(PartialEq, Eq, Debug)] #[derive(PartialEq, Eq, Debug)]
@@ -96,23 +95,9 @@ pub trait DocSet {
} }
/// Returns the number documents matching. /// Returns the number documents matching.
/// Calling this method consumes the `DocSet`.
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
let mut count = 0u32;
while self.advance() {
if !delete_bitset.is_deleted(self.doc()) {
count += 1u32;
}
}
count
}
/// Returns the count of documents, deleted or not.
/// Calling this method consumes the `DocSet`.
/// ///
/// Of course, the result is an upper bound of the result /// Calling this method consumes the `DocSet`.
/// given by `count()`. fn count(&mut self) -> u32 {
fn count_including_deleted(&mut self) -> u32 {
let mut count = 0u32; let mut count = 0u32;
while self.advance() { while self.advance() {
count += 1u32; count += 1u32;
@@ -142,14 +127,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.size_hint() unboxed.size_hint()
} }
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 { fn count(&mut self) -> u32 {
let unboxed: &mut TDocSet = self.borrow_mut(); let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.count(delete_bitset) unboxed.count()
}
fn count_including_deleted(&mut self) -> u32 {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.count_including_deleted()
} }
fn append_to_bitset(&mut self, bitset: &mut BitSet) { fn append_to_bitset(&mut self, bitset: &mut BitSet) {

View File

@@ -2,11 +2,11 @@
use std::io; use std::io;
use crate::directory::error::LockError; use directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::fastfield::FastFieldNotAvailableError; use fastfield::FastFieldNotAvailableError;
use crate::query; use query;
use crate::schema; use schema;
use serde_json; use serde_json;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
@@ -34,7 +34,7 @@ impl DataCorruption {
} }
impl fmt::Debug for DataCorruption { impl fmt::Debug for DataCorruption {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "Data corruption: ")?; write!(f, "Data corruption: ")?;
if let Some(ref filepath) = &self.filepath { if let Some(ref filepath) = &self.filepath {
write!(f, "(in file `{:?}`)", filepath)?; write!(f, "(in file `{:?}`)", filepath)?;
@@ -77,6 +77,9 @@ pub enum TantivyError {
/// An Error appeared related to the schema. /// An Error appeared related to the schema.
#[fail(display = "Schema error: '{}'", _0)] #[fail(display = "Schema error: '{}'", _0)]
SchemaError(String), SchemaError(String),
/// Tried to access a fastfield reader for a field not configured accordingly.
#[fail(display = "Fast field not available: '{:?}'", _0)]
FastFieldError(#[cause] FastFieldNotAvailableError),
/// System error. (e.g.: We failed spawning a new thread) /// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
@@ -90,7 +93,7 @@ impl From<DataCorruption> for TantivyError {
impl From<FastFieldNotAvailableError> for TantivyError { impl From<FastFieldNotAvailableError> for TantivyError {
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError { fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
TantivyError::SchemaError(format!("{}", fastfield_error)) TantivyError::FastFieldError(fastfield_error)
} }
} }

View File

@@ -6,8 +6,8 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::schema::Schema; use schema::Schema;
use crate::Index; use Index;
#[test] #[test]
fn test_bytes() { fn test_bytes() {
@@ -23,14 +23,14 @@ mod tests {
index_writer.add_document(doc!(field=>vec![0u8; 1000])); index_writer.add_document(doc!(field=>vec![0u8; 1000]));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap(); let bytes_reader = reader.bytes_fast_field_reader(field).unwrap();
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]); assert_eq!(bytes_reader.get_val(0), &[0u8, 1, 2, 3]);
assert!(bytes_reader.get_bytes(1).is_empty()); assert!(bytes_reader.get_val(1).is_empty());
assert_eq!(bytes_reader.get_bytes(2), &[255u8]); assert_eq!(bytes_reader.get_val(2), &[255u8]);
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]); assert_eq!(bytes_reader.get_val(3), &[1u8, 3, 5, 7, 9]);
let long = vec![0u8; 1000]; let long = vec![0u8; 1000];
assert_eq!(bytes_reader.get_bytes(4), long.as_slice()); assert_eq!(bytes_reader.get_val(4), long.as_slice());
} }
} }

View File

@@ -1,8 +1,8 @@
use owning_ref::OwningRef; use owning_ref::OwningRef;
use crate::directory::ReadOnlySource; use directory::ReadOnlySource;
use crate::fastfield::FastFieldReader; use fastfield::FastFieldReader;
use crate::DocId; use DocId;
/// Reader for byte array fast fields /// Reader for byte array fast fields
/// ///
@@ -14,7 +14,6 @@ use crate::DocId;
/// ///
/// Reading the value for a document is done by reading the start index for it, /// Reading the value for a document is done by reading the start index for it,
/// and the start index for the next document, and keeping the bytes in between. /// and the start index for the next document, and keeping the bytes in between.
#[derive(Clone)]
pub struct BytesFastFieldReader { pub struct BytesFastFieldReader {
idx_reader: FastFieldReader<u64>, idx_reader: FastFieldReader<u64>,
values: OwningRef<ReadOnlySource, [u8]>, values: OwningRef<ReadOnlySource, [u8]>,
@@ -29,20 +28,10 @@ impl BytesFastFieldReader {
BytesFastFieldReader { idx_reader, values } BytesFastFieldReader { idx_reader, values }
} }
fn range(&self, doc: DocId) -> (usize, usize) { /// Returns the bytes associated to the given `doc`
pub fn get_val(&self, doc: DocId) -> &[u8] {
let start = self.idx_reader.get(doc) as usize; let start = self.idx_reader.get(doc) as usize;
let stop = self.idx_reader.get(doc + 1) as usize; let stop = self.idx_reader.get(doc + 1) as usize;
(start, stop)
}
/// Returns the bytes associated to the given `doc`
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
let (start, stop) = self.range(doc);
&self.values[start..stop] &self.values[start..stop]
} }
/// Returns the overall number of bytes in this bytes fast field.
pub fn total_num_bytes(&self) -> usize {
self.values.len()
}
} }

View File

@@ -1,8 +1,8 @@
use std::io; use std::io;
use crate::fastfield::serializer::FastFieldSerializer; use fastfield::serializer::FastFieldSerializer;
use crate::schema::{Document, Field, Value}; use schema::{Document, Field, Value};
use crate::DocId; use DocId;
/// Writer for byte array (as in, any number of bytes per document) fast fields /// Writer for byte array (as in, any number of bytes per document) fast fields
/// ///

View File

@@ -1,11 +1,11 @@
use crate::common::HasLen;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::space_usage::ByteCount;
use crate::DocId;
use bit_set::BitSet; use bit_set::BitSet;
use common::HasLen;
use directory::ReadOnlySource;
use directory::WritePtr;
use space_usage::ByteCount;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
use DocId;
/// Write a delete `BitSet` /// Write a delete `BitSet`
/// ///
@@ -53,18 +53,16 @@ impl DeleteBitSet {
} }
} }
/// Returns true iff the document is still "alive". In other words, if it has not been deleted. /// Returns whether the document has been marked as deleted.
pub fn is_alive(&self, doc: DocId) -> bool {
!self.is_deleted(doc)
}
/// Returns true iff the document has been marked as deleted.
#[inline(always)]
pub fn is_deleted(&self, doc: DocId) -> bool { pub fn is_deleted(&self, doc: DocId) -> bool {
let byte_offset = doc / 8u32; if self.len == 0 {
let b: u8 = (*self.data)[byte_offset as usize]; false
let shift = (doc & 7u32) as u8; } else {
b & (1u8 << shift) != 0 let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift) != 0
}
} }
/// Summarize total space usage of this bitset. /// Summarize total space usage of this bitset.
@@ -82,8 +80,8 @@ impl HasLen for DeleteBitSet {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::directory::*;
use bit_set::BitSet; use bit_set::BitSet;
use directory::*;
use std::path::PathBuf; use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet) { fn test_delete_bitset_helper(bitset: &BitSet) {

View File

@@ -1,11 +1,11 @@
use crate::schema::FieldEntry; use schema::FieldEntry;
use std::result; use std::result;
/// `FastFieldNotAvailableError` is returned when the /// `FastFieldNotAvailableError` is returned when the
/// user requested for a fast field reader, and the field was not /// user requested for a fast field reader, and the field was not
/// defined in the schema as a fast field. /// defined in the schema as a fast field.
#[derive(Debug, Fail)] #[derive(Debug, Fail)]
#[fail(display = "Fast field not available: '{:?}'", field_name)] #[fail(display = "field not available: '{:?}'", field_name)]
pub struct FastFieldNotAvailableError { pub struct FastFieldNotAvailableError {
field_name: String, field_name: String,
} }

View File

@@ -1,9 +1,9 @@
use super::MultiValueIntFastFieldReader; use super::MultiValueIntFastFieldReader;
use crate::schema::Facet; use schema::Facet;
use crate::termdict::TermDictionary;
use crate::termdict::TermOrdinal;
use crate::DocId;
use std::str; use std::str;
use termdict::TermDictionary;
use termdict::TermOrdinal;
use DocId;
/// The facet reader makes it possible to access the list of /// The facet reader makes it possible to access the list of
/// facets associated to a given document in a specific /// facets associated to a given document in a specific

View File

@@ -30,13 +30,12 @@ pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader; pub use self::facet_reader::FacetReader;
pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter}; pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter};
pub use self::reader::FastFieldReader; pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::common; use common;
use crate::schema::Cardinality; use schema::Cardinality;
use crate::schema::FieldType; use schema::FieldType;
use crate::schema::Value; use schema::Value;
mod bytes; mod bytes;
mod delete; mod delete;
@@ -44,12 +43,11 @@ mod error;
mod facet_reader; mod facet_reader;
mod multivalued; mod multivalued;
mod reader; mod reader;
mod readers;
mod serializer; mod serializer;
mod writer; mod writer;
/// Trait for types that are allowed for fast fields: (u64, i64 and f64). /// Trait for types that are allowed for fast fields: (u64 or i64).
pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd { pub trait FastValue: Default + Clone + Copy {
/// Converts a value from u64 /// Converts a value from u64
/// ///
/// Internally all fast field values are encoded as u64. /// Internally all fast field values are encoded as u64.
@@ -80,6 +78,10 @@ impl FastValue for u64 {
*self *self
} }
fn as_u64(&self) -> u64 {
*self
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> { fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type { match *field_type {
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(), FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
@@ -87,10 +89,6 @@ impl FastValue for u64 {
_ => None, _ => None,
} }
} }
fn as_u64(&self) -> u64 {
*self
}
} }
impl FastValue for i64 { impl FastValue for i64 {
@@ -114,33 +112,11 @@ impl FastValue for i64 {
} }
} }
impl FastValue for f64 {
fn from_u64(val: u64) -> Self {
common::u64_to_f64(val)
}
fn to_u64(&self) -> u64 {
common::f64_to_u64(*self)
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.to_bits()
}
}
fn value_to_u64(value: &Value) -> u64 { fn value_to_u64(value: &Value) -> u64 {
match *value { match *value {
Value::U64(ref val) => *val, Value::U64(ref val) => *val,
Value::I64(ref val) => common::i64_to_u64(*val), Value::I64(ref val) => common::i64_to_u64(*val),
Value::F64(ref val) => common::f64_to_u64(*val), _ => panic!("Expected a u64/i64 field, got {:?} ", value),
_ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
} }
} }
@@ -148,27 +124,27 @@ fn value_to_u64(value: &Value) -> u64 {
mod tests { mod tests {
use super::*; use super::*;
use crate::common::CompositeFile; use common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use fastfield::FastFieldReader;
use crate::schema::Document;
use crate::schema::Field;
use crate::schema::Schema;
use crate::schema::FAST;
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::SeedableRng; use rand::SeedableRng;
use schema::Document;
use schema::Field;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| { lazy_static! {
let mut schema_builder = Schema::builder(); pub static ref SCHEMA: Schema = {
schema_builder.add_u64_field("field", FAST); let mut schema_builder = Schema::builder();
schema_builder.build() schema_builder.add_u64_field("field", FAST);
}); schema_builder.build()
};
pub static FIELD: Lazy<Field> = Lazy::new(|| SCHEMA.get_field("field").unwrap()); pub static ref FIELD: Field = { SCHEMA.get_field("field").unwrap() };
}
#[test] #[test]
pub fn test_fastfield() { pub fn test_fastfield() {
@@ -429,6 +405,7 @@ mod tests {
} }
} }
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
@@ -436,9 +413,9 @@ mod bench {
use super::tests::FIELD; use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA}; use super::tests::{generate_permutation, SCHEMA};
use super::*; use super::*;
use crate::common::CompositeFile; use common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use fastfield::FastFieldReader;
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
use test::{self, Bencher}; use test::{self, Bencher};
@@ -536,4 +513,5 @@ mod bench {
}); });
} }
} }
} }

View File

@@ -7,16 +7,16 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use time; extern crate time;
use self::time::Duration; use self::time::Duration;
use crate::collector::TopDocs; use collector::TopDocs;
use crate::query::QueryParser; use query::QueryParser;
use crate::schema::Cardinality; use schema::Cardinality;
use crate::schema::Facet; use schema::Facet;
use crate::schema::IntOptions; use schema::IntOptions;
use crate::schema::Schema; use schema::Schema;
use crate::Index; use Index;
#[test] #[test]
fn test_multivalued_u64() { fn test_multivalued_u64() {
@@ -37,7 +37,9 @@ mod tests {
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new(); let mut vals = Vec::new();
let multi_value_reader = segment_reader.fast_fields().u64s(field).unwrap(); let multi_value_reader = segment_reader
.multi_fast_field_reader::<u64>(field)
.unwrap();
{ {
multi_value_reader.get_vals(2, &mut vals); multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[4u64]); assert_eq!(&vals, &[4u64]);
@@ -196,9 +198,9 @@ mod tests {
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let mut vals = Vec::new(); let mut vals = Vec::new();
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap(); let multi_value_reader = reader.multi_fast_field_reader::<i64>(field).unwrap();
{ {
multi_value_reader.get_vals(2, &mut vals); multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[-4i64]); assert_eq!(&vals, &[-4i64]);

View File

@@ -1,5 +1,5 @@
use crate::fastfield::{FastFieldReader, FastValue}; use fastfield::{FastFieldReader, FastValue};
use crate::DocId; use DocId;
/// Reader for a multivalued `u64` fast field. /// Reader for a multivalued `u64` fast field.
/// ///
@@ -26,13 +26,6 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
} }
} }
pub(crate) fn into_u64s_reader(self) -> MultiValueIntFastFieldReader<u64> {
MultiValueIntFastFieldReader {
idx_reader: self.idx_reader,
vals_reader: self.vals_reader.into_u64_reader(),
}
}
/// Returns `(start, stop)`, such that the values associated /// Returns `(start, stop)`, such that the values associated
/// to the given document are `start..stop`. /// to the given document are `start..stop`.
fn range(&self, doc: DocId) -> (u64, u64) { fn range(&self, doc: DocId) -> (u64, u64) {
@@ -48,24 +41,13 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
vals.resize(len, Item::default()); vals.resize(len, Item::default());
self.vals_reader.get_range_u64(start, &mut vals[..]); self.vals_reader.get_range_u64(start, &mut vals[..]);
} }
/// Returns the number of values associated with the document `DocId`.
pub fn num_vals(&self, doc: DocId) -> usize {
let (start, stop) = self.range(doc);
(stop - start) as usize
}
/// Returns the overall number of values in this field .
pub fn total_num_vals(&self) -> u64 {
self.idx_reader.max_value()
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::core::Index; use core::Index;
use crate::schema::{Facet, Schema}; use schema::{Document, Facet, Schema};
#[test] #[test]
fn test_multifastfield_reader() { fn test_multifastfield_reader() {
@@ -76,12 +58,22 @@ mod tests {
let mut index_writer = index let mut index_writer = index
.writer_with_num_threads(1, 30_000_000) .writer_with_num_threads(1, 30_000_000)
.expect("Failed to create index writer."); .expect("Failed to create index writer.");
index_writer.add_document(doc!( {
facet_field => Facet::from("/category/cat2"), let mut doc = Document::new();
facet_field => Facet::from("/category/cat1"), doc.add_facet(facet_field, "/category/cat2");
)); doc.add_facet(facet_field, "/category/cat1");
index_writer.add_document(doc!(facet_field => Facet::from("/category/cat2"))); index_writer.add_document(doc);
index_writer.add_document(doc!(facet_field => Facet::from("/category/cat3"))); }
{
let mut doc = Document::new();
doc.add_facet(facet_field, "/category/cat2");
index_writer.add_document(doc);
}
{
let mut doc = Document::new();
doc.add_facet(facet_field, "/category/cat3");
index_writer.add_document(doc);
}
index_writer.commit().expect("Commit failed"); index_writer.commit().expect("Commit failed");
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);

View File

@@ -1,13 +1,13 @@
use crate::fastfield::serializer::FastSingleFieldSerializer; use fastfield::serializer::FastSingleFieldSerializer;
use crate::fastfield::value_to_u64; use fastfield::value_to_u64;
use crate::fastfield::FastFieldSerializer; use fastfield::FastFieldSerializer;
use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use fnv::FnvHashMap;
use itertools::Itertools; use itertools::Itertools;
use postings::UnorderedTermId;
use schema::{Document, Field};
use std::collections::HashMap;
use std::io; use std::io;
use termdict::TermOrdinal;
use DocId;
/// Writer for multi-valued (as in, more than one value per document) /// Writer for multi-valued (as in, more than one value per document)
/// int fast field. /// int fast field.
@@ -102,7 +102,7 @@ impl MultiValueIntFastFieldWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut FastFieldSerializer, serializer: &mut FastFieldSerializer,
mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>, mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> { ) -> io::Result<()> {
{ {
// writing the offset index // writing the offset index
@@ -116,7 +116,7 @@ impl MultiValueIntFastFieldWriter {
} }
{ {
// writing the values themselves. // writing the values themselves.
let mut value_serializer: FastSingleFieldSerializer<'_, _>; let mut value_serializer: FastSingleFieldSerializer<_>;
match mapping_opt { match mapping_opt {
Some(mapping) => { Some(mapping) => {
value_serializer = serializer.new_u64_fast_field_with_idx( value_serializer = serializer.new_u64_fast_field_with_idx(

View File

@@ -1,18 +1,18 @@
use super::FastValue; use super::FastValue;
use crate::common::bitpacker::BitUnpacker; use common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits; use common::compute_num_bits;
use crate::common::BinarySerializable; use common::BinarySerializable;
use crate::common::CompositeFile; use common::CompositeFile;
use crate::directory::ReadOnlySource; use directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use owning_ref::OwningRef; use owning_ref::OwningRef;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::path::Path; use std::path::Path;
use DocId;
/// Trait for accessing a fastfield. /// Trait for accessing a fastfield.
/// ///
@@ -50,15 +50,6 @@ impl<Item: FastValue> FastFieldReader<Item> {
} }
} }
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
FastFieldReader {
bit_unpacker: self.bit_unpacker,
min_value_u64: self.min_value_u64,
max_value_u64: self.max_value_u64,
_phantom: PhantomData,
}
}
/// Return the value associated to the given document. /// Return the value associated to the given document.
/// ///
/// This accessor should return as fast as possible. /// This accessor should return as fast as possible.

View File

@@ -1,228 +0,0 @@
use crate::common::CompositeFile;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage;
use crate::Result;
use std::collections::HashMap;
/// Provides access to all of the fast field readers of a segment.
///
/// Internally, `FastFieldReaders` has preloaded all of the fast field
/// readers at open time, and just wraps several `HashMap`s keyed by `Field`.
pub struct FastFieldReaders {
    // Single-valued fast field readers, one map per fast value type.
    fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
    fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
    fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
    // Multi-valued fast field readers, one map per fast value type.
    fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
    fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
    fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
    // Readers for bytes fast fields.
    fast_bytes: HashMap<Field, BytesFastFieldReader>,
    // Kept around to report per-field space usage.
    fast_fields_composite: CompositeFile,
}
/// The primitive value type a fast field is encoded as.
enum FastType {
    I64,
    U64,
    F64,
}
/// Maps a schema `FieldType` to its fast field value type and cardinality.
///
/// Returns `None` if the field type has no fast field representation,
/// or if the field was not declared as fast in its options.
fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
    let (fast_type, cardinality_opt) = match field_type {
        FieldType::U64(options) => (FastType::U64, options.get_fastfield_cardinality()),
        FieldType::I64(options) => (FastType::I64, options.get_fastfield_cardinality()),
        FieldType::F64(options) => (FastType::F64, options.get_fastfield_cardinality()),
        // Facets are stored as multivalued u64 term ordinals.
        FieldType::HierarchicalFacet => (FastType::U64, Some(Cardinality::MultiValues)),
        _ => return None,
    };
    cardinality_opt.map(|cardinality| (fast_type, cardinality))
}
impl FastFieldReaders {
pub(crate) fn load_all(
schema: &Schema,
fast_fields_composite: &CompositeFile,
) -> Result<FastFieldReaders> {
let mut fast_field_readers = FastFieldReaders {
fast_field_i64: Default::default(),
fast_field_u64: Default::default(),
fast_field_f64: Default::default(),
fast_field_i64s: Default::default(),
fast_field_u64s: Default::default(),
fast_field_f64s: Default::default(),
fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(),
};
for (field, field_entry) in schema.fields() {
let field_type = field_entry.field_type();
if field_type == &FieldType::Bytes {
let idx_reader = fast_fields_composite
.open_read_with_idx(field, 0)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
.map(FastFieldReader::open)?;
let data = fast_fields_composite
.open_read_with_idx(field, 1)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
fast_field_readers
.fast_bytes
.insert(field, BytesFastFieldReader::open(idx_reader, data));
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
match cardinality {
Cardinality::SingleValue => {
if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
match fast_type {
FastType::U64 => {
let fast_field_reader = FastFieldReader::open(fast_field_data);
fast_field_readers
.fast_field_u64
.insert(field, fast_field_reader);
}
FastType::I64 => {
fast_field_readers.fast_field_i64.insert(
field,
FastFieldReader::open(fast_field_data.clone()),
);
}
FastType::F64 => {
fast_field_readers.fast_field_f64.insert(
field,
FastFieldReader::open(fast_field_data.clone()),
);
}
}
} else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
}
}
Cardinality::MultiValues => {
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
let idx_reader = FastFieldReader::open(fast_field_idx);
match fast_type {
FastType::I64 => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_i64s
.insert(field, multivalued_int_fast_field);
}
FastType::U64 => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_u64s
.insert(field, multivalued_int_fast_field);
}
FastType::F64 => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_f64s
.insert(field, multivalued_int_fast_field);
}
}
} else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
}
}
}
}
}
Ok(fast_field_readers)
}
    /// Returns the per-field space usage of the fast fields of this segment.
    pub(crate) fn space_usage(&self) -> PerFieldSpaceUsage {
        self.fast_fields_composite.space_usage()
    }
/// Returns the `u64` fast field reader reader associated to `field`.
///
/// If `field` is not a u64 fast field, this method returns `None`.
pub fn u64(&self, field: Field) -> Option<FastFieldReader<u64>> {
self.fast_field_u64.get(&field).cloned()
}
/// If the field is a u64-fast field return the associated reader.
/// If the field is a i64-fast field, return the associated u64 reader. Values are
/// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping. ///
///
///TODO should it also be lenient with f64?
///
/// This method is useful when merging segment reader.
pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
if let Some(u64_ff_reader) = self.u64(field) {
return Some(u64_ff_reader);
}
if let Some(i64_ff_reader) = self.i64(field) {
return Some(i64_ff_reader.into_u64_reader());
}
None
}
/// Returns the `i64` fast field reader reader associated to `field`.
///
/// If `field` is not a i64 fast field, this method returns `None`.
pub fn i64(&self, field: Field) -> Option<FastFieldReader<i64>> {
self.fast_field_i64.get(&field).cloned()
}
/// Returns the `f64` fast field reader reader associated to `field`.
///
/// If `field` is not a f64 fast field, this method returns `None`.
pub fn f64(&self, field: Field) -> Option<FastFieldReader<f64>> {
self.fast_field_f64.get(&field).cloned()
}
/// Returns a `u64s` multi-valued fast field reader reader associated to `field`.
///
/// If `field` is not a u64 multi-valued fast field, this method returns `None`.
pub fn u64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
self.fast_field_u64s.get(&field).cloned()
}
/// If the field is a u64s-fast field return the associated reader.
/// If the field is a i64s-fast field, return the associated u64s reader. Values are
/// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping.
///
/// This method is useful when merging segment reader.
pub(crate) fn u64s_lenient(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
if let Some(u64s_ff_reader) = self.u64s(field) {
return Some(u64s_ff_reader);
}
if let Some(i64s_ff_reader) = self.i64s(field) {
return Some(i64s_ff_reader.into_u64s_reader());
}
None
}
/// Returns a `i64s` multi-valued fast field reader reader associated to `field`.
///
/// If `field` is not a i64 multi-valued fast field, this method returns `None`.
pub fn i64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<i64>> {
self.fast_field_i64s.get(&field).cloned()
}
/// Returns a `f64s` multi-valued fast field reader reader associated to `field`.
///
/// If `field` is not a f64 multi-valued fast field, this method returns `None`.
pub fn f64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<f64>> {
self.fast_field_f64s.get(&field).cloned()
}
/// Returns the `bytes` fast field reader associated to `field`.
///
/// If `field` is not a bytes fast field, returns `None`.
pub fn bytes(&self, field: Field) -> Option<BytesFastFieldReader> {
self.fast_bytes.get(&field).cloned()
}
}

View File

@@ -1,10 +1,10 @@
use crate::common::bitpacker::BitPacker; use common::bitpacker::BitPacker;
use crate::common::compute_num_bits; use common::compute_num_bits;
use crate::common::BinarySerializable; use common::BinarySerializable;
use crate::common::CompositeWrite; use common::CompositeWrite;
use crate::common::CountingWriter; use common::CountingWriter;
use crate::directory::WritePtr; use directory::WritePtr;
use crate::schema::Field; use schema::Field;
use std::io::{self, Write}; use std::io::{self, Write};
/// `FastFieldSerializer` is in charge of serializing /// `FastFieldSerializer` is in charge of serializing
@@ -45,7 +45,7 @@ impl FastFieldSerializer {
field: Field, field: Field,
min_value: u64, min_value: u64,
max_value: u64, max_value: u64,
) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> { ) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0) self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
} }
@@ -56,7 +56,7 @@ impl FastFieldSerializer {
min_value: u64, min_value: u64,
max_value: u64, max_value: u64,
idx: usize, idx: usize,
) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> { ) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx); let field_write = self.composite_write.for_field_with_idx(field, idx);
FastSingleFieldSerializer::open(field_write, min_value, max_value) FastSingleFieldSerializer::open(field_write, min_value, max_value)
} }
@@ -66,7 +66,7 @@ impl FastFieldSerializer {
&mut self, &mut self,
field: Field, field: Field,
idx: usize, idx: usize,
) -> io::Result<FastBytesFieldSerializer<'_, CountingWriter<WritePtr>>> { ) -> io::Result<FastBytesFieldSerializer<CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx); let field_write = self.composite_write.for_field_with_idx(field, idx);
FastBytesFieldSerializer::open(field_write) FastBytesFieldSerializer::open(field_write)
} }
@@ -79,7 +79,7 @@ impl FastFieldSerializer {
} }
} }
pub struct FastSingleFieldSerializer<'a, W: Write> { pub struct FastSingleFieldSerializer<'a, W: Write + 'a> {
bit_packer: BitPacker, bit_packer: BitPacker,
write: &'a mut W, write: &'a mut W,
min_value: u64, min_value: u64,
@@ -127,7 +127,7 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> {
} }
} }
pub struct FastBytesFieldSerializer<'a, W: Write> { pub struct FastBytesFieldSerializer<'a, W: Write + 'a> {
write: &'a mut W, write: &'a mut W,
} }

View File

@@ -1,14 +1,13 @@
use super::multivalued::MultiValueIntFastFieldWriter; use super::multivalued::MultiValueIntFastFieldWriter;
use crate::common; use common;
use crate::common::BinarySerializable; use common::BinarySerializable;
use crate::common::VInt; use common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer}; use fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId; use postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema}; use schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
use termdict::TermOrdinal;
/// The fastfieldswriter regroup all of the fast field writers. /// The fastfieldswriter regroup all of the fast field writers.
pub struct FastFieldsWriter { pub struct FastFieldsWriter {
@@ -24,16 +23,15 @@ impl FastFieldsWriter {
let mut multi_values_writers = Vec::new(); let mut multi_values_writers = Vec::new();
let mut bytes_value_writers = Vec::new(); let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() { for (field_id, field_entry) in schema.fields().iter().enumerate() {
let default_value = match *field_entry.field_type() { let field = Field(field_id as u32);
FieldType::I64(_) => common::i64_to_u64(0i64), let default_value = if let FieldType::I64(_) = *field_entry.field_type() {
FieldType::F64(_) => common::f64_to_u64(0.0f64), common::i64_to_u64(0i64)
_ => 0u64, } else {
0u64
}; };
match *field_entry.field_type() { match *field_entry.field_type() {
FieldType::I64(ref int_options) FieldType::I64(ref int_options) | FieldType::U64(ref int_options) => {
| FieldType::U64(ref int_options)
| FieldType::F64(ref int_options) => {
match int_options.get_fastfield_cardinality() { match int_options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => { Some(Cardinality::SingleValue) => {
let mut fast_field_writer = IntFastFieldWriter::new(field); let mut fast_field_writer = IntFastFieldWriter::new(field);
@@ -116,7 +114,7 @@ impl FastFieldsWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut FastFieldSerializer, serializer: &mut FastFieldSerializer,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>, mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> { ) -> io::Result<()> {
for field_writer in &self.single_value_writers { for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?; field_writer.serialize(serializer)?;
@@ -144,9 +142,9 @@ impl FastFieldsWriter {
/// bitpacked and the number of bits required for bitpacking /// bitpacked and the number of bits required for bitpacking
/// can only been known once we have seen all of the values. /// can only been known once we have seen all of the values.
/// ///
/// Both u64, i64 and f64 use the same writer. /// Both u64, and i64 use the same writer.
/// i64 and f64 are just remapped to the `0..2^64 - 1` /// i64 are just remapped to the `0..2^64 - 1`
/// using `common::i64_to_u64` and `common::f64_to_u64`. /// using `common::i64_to_u64`.
pub struct IntFastFieldWriter { pub struct IntFastFieldWriter {
field: Field, field: Field,
vals: Vec<u8>, vals: Vec<u8>,
@@ -205,8 +203,8 @@ impl IntFastFieldWriter {
/// Extract the value associated to the fast field for /// Extract the value associated to the fast field for
/// this document. /// this document.
/// ///
/// i64 and f64 are remapped to u64 using the logic /// i64 are remapped to u64 using the logic
/// in `common::i64_to_u64` and `common::f64_to_u64`. /// in `common::i64_to_u64`.
/// ///
/// If the value is missing, then the default value is used /// If the value is missing, then the default value is used
/// instead. /// instead.

View File

@@ -10,263 +10,28 @@ pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
.unwrap_or_else(|idx| idx - 1) as u8 .unwrap_or_else(|idx| idx - 1) as u8
} }
#[cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))]
pub const FIELD_NORMS_TABLE: [u32; 256] = [ pub const FIELD_NORMS_TABLE: [u32; 256] = [
0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 44, 46, 48, 50, 52, 54, 56, 60,
2, 64, 68, 72, 76, 80, 84, 88, 96, 104, 112, 120, 128, 136, 144, 152, 168, 184, 200, 216, 232,
3, 248, 264, 280, 312, 344, 376, 408, 440, 472, 504, 536, 600, 664, 728, 792, 856, 920, 984,
4, 1_048, 1176, 1304, 1432, 1560, 1688, 1816, 1944, 2072, 2328, 2584, 2840, 3096, 3352, 3608,
5, 3864, 4120, 4632, 5144, 5656, 6168, 6680, 7192, 7704, 8216, 9240, 10264, 11288, 12312, 13336,
6, 14360, 15384, 16408, 18456, 20504, 22552, 24600, 26648, 28696, 30744, 32792, 36888, 40984,
7, 45080, 49176, 53272, 57368, 61464, 65560, 73752, 81944, 90136, 98328, 106520, 114712, 122904,
8, 131096, 147480, 163864, 180248, 196632, 213016, 229400, 245784, 262168, 294936, 327704, 360472,
9, 393240, 426008, 458776, 491544, 524312, 589848, 655384, 720920, 786456, 851992, 917528, 983064,
10, 1048600, 1179672, 1310744, 1441816, 1572888, 1703960, 1835032, 1966104, 2097176, 2359320,
11, 2621464, 2883608, 3145752, 3407896, 3670040, 3932184, 4194328, 4718616, 5242904, 5767192,
12, 6291480, 6815768, 7340056, 7864344, 8388632, 9437208, 10485784, 11534360, 12582936, 13631512,
13, 14680088, 15728664, 16777240, 18874392, 20971544, 23068696, 25165848, 27263000, 29360152,
14, 31457304, 33554456, 37748760, 41943064, 46137368, 50331672, 54525976, 58720280, 62914584,
15, 67108888, 75497496, 83886104, 92274712, 100663320, 109051928, 117440536, 125829144, 134217752,
16, 150994968, 167772184, 184549400, 201326616, 218103832, 234881048, 251658264, 268435480,
17, 301989912, 335544344, 369098776, 402653208, 436207640, 469762072, 503316504, 536870936,
18, 603979800, 671088664, 738197528, 805306392, 872415256, 939524120, 1006632984, 1073741848,
19, 1207959576, 1342177304, 1476395032, 1610612760, 1744830488, 1879048216, 2013265944,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
42,
44,
46,
48,
50,
52,
54,
56,
60,
64,
68,
72,
76,
80,
84,
88,
96,
104,
112,
120,
128,
136,
144,
152,
168,
184,
200,
216,
232,
248,
264,
280,
312,
344,
376,
408,
440,
472,
504,
536,
600,
664,
728,
792,
856,
920,
984,
1_048,
1_176,
1_304,
1_432,
1_560,
1_688,
1_816,
1_944,
2_072,
2_328,
2_584,
2_840,
3_096,
3_352,
3_608,
3_864,
4_120,
4_632,
5_144,
5_656,
6_168,
6_680,
7_192,
7_704,
8_216,
9_240,
10_264,
11_288,
12_312,
13_336,
14_360,
15_384,
16_408,
18_456,
20_504,
22_552,
24_600,
26_648,
28_696,
30_744,
32_792,
36_888,
40_984,
45_080,
49_176,
53_272,
57_368,
61_464,
65_560,
73_752,
81_944,
90_136,
98_328,
106_520,
114_712,
122_904,
131_096,
147_480,
163_864,
180_248,
196_632,
213_016,
229_400,
245_784,
262_168,
294_936,
327_704,
360_472,
393_240,
426_008,
458_776,
491_544,
524_312,
589_848,
655_384,
720_920,
786_456,
851_992,
917_528,
983_064,
1_048_600,
1_179_672,
1_310_744,
1_441_816,
1_572_888,
1_703_960,
1_835_032,
1_966_104,
2_097_176,
2_359_320,
2_621_464,
2_883_608,
3_145_752,
3_407_896,
3_670_040,
3_932_184,
4_194_328,
4_718_616,
5_242_904,
5_767_192,
6_291_480,
6_815_768,
7_340_056,
7_864_344,
8_388_632,
9_437_208,
10_485_784,
11_534_360,
12_582_936,
13_631_512,
14_680_088,
15_728_664,
16_777_240,
18_874_392,
20_971_544,
23_068_696,
25_165_848,
27_263_000,
29_360_152,
31_457_304,
33_554_456,
37_748_760,
41_943_064,
46_137_368,
50_331_672,
54_525_976,
58_720_280,
62_914_584,
67_108_888,
75_497_496,
83_886_104,
92_274_712,
100_663_320,
109_051_928,
117_440_536,
125_829_144,
134_217_752,
150_994_968,
167_772_184,
184_549_400,
201_326_616,
218_103_832,
234_881_048,
251_658_264,
268_435_480,
301_989_912,
335_544_344,
369_098_776,
402_653_208,
436_207_640,
469_762_072,
503_316_504,
536_870_936,
603_979_800,
671_088_664,
738_197_528,
805_306_392,
872_415_256,
939_524_120,
1_006_632_984,
1_073_741_848,
1_207_959_576,
1_342_177_304,
1_476_395_032,
1_610_612_760,
1_744_830_488,
1_879_048_216,
2_013_265_944,
]; ];
#[cfg(test)] #[cfg(test)]

View File

@@ -1,6 +1,6 @@
use super::{fieldnorm_to_id, id_to_fieldnorm}; use super::{fieldnorm_to_id, id_to_fieldnorm};
use crate::directory::ReadOnlySource; use directory::ReadOnlySource;
use crate::DocId; use DocId;
/// Reads the fieldnorm associated to a document. /// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to /// The fieldnorm represents the length associated to

View File

@@ -1,6 +1,6 @@
use crate::common::CompositeWrite; use common::CompositeWrite;
use crate::directory::WritePtr; use directory::WritePtr;
use crate::schema::Field; use schema::Field;
use std::io; use std::io;
use std::io::Write; use std::io::Write;

View File

@@ -1,9 +1,9 @@
use crate::DocId; use DocId;
use super::fieldnorm_to_id; use super::fieldnorm_to_id;
use super::FieldNormsSerializer; use super::FieldNormsSerializer;
use crate::schema::Field; use schema::Field;
use crate::schema::Schema; use schema::Schema;
use std::io; use std::io;
/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
@@ -22,14 +22,11 @@ impl FieldNormsWriter {
pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> { pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> {
schema schema
.fields() .fields()
.filter_map(|(field, field_entry)| { .iter()
if field_entry.is_indexed() { .enumerate()
Some(field) .filter(|&(_, field_entry)| field_entry.is_indexed())
} else { .map(|(field, _)| Field(field as u32))
None .collect::<Vec<Field>>()
}
})
.collect::<Vec<_>>()
} }
/// Initialize with state for tracking the field norm fields /// Initialize with state for tracking the field norm fields
@@ -38,7 +35,7 @@ impl FieldNormsWriter {
let fields = FieldNormsWriter::fields_with_fieldnorm(schema); let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
let max_field = fields let max_field = fields
.iter() .iter()
.map(Field::field_id) .map(|field| field.0)
.max() .max()
.map(|max_field_id| max_field_id as usize + 1) .map(|max_field_id| max_field_id as usize + 1)
.unwrap_or(0); .unwrap_or(0);
@@ -53,8 +50,8 @@ impl FieldNormsWriter {
/// ///
/// Will extend with 0-bytes for documents that have not been seen. /// Will extend with 0-bytes for documents that have not been seen.
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) { pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
for field in self.fields.iter() { for &field in self.fields.iter() {
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8); self.fieldnorms_buffer[field.0 as usize].resize(max_doc as usize, 0u8);
} }
} }
@@ -67,7 +64,7 @@ impl FieldNormsWriter {
/// * field - the field being set /// * field - the field being set
/// * fieldnorm - the number of terms present in document `doc` in field `field` /// * fieldnorm - the number of terms present in document `doc` in field `field`
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) { pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize]; let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.0 as usize];
assert!( assert!(
fieldnorm_buffer.len() <= doc as usize, fieldnorm_buffer.len() <= doc as usize,
"Cannot register a given fieldnorm twice" "Cannot register a given fieldnorm twice"
@@ -80,7 +77,7 @@ impl FieldNormsWriter {
/// Serialize the seen fieldnorm values to the serializer for all fields. /// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> { pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() { for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..]; let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.0 as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?; fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
} }
Ok(()) Ok(())

View File

@@ -1,10 +1,10 @@
use rand::thread_rng; use rand::thread_rng;
use std::collections::HashSet; use std::collections::HashSet;
use crate::schema::*;
use crate::Index;
use crate::Searcher;
use rand::Rng; use rand::Rng;
use schema::*;
use Index;
use Searcher;
fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) { fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
assert!(searcher.segment_readers().len() < 20); assert!(searcher.segment_readers().len() < 20);

View File

@@ -1,5 +1,4 @@
use super::operation::DeleteOperation; use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem; use std::mem;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
@@ -24,7 +23,7 @@ struct InnerDeleteQueue {
last_block: Option<Arc<Block>>, last_block: Option<Arc<Block>>,
} }
#[derive(Clone)] #[derive(Clone, Default)]
pub struct DeleteQueue { pub struct DeleteQueue {
inner: Arc<RwLock<InnerDeleteQueue>>, inner: Arc<RwLock<InnerDeleteQueue>>,
} }
@@ -37,7 +36,6 @@ impl DeleteQueue {
}; };
let next_block = NextBlock::from(delete_queue.clone()); let next_block = NextBlock::from(delete_queue.clone());
{ {
let mut delete_queue_wlock = delete_queue.inner.write().unwrap(); let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
delete_queue_wlock.last_block = Some(Arc::new(Block { delete_queue_wlock.last_block = Some(Arc::new(Block {
@@ -181,12 +179,17 @@ pub struct DeleteCursor {
} }
impl DeleteCursor { impl DeleteCursor {
pub fn empty() -> DeleteCursor {
DeleteQueue::new().cursor()
}
/// Skips operations and position it so that /// Skips operations and position it so that
/// - either all of the delete operation currently in the /// - either all of the delete operation currently in the
/// queue are consume and the next get will return None. /// queue are consume and the next get will return None.
/// - the next get will return the first operation with an /// - the next get will return the first operation with an
/// `opstamp >= target_opstamp`. /// `opstamp >= target_opstamp`.
pub fn skip_to(&mut self, target_opstamp: Opstamp) { pub fn skip_to(&mut self, target_opstamp: u64) {
// TODO Can be optimize as we work with block. // TODO Can be optimize as we work with block.
while self.is_behind_opstamp(target_opstamp) { while self.is_behind_opstamp(target_opstamp) {
self.advance(); self.advance();
@@ -194,7 +197,7 @@ impl DeleteCursor {
} }
#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
fn is_behind_opstamp(&mut self, target_opstamp: Opstamp) -> bool { fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool {
self.get() self.get()
.map(|operation| operation.opstamp < target_opstamp) .map(|operation| operation.opstamp < target_opstamp)
.unwrap_or(false) .unwrap_or(false)
@@ -251,14 +254,14 @@ impl DeleteCursor {
mod tests { mod tests {
use super::{DeleteOperation, DeleteQueue}; use super::{DeleteOperation, DeleteQueue};
use crate::schema::{Field, Term}; use schema::{Field, Term};
#[test] #[test]
fn test_deletequeue() { fn test_deletequeue() {
let delete_queue = DeleteQueue::new(); let delete_queue = DeleteQueue::new();
let make_op = |i: usize| { let make_op = |i: usize| {
let field = Field::from_field_id(1u32); let field = Field(1u32);
DeleteOperation { DeleteOperation {
opstamp: i as u64, opstamp: i as u64,
term: Term::from_field_u64(field, i as u64), term: Term::from_field_u64(field, i as u64),

View File

@@ -1,5 +1,5 @@
use crate::DocId; use std::sync::Arc;
use crate::Opstamp; use DocId;
// Doc to opstamp is used to identify which // Doc to opstamp is used to identify which
// document should be deleted. // document should be deleted.
@@ -17,25 +17,25 @@ use crate::Opstamp;
// This mapping is (for the moment) stricly increasing // This mapping is (for the moment) stricly increasing
// because of the way document id are allocated. // because of the way document id are allocated.
#[derive(Clone)] #[derive(Clone)]
pub enum DocToOpstampMapping<'a> { pub enum DocToOpstampMapping {
WithMap(&'a [Opstamp]), WithMap(Arc<Vec<u64>>),
None, None,
} }
impl<'a> From<&'a [u64]> for DocToOpstampMapping<'a> { impl From<Vec<u64>> for DocToOpstampMapping {
fn from(opstamps: &[Opstamp]) -> DocToOpstampMapping { fn from(opstamps: Vec<u64>) -> DocToOpstampMapping {
DocToOpstampMapping::WithMap(opstamps) DocToOpstampMapping::WithMap(Arc::new(opstamps))
} }
} }
impl<'a> DocToOpstampMapping<'a> { impl DocToOpstampMapping {
/// Given an opstamp return the limit doc id L /// Given an opstamp return the limit doc id L
/// such that all doc id D such that /// such that all doc id D such that
// D >= L iff opstamp(D) >= than `target_opstamp`. // D >= L iff opstamp(D) >= than `target_opstamp`.
// //
// The edge case opstamp = some doc opstamp is in practise // The edge case opstamp = some doc opstamp is in practise
// never called. // never called.
pub fn compute_doc_limit(&self, target_opstamp: Opstamp) -> DocId { pub fn compute_doc_limit(&self, target_opstamp: u64) -> DocId {
match *self { match *self {
DocToOpstampMapping::WithMap(ref doc_opstamps) => { DocToOpstampMapping::WithMap(ref doc_opstamps) => {
match doc_opstamps.binary_search(&target_opstamp) { match doc_opstamps.binary_search(&target_opstamp) {
@@ -64,18 +64,17 @@ mod tests {
#[test] #[test]
fn test_doc_to_opstamp_mapping_complex() { fn test_doc_to_opstamp_mapping_complex() {
{ {
let doc_to_opstamp_mapping = DocToOpstampMapping::from(&[][..]); let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0); assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 0); assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 0);
} }
{ {
let doc_to_opstamp_mapping = DocToOpstampMapping::from(&[1u64][..]); let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![1u64]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0); assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 1); assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 1);
} }
{ {
let doc_to_opstamp_mapping = let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![1u64, 12u64, 17u64, 23u64]);
DocToOpstampMapping::from(&[1u64, 12u64, 17u64, 23u64][..]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0); assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
for i in 2u64..13u64 { for i in 2u64..13u64 {
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 1); assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 1);

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
use super::merge_policy::{MergeCandidate, MergePolicy}; use super::merge_policy::{MergeCandidate, MergePolicy};
use crate::core::SegmentMeta; use core::SegmentMeta;
use std::cmp; use std::cmp;
use std::f64; use std::f64;
@@ -52,7 +52,7 @@ impl MergePolicy for LogMergePolicy {
let mut size_sorted_tuples = segments let mut size_sorted_tuples = segments
.iter() .iter()
.map(SegmentMeta::num_docs) .map(|x| x.num_docs())
.enumerate() .enumerate()
.collect::<Vec<(usize, u32)>>(); .collect::<Vec<(usize, u32)>>();
@@ -95,11 +95,8 @@ impl Default for LogMergePolicy {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::core::{SegmentId, SegmentMeta, SegmentMetaInventory}; use core::{SegmentId, SegmentMeta};
use crate::indexer::merge_policy::MergePolicy; use indexer::merge_policy::MergePolicy;
use once_cell::sync::Lazy;
static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default);
fn test_merge_policy() -> LogMergePolicy { fn test_merge_policy() -> LogMergePolicy {
let mut log_merge_policy = LogMergePolicy::default(); let mut log_merge_policy = LogMergePolicy::default();
@@ -116,7 +113,7 @@ mod tests {
} }
fn create_random_segment_meta(num_docs: u32) -> SegmentMeta { fn create_random_segment_meta(num_docs: u32) -> SegmentMeta {
INVENTORY.new_segment_meta(SegmentId::generate_random(), num_docs) SegmentMeta::new(SegmentId::generate_random(), num_docs)
} }
#[test] #[test]

View File

@@ -1,16 +1,11 @@
use crate::Opstamp;
use crate::SegmentId;
use census::{Inventory, TrackedObject}; use census::{Inventory, TrackedObject};
use std::collections::HashSet; use std::collections::HashSet;
use SegmentId;
#[derive(Default)] #[derive(Default)]
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>); pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl MergeOperationInventory { impl MergeOperationInventory {
pub fn num_merge_operations(&self) -> usize {
self.0.list().len()
}
pub fn segment_in_merge(&self) -> HashSet<SegmentId> { pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default(); let mut segment_in_merge = HashSet::default();
for merge_op in self.0.list() { for merge_op in self.0.list() {
@@ -22,8 +17,8 @@ impl MergeOperationInventory {
} }
} }
/// A `MergeOperation` has two roles. /// A `MergeOperation` has two role.
/// It carries all of the information required to describe a merge: /// It carries all of the information required to describe a merge :
/// - `target_opstamp` is the opstamp up to which we want to consume the /// - `target_opstamp` is the opstamp up to which we want to consume the
/// delete queue and reflect their deletes. /// delete queue and reflect their deletes.
/// - `segment_ids` is the list of segment to be merged. /// - `segment_ids` is the list of segment to be merged.
@@ -40,14 +35,14 @@ pub struct MergeOperation {
} }
struct InnerMergeOperation { struct InnerMergeOperation {
target_opstamp: Opstamp, target_opstamp: u64,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
} }
impl MergeOperation { impl MergeOperation {
pub fn new( pub fn new(
inventory: &MergeOperationInventory, inventory: &MergeOperationInventory,
target_opstamp: Opstamp, target_opstamp: u64,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
) -> MergeOperation { ) -> MergeOperation {
let inner_merge_operation = InnerMergeOperation { let inner_merge_operation = InnerMergeOperation {
@@ -59,7 +54,7 @@ impl MergeOperation {
} }
} }
pub fn target_opstamp(&self) -> Opstamp { pub fn target_opstamp(&self) -> u64 {
self.inner.target_opstamp self.inner.target_opstamp
} }

View File

@@ -1,5 +1,5 @@
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
use std::fmt::Debug; use std::fmt::Debug;
use std::marker; use std::marker;
@@ -12,10 +12,6 @@ pub struct MergeCandidate(pub Vec<SegmentId>);
/// Every time a the list of segments changes, the segment updater /// Every time a the list of segments changes, the segment updater
/// asks the merge policy if some segments should be merged. /// asks the merge policy if some segments should be merged.
pub trait MergePolicy: marker::Send + marker::Sync + Debug { pub trait MergePolicy: marker::Send + marker::Sync + Debug {
fn maximum_num_threads(&self) -> Option<usize> {
None
}
/// Given the list of segment metas, returns the list of merge candidates. /// Given the list of segment metas, returns the list of merge candidates.
/// ///
/// This call happens on the segment updater thread, and will block /// This call happens on the segment updater thread, and will block
@@ -43,8 +39,8 @@ impl MergePolicy for NoMergePolicy {
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
/// `MergePolicy` useful for test purposes. /// `MergePolicy` useful for test purposes.
/// ///

View File

@@ -1,31 +1,30 @@
use crate::common::MAX_DOC_LIMIT; use common::MAX_DOC_LIMIT;
use crate::core::Segment; use core::Segment;
use crate::core::SegmentReader; use core::SegmentReader;
use crate::core::SerializableSegment; use core::SerializableSegment;
use crate::docset::DocSet; use docset::DocSet;
use crate::fastfield::BytesFastFieldReader; use fastfield::DeleteBitSet;
use crate::fastfield::DeleteBitSet; use fastfield::FastFieldReader;
use crate::fastfield::FastFieldReader; use fastfield::FastFieldSerializer;
use crate::fastfield::FastFieldSerializer; use fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::MultiValueIntFastFieldReader; use fieldnorm::FieldNormReader;
use crate::fieldnorm::FieldNormReader; use fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsSerializer; use fieldnorm::FieldNormsWriter;
use crate::fieldnorm::FieldNormsWriter; use indexer::SegmentSerializer;
use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings;
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::DocId;
use crate::Result;
use crate::TantivyError;
use itertools::Itertools; use itertools::Itertools;
use postings::InvertedIndexSerializer;
use postings::Postings;
use schema::Cardinality;
use schema::FieldType;
use schema::{Field, Schema};
use std::cmp; use std::cmp;
use std::collections::HashMap; use std::collections::HashMap;
use store::StoreWriter;
use termdict::TermMerger;
use termdict::TermOrdinal;
use DocId;
use Result;
use TantivyError;
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 { fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
let mut total_tokens = 0u64; let mut total_tokens = 0u64;
@@ -73,7 +72,7 @@ fn compute_min_max_val(
// some deleted documents, // some deleted documents,
// we need to recompute the max / min // we need to recompute the max / min
(0..max_doc) (0..max_doc)
.filter(|doc_id| delete_bitset.is_alive(*doc_id)) .filter(|doc_id| !delete_bitset.is_deleted(*doc_id))
.map(|doc_id| u64_reader.get(doc_id)) .map(|doc_id| u64_reader.get(doc_id))
.minmax() .minmax()
.into_option() .into_option()
@@ -190,7 +189,8 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>, mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
) -> Result<()> { ) -> Result<()> {
for (field, field_entry) in self.schema.fields() { for (field_id, field_entry) in self.schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
match *field_type { match *field_type {
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
@@ -206,7 +206,6 @@ impl IndexMerger {
} }
FieldType::U64(ref options) FieldType::U64(ref options)
| FieldType::I64(ref options) | FieldType::I64(ref options)
| FieldType::F64(ref options)
| FieldType::Date(ref options) => match options.get_fastfield_cardinality() { | FieldType::Date(ref options) => match options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => { Some(Cardinality::SingleValue) => {
self.write_single_fast_field(field, fast_field_serializer)?; self.write_single_fast_field(field, fast_field_serializer)?;
@@ -240,10 +239,7 @@ impl IndexMerger {
let mut max_value = u64::min_value(); let mut max_value = u64::min_value();
for reader in &self.readers { for reader in &self.readers {
let u64_reader: FastFieldReader<u64> = reader let u64_reader: FastFieldReader<u64> = reader.fast_field_reader(field)?;
.fast_fields()
.u64_lenient(field)
.expect("Failed to find a reader for single fast field. This is a tantivy bug and it should never happen.");
if let Some((seg_min_val, seg_max_val)) = if let Some((seg_min_val, seg_max_val)) =
compute_min_max_val(&u64_reader, reader.max_doc(), reader.delete_bitset()) compute_min_max_val(&u64_reader, reader.max_doc(), reader.delete_bitset())
{ {
@@ -286,28 +282,24 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> Result<()> { ) -> Result<()> {
let mut total_num_vals = 0u64; let mut total_num_vals = 0u64;
let mut u64s_readers: Vec<MultiValueIntFastFieldReader<u64>> = Vec::new();
// In the first pass, we compute the total number of vals. // In the first pass, we compute the total number of vals.
// //
// This is required by the bitpacker, as it needs to know // This is required by the bitpacker, as it needs to know
// what should be the bit length use for bitpacking. // what should be the bit length use for bitpacking.
for reader in &self.readers { for reader in &self.readers {
let u64s_reader = reader.fast_fields() let idx_reader = reader.fast_field_reader_with_idx::<u64>(field, 0)?;
.u64s_lenient(field)
.expect("Failed to find index for multivalued field. This is a bug in tantivy, please report.");
if let Some(delete_bitset) = reader.delete_bitset() { if let Some(delete_bitset) = reader.delete_bitset() {
for doc in 0u32..reader.max_doc() { for doc in 0u32..reader.max_doc() {
if delete_bitset.is_alive(doc) { if !delete_bitset.is_deleted(doc) {
let num_vals = u64s_reader.num_vals(doc) as u64; let start = idx_reader.get(doc);
total_num_vals += num_vals; let end = idx_reader.get(doc + 1);
total_num_vals += end - start;
} }
} }
} else { } else {
total_num_vals += u64s_reader.total_num_vals(); total_num_vals += idx_reader.max_value();
} }
u64s_readers.push(u64s_reader);
} }
// We can now create our `idx` serializer, and in a second pass, // We can now create our `idx` serializer, and in a second pass,
@@ -315,10 +307,13 @@ impl IndexMerger {
let mut serialize_idx = let mut serialize_idx =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?; fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
let mut idx = 0; let mut idx = 0;
for (segment_reader, u64s_reader) in self.readers.iter().zip(&u64s_readers) { for reader in &self.readers {
for doc in segment_reader.doc_ids_alive() { let idx_reader = reader.fast_field_reader_with_idx::<u64>(field, 0)?;
for doc in reader.doc_ids_alive() {
serialize_idx.add_val(idx)?; serialize_idx.add_val(idx)?;
idx += u64s_reader.num_vals(doc) as u64; let start = idx_reader.get(doc);
let end = idx_reader.get(doc + 1);
idx += end - start;
} }
} }
serialize_idx.add_val(idx)?; serialize_idx.add_val(idx)?;
@@ -349,10 +344,8 @@ impl IndexMerger {
for (segment_ord, segment_reader) in self.readers.iter().enumerate() { for (segment_ord, segment_reader) in self.readers.iter().enumerate() {
let term_ordinal_mapping: &[TermOrdinal] = let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(segment_ord); term_ordinal_mappings.get_segment(segment_ord);
let ff_reader: MultiValueIntFastFieldReader<u64> = segment_reader let ff_reader: MultiValueIntFastFieldReader<u64> =
.fast_fields() segment_reader.multi_fast_field_reader(field)?;
.u64s(field)
.expect("Could not find multivalued u64 fast value reader.");
// TODO optimize if no deletes // TODO optimize if no deletes
for doc in segment_reader.doc_ids_alive() { for doc in segment_reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals); ff_reader.get_vals(doc, &mut vals);
@@ -384,8 +377,6 @@ impl IndexMerger {
let mut vals = Vec::with_capacity(100); let mut vals = Vec::with_capacity(100);
let mut ff_readers = Vec::new();
// Our values are bitpacked and we need to know what should be // Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values. // our bitwidth and our minimum value before serializing any values.
// //
@@ -394,10 +385,7 @@ impl IndexMerger {
// maximum value and initialize our Serializer. // maximum value and initialize our Serializer.
for reader in &self.readers { for reader in &self.readers {
let ff_reader: MultiValueIntFastFieldReader<u64> = let ff_reader: MultiValueIntFastFieldReader<u64> =
reader.fast_fields().u64s_lenient(field).expect( reader.multi_fast_field_reader(field)?;
"Failed to find multivalued fast field reader. This is a bug in \
tantivy. Please report.",
);
for doc in reader.doc_ids_alive() { for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals); ff_reader.get_vals(doc, &mut vals);
for &val in &vals { for &val in &vals {
@@ -405,7 +393,6 @@ impl IndexMerger {
max_value = cmp::max(val, max_value); max_value = cmp::max(val, max_value);
} }
} }
ff_readers.push(ff_reader);
// TODO optimize when no deletes // TODO optimize when no deletes
} }
@@ -418,7 +405,9 @@ impl IndexMerger {
{ {
let mut serialize_vals = fast_field_serializer let mut serialize_vals = fast_field_serializer
.new_u64_fast_field_with_idx(field, min_value, max_value, 1)?; .new_u64_fast_field_with_idx(field, min_value, max_value, 1)?;
for (reader, ff_reader) in self.readers.iter().zip(ff_readers) { for reader in &self.readers {
let ff_reader: MultiValueIntFastFieldReader<u64> =
reader.multi_fast_field_reader(field)?;
// TODO optimize if no deletes // TODO optimize if no deletes
for doc in reader.doc_ids_alive() { for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals); ff_reader.get_vals(doc, &mut vals);
@@ -437,53 +426,19 @@ impl IndexMerger {
field: Field, field: Field,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> Result<()> { ) -> Result<()> {
let mut total_num_vals = 0u64; self.write_fast_field_idx(field, fast_field_serializer)?;
let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
for reader in &self.readers {
let bytes_reader = reader.fast_fields().bytes(field).expect(
"Failed to find bytes fast field reader. This is a bug in tantivy, please report.",
);
if let Some(delete_bitset) = reader.delete_bitset() {
for doc in 0u32..reader.max_doc() {
if delete_bitset.is_alive(doc) {
let num_vals = bytes_reader.get_bytes(doc).len() as u64;
total_num_vals += num_vals;
}
}
} else {
total_num_vals += bytes_reader.total_num_bytes() as u64;
}
bytes_readers.push(bytes_reader);
}
{
// We can now create our `idx` serializer, and in a second pass,
// can effectively push the different indexes.
let mut serialize_idx =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
let mut idx = 0;
for (segment_reader, bytes_reader) in self.readers.iter().zip(&bytes_readers) {
for doc in segment_reader.doc_ids_alive() {
serialize_idx.add_val(idx)?;
idx += bytes_reader.get_bytes(doc).len() as u64;
}
}
serialize_idx.add_val(idx)?;
serialize_idx.close_field()?;
}
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1)?; let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1)?;
for segment_reader in &self.readers { for reader in &self.readers {
let bytes_reader = segment_reader.fast_fields().bytes(field) let bytes_reader = reader.bytes_fast_field_reader(field)?;
.expect("Failed to find bytes field in fast field reader. This is a bug in tantivy. Please report.");
// TODO: optimize if no deletes // TODO: optimize if no deletes
for doc in segment_reader.doc_ids_alive() { for doc in reader.doc_ids_alive() {
let val = bytes_reader.get_bytes(doc); let val = bytes_reader.get_val(doc);
serialize_vals.write_all(val)?; serialize_vals.write_all(val)?;
} }
} }
serialize_vals.flush()?; serialize_vals.flush()?;
Ok(()) Ok(())
} }
@@ -648,12 +603,15 @@ impl IndexMerger {
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, TermOrdinalMapping>> { ) -> Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field_ord, field_entry) in self.schema.fields().iter().enumerate() {
if field_entry.is_indexed() { if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = let indexed_field = Field(field_ord as u32);
self.write_postings_for_field(field, field_entry.field_type(), serializer)? if let Some(term_ordinal_mapping) = self.write_postings_for_field(
{ indexed_field,
term_ordinal_mappings.insert(field, term_ordinal_mapping); field_entry.field_type(),
serializer,
)? {
term_ordinal_mappings.insert(indexed_field, term_ordinal_mapping);
} }
} }
} }
@@ -689,28 +647,28 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document;
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
use crate::schema::IntOptions;
use crate::schema::Term;
use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED;
use crate::DocAddress;
use crate::IndexWriter;
use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use collector::tests::TestCollector;
use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use collector::{Count, FacetCollector};
use core::Index;
use futures::Future; use futures::Future;
use query::AllQuery;
use query::BooleanQuery;
use query::TermQuery;
use schema;
use schema::Cardinality;
use schema::Document;
use schema::Facet;
use schema::IndexRecordOption;
use schema::IntOptions;
use schema::Term;
use schema::TextFieldIndexing;
use schema::INDEXED;
use std::io::Cursor; use std::io::Cursor;
use DocAddress;
use IndexWriter;
use Searcher;
#[test] #[test]
fn test_index_merger_no_deletes() { fn test_index_merger_no_deletes() {
@@ -804,7 +762,7 @@ mod tests {
let searcher = reader.searcher(); let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
let top_docs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap(); let top_docs = searcher.search(&query, &TestCollector).unwrap();
top_docs.docs().to_vec() top_docs.docs().to_vec()
}; };
{ {
@@ -1021,16 +979,14 @@ mod tests {
let score_field_reader = searcher let score_field_reader = searcher
.segment_reader(0) .segment_reader(0)
.fast_fields() .fast_field_reader::<u64>(score_field)
.u64(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 4000); assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000); assert_eq!(score_field_reader.max_value(), 7000);
let score_field_reader = searcher let score_field_reader = searcher
.segment_reader(1) .segment_reader(1)
.fast_fields() .fast_field_reader::<u64>(score_field)
.u64(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 1); assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3); assert_eq!(score_field_reader.max_value(), 3);
@@ -1081,8 +1037,7 @@ mod tests {
); );
let score_field_reader = searcher let score_field_reader = searcher
.segment_reader(0) .segment_reader(0)
.fast_fields() .fast_field_reader::<u64>(score_field)
.u64(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 3); assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000); assert_eq!(score_field_reader.max_value(), 7000);
@@ -1128,8 +1083,7 @@ mod tests {
); );
let score_field_reader = searcher let score_field_reader = searcher
.segment_reader(0) .segment_reader(0)
.fast_fields() .fast_field_reader::<u64>(score_field)
.u64(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 3); assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000); assert_eq!(score_field_reader.max_value(), 7000);
@@ -1181,8 +1135,7 @@ mod tests {
); );
let score_field_reader = searcher let score_field_reader = searcher
.segment_reader(0) .segment_reader(0)
.fast_fields() .fast_field_reader::<u64>(score_field)
.u64(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 6000); assert_eq!(score_field_reader.min_value(), 6000);
assert_eq!(score_field_reader.max_value(), 7000); assert_eq!(score_field_reader.max_value(), 7000);
@@ -1428,7 +1381,7 @@ mod tests {
{ {
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals); ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]); assert_eq!(&vals, &[1, 2]);
@@ -1463,7 +1416,7 @@ mod tests {
{ {
let segment = searcher.segment_reader(1u32); let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals); ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[28, 27]); assert_eq!(&vals, &[28, 27]);
@@ -1473,7 +1426,7 @@ mod tests {
{ {
let segment = searcher.segment_reader(2u32); let segment = searcher.segment_reader(2u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals); ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]); assert_eq!(&vals, &[20]);
} }
@@ -1506,7 +1459,7 @@ mod tests {
.collect::<Vec<_>>() .collect::<Vec<_>>()
); );
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals); ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]); assert_eq!(&vals, &[1, 2]);

View File

@@ -1,18 +1,17 @@
use crate::schema::Document; use schema::Document;
use crate::schema::Term; use schema::Term;
use crate::Opstamp;
/// Timestamped Delete operation. /// Timestamped Delete operation.
#[derive(Clone, Eq, PartialEq, Debug)] #[derive(Clone, Eq, PartialEq, Debug)]
pub struct DeleteOperation { pub struct DeleteOperation {
pub opstamp: Opstamp, pub opstamp: u64,
pub term: Term, pub term: Term,
} }
/// Timestamped Add operation. /// Timestamped Add operation.
#[derive(Eq, PartialEq, Debug)] #[derive(Eq, PartialEq, Debug)]
pub struct AddOperation { pub struct AddOperation {
pub opstamp: Opstamp, pub opstamp: u64,
pub document: Document, pub document: Document,
} }

View File

@@ -1,24 +1,29 @@
use super::IndexWriter; use super::IndexWriter;
use crate::Opstamp; use Result;
use crate::Result;
/// A prepared commit /// A prepared commit
pub struct PreparedCommit<'a> { pub struct PreparedCommit<'a> {
index_writer: &'a mut IndexWriter, index_writer: &'a mut IndexWriter,
payload: Option<String>, payload: Option<String>,
opstamp: Opstamp, opstamp: u64,
soft: bool,
} }
impl<'a> PreparedCommit<'a> { impl<'a> PreparedCommit<'a> {
pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> { pub(crate) fn new(
index_writer: &'a mut IndexWriter,
opstamp: u64,
soft: bool,
) -> PreparedCommit {
PreparedCommit { PreparedCommit {
index_writer, index_writer,
payload: None, payload: None,
opstamp, opstamp,
soft,
} }
} }
pub fn opstamp(&self) -> Opstamp { pub fn opstamp(&self) -> u64 {
self.opstamp self.opstamp
} }
@@ -26,15 +31,15 @@ impl<'a> PreparedCommit<'a> {
self.payload = Some(payload.to_string()) self.payload = Some(payload.to_string())
} }
pub fn abort(self) -> Result<Opstamp> { pub fn abort(self) -> Result<()> {
self.index_writer.rollback() self.index_writer.rollback()
} }
pub fn commit(self) -> Result<Opstamp> { pub fn commit(self) -> Result<u64> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
self.index_writer self.index_writer
.segment_updater() .segment_updater()
.commit(self.opstamp, self.payload)?; .commit(self.opstamp, self.payload, self.soft)?;
Ok(self.opstamp) Ok(self.opstamp)
} }
} }

View File

@@ -1,7 +1,7 @@
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet; use bit_set::BitSet;
use core::SegmentId;
use core::SegmentMeta;
use indexer::delete_queue::DeleteCursor;
use std::fmt; use std::fmt;
/// A segment entry describes the state of /// A segment entry describes the state of
@@ -22,6 +22,7 @@ pub struct SegmentEntry {
meta: SegmentMeta, meta: SegmentMeta,
delete_bitset: Option<BitSet>, delete_bitset: Option<BitSet>,
delete_cursor: DeleteCursor, delete_cursor: DeleteCursor,
opstamp: u64,
} }
impl SegmentEntry { impl SegmentEntry {
@@ -30,14 +31,20 @@ impl SegmentEntry {
segment_meta: SegmentMeta, segment_meta: SegmentMeta,
delete_cursor: DeleteCursor, delete_cursor: DeleteCursor,
delete_bitset: Option<BitSet>, delete_bitset: Option<BitSet>,
opstamp: u64,
) -> SegmentEntry { ) -> SegmentEntry {
SegmentEntry { SegmentEntry {
meta: segment_meta, meta: segment_meta,
delete_bitset, delete_bitset,
delete_cursor, delete_cursor,
opstamp,
} }
} }
pub fn opstamp(&self) -> u64 {
self.opstamp
}
/// Return a reference to the segment entry deleted bitset. /// Return a reference to the segment entry deleted bitset.
/// ///
/// `DocId` in this bitset are flagged as deleted. /// `DocId` in this bitset are flagged as deleted.
@@ -46,7 +53,8 @@ impl SegmentEntry {
} }
/// Set the `SegmentMeta` for this segment. /// Set the `SegmentMeta` for this segment.
pub fn set_meta(&mut self, segment_meta: SegmentMeta) { pub fn set_meta(&mut self, opstamp: u64, segment_meta: SegmentMeta) {
self.opstamp = opstamp;
self.meta = segment_meta; self.meta = segment_meta;
} }
@@ -67,7 +75,7 @@ impl SegmentEntry {
} }
impl fmt::Debug for SegmentEntry { impl fmt::Debug for SegmentEntry {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "SegmentEntry({:?})", self.meta) write!(formatter, "SegmentEntry({:?})", self.meta)
} }
} }

View File

@@ -1,19 +1,57 @@
use super::segment_register::SegmentRegister; use super::segment_register::SegmentRegister;
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
use crate::error::TantivyError; use core::META_FILEPATH;
use crate::indexer::delete_queue::DeleteCursor; use error::TantivyError;
use crate::indexer::SegmentEntry; use indexer::delete_queue::DeleteCursor;
use crate::Result as TantivyResult; use indexer::SegmentEntry;
use std::collections::hash_set::HashSet; use std::collections::hash_set::HashSet;
use std::fmt::{self, Debug, Formatter}; use std::fmt::{self, Debug, Formatter};
use std::path::PathBuf;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::{RwLockReadGuard, RwLockWriteGuard}; use std::sync::{RwLockReadGuard, RwLockWriteGuard};
use Result as TantivyResult;
use std::sync::Arc;
use std::collections::HashMap;
/// Provides a read-only view of the available segments.
#[derive(Clone)]
pub struct AvailableSegments {
registers: Arc<RwLock<SegmentRegisters>>,
}
impl AvailableSegments {
pub fn committed(&self) -> Vec<SegmentMeta> {
self.registers
.read()
.unwrap()
.committed
.segment_metas()
}
pub fn soft_committed(&self) -> Vec<SegmentMeta> {
self.registers
.read()
.unwrap()
.soft_committed
.segment_metas()
}
}
#[derive(Default)]
struct SegmentRegisters { struct SegmentRegisters {
uncommitted: SegmentRegister, uncommitted: HashMap<SegmentId, SegmentEntry>,
committed: SegmentRegister, committed: SegmentRegister,
/// soft commits can advance committed segment to a future delete
/// opstamp.
///
/// In that case the same `SegmentId` can appear in both `committed`
/// and in `committed_in_the_future`.
///
/// We do not consider these segments for merges.
soft_committed: SegmentRegister,
/// `DeleteCursor`, positionned on the soft commit.
delete_cursor: DeleteCursor,
} }
/// The segment manager stores the list of segments /// The segment manager stores the list of segments
@@ -21,13 +59,12 @@ struct SegmentRegisters {
/// ///
/// It guarantees the atomicity of the /// It guarantees the atomicity of the
/// changes (merges especially) /// changes (merges especially)
#[derive(Default)]
pub struct SegmentManager { pub struct SegmentManager {
registers: RwLock<SegmentRegisters>, registers: Arc<RwLock<SegmentRegisters>>
} }
impl Debug for SegmentManager { impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
let lock = self.read(); let lock = self.read();
write!( write!(
f, f,
@@ -44,11 +81,17 @@ pub fn get_mergeable_segments(
let registers_lock = segment_manager.read(); let registers_lock = segment_manager.read();
( (
registers_lock registers_lock
.committed .soft_committed
.get_mergeable_segments(in_merge_segment_ids), .get_mergeable_segments(in_merge_segment_ids),
registers_lock registers_lock
.uncommitted .uncommitted
.get_mergeable_segments(in_merge_segment_ids), .values()
.map(|segment_entry| segment_entry.meta())
.filter(|segment_meta| {
!in_merge_segment_ids.contains(&segment_meta.id())
})
.cloned()
.collect::<Vec<_>>()
) )
} }
@@ -56,33 +99,47 @@ impl SegmentManager {
pub fn from_segments( pub fn from_segments(
segment_metas: Vec<SegmentMeta>, segment_metas: Vec<SegmentMeta>,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
opstamp: u64,
) -> SegmentManager { ) -> SegmentManager {
SegmentManager { SegmentManager {
registers: RwLock::new(SegmentRegisters { registers: Arc::new(RwLock::new(SegmentRegisters {
uncommitted: SegmentRegister::default(), uncommitted: HashMap::default(),
committed: SegmentRegister::new(segment_metas, delete_cursor), committed: SegmentRegister::new(segment_metas.clone(), opstamp),
}), soft_committed: SegmentRegister::new(segment_metas, opstamp),
delete_cursor: delete_cursor.clone(),
}))
} }
} }
/// Returns all of the segment entries (committed or uncommitted) pub fn available_segments_view(&self) -> AvailableSegments {
pub fn segment_entries(&self) -> Vec<SegmentEntry> { AvailableSegments {
let registers_lock = self.read(); registers: self.registers.clone()
let mut segment_entries = registers_lock.uncommitted.segment_entries(); }
segment_entries.extend(registers_lock.committed.segment_entries()); }
segment_entries
/// List the files that are useful to the index.
///
/// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector.
pub fn list_files(&self) -> HashSet<PathBuf> {
let mut files = HashSet::new();
files.insert(META_FILEPATH.clone());
for segment_meta in SegmentMeta::all() {
files.extend(segment_meta.list_files());
}
files
} }
// Lock poisoning should never happen : // Lock poisoning should never happen :
// The lock is acquired and released within this class, // The lock is acquired and released within this class,
// and the operations cannot panic. // and the operations cannot panic.
fn read(&self) -> RwLockReadGuard<'_, SegmentRegisters> { fn read(&self) -> RwLockReadGuard<SegmentRegisters> {
self.registers self.registers
.read() .read()
.expect("Failed to acquire read lock on SegmentManager.") .expect("Failed to acquire read lock on SegmentManager.")
} }
fn write(&self) -> RwLockWriteGuard<'_, SegmentRegisters> { fn write(&self) -> RwLockWriteGuard<SegmentRegisters> {
self.registers self.registers
.write() .write()
.expect("Failed to acquire write lock on SegmentManager.") .expect("Failed to acquire write lock on SegmentManager.")
@@ -93,50 +150,76 @@ impl SegmentManager {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock registers_lock
.committed .committed
.segment_entries() .segment_metas()
.iter() .iter()
.filter(|segment| segment.meta().num_docs() == 0) .filter(|segment_meta| segment_meta.num_docs() == 0)
.for_each(|segment| { .for_each(|segment_meta| {
registers_lock registers_lock
.committed .committed
.remove_segment(&segment.segment_id()) .remove_segment(&segment_meta.id())
});
registers_lock
.soft_committed
.segment_metas()
.iter()
.filter(|segment_meta| segment_meta.num_docs() == 0)
.for_each(|segment_meta| {
registers_lock
.committed
.remove_segment(&segment_meta.id())
}); });
} }
pub(crate) fn remove_all_segments(&self) { /// Returns all of the segment entries (soft committed or uncommitted)
let mut registers_lock = self.write(); pub fn segment_entries(&self) -> Vec<SegmentEntry> {
registers_lock.committed.clear(); let registers_lock = self.read();
registers_lock.uncommitted.clear(); let mut segment_entries: Vec<SegmentEntry > = registers_lock.uncommitted.values().cloned().collect();
segment_entries.extend(registers_lock.soft_committed.segment_entries(&registers_lock.delete_cursor).into_iter());
segment_entries
} }
pub fn commit(&self, segment_entries: Vec<SegmentEntry>) {
pub fn commit(&self, opstamp: u64, segment_entries: Vec<SegmentEntry>) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.committed.clear();
registers_lock.uncommitted.clear(); registers_lock.uncommitted.clear();
for segment_entry in segment_entries { registers_lock
registers_lock.committed.add_segment_entry(segment_entry); .committed
} .set_commit(opstamp, segment_entries.clone());
registers_lock
.soft_committed
.set_commit(opstamp, segment_entries);
registers_lock.delete_cursor.skip_to(opstamp);
} }
/// Marks a list of segments as in merge. pub fn soft_commit(&self, opstamp: u64, segment_entries: Vec<SegmentEntry>) {
let mut registers_lock = self.write();
registers_lock.uncommitted.clear();
registers_lock
.soft_committed
.set_commit(opstamp, segment_entries);
registers_lock.delete_cursor.skip_to(opstamp);
}
/// Gets the list of segment_entries associated to a list of `segment_ids`.
/// This method is used when starting a merge operations.
/// ///
/// Returns an error if some segments are missing, or if /// Returns an error if some segments are missing, or if
/// the `segment_ids` are not either all committed or all /// the `segment_ids` are not either all soft_committed or all
/// uncommitted. /// uncommitted.
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> { pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
let registers_lock = self.read(); let registers_lock = self.read();
let mut segment_entries = vec![]; let mut segment_entries = vec![];
if registers_lock.uncommitted.contains_all(segment_ids) { if segment_ids.iter().all(|segment_id| registers_lock.uncommitted.contains_key(segment_id)) {
for segment_id in segment_ids { for segment_id in segment_ids {
let segment_entry = registers_lock.uncommitted let segment_entry = registers_lock.uncommitted
.get(segment_id) .get(segment_id)
.expect("Segment id not found {}. Should never happen because of the contains all if-block."); .expect("Segment id not found {}. Should never happen because of the contains all if-block.");
segment_entries.push(segment_entry); segment_entries.push(segment_entry.clone());
} }
} else if registers_lock.committed.contains_all(segment_ids) { } else if registers_lock.soft_committed.contains_all(segment_ids) {
for segment_id in segment_ids { for segment_id in segment_ids {
let segment_entry = registers_lock.committed let segment_entry = registers_lock.soft_committed
.get(segment_id) .get(segment_id, &registers_lock.delete_cursor)
.expect("Segment id not found {}. Should never happen because of the contains all if-block."); .expect("Segment id not found {}. Should never happen because of the contains all if-block.");
segment_entries.push(segment_entry); segment_entries.push(segment_entry);
} }
@@ -151,35 +234,32 @@ impl SegmentManager {
pub fn add_segment(&self, segment_entry: SegmentEntry) { pub fn add_segment(&self, segment_entry: SegmentEntry) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.uncommitted.add_segment_entry(segment_entry); registers_lock
.uncommitted
.insert(segment_entry.segment_id(), segment_entry);
} }
pub fn end_merge( pub fn end_merge(
&self, &self,
before_merge_segment_ids: &[SegmentId], before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry, after_merge_segment_entry: SegmentEntry
) { ) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
let target_register: &mut SegmentRegister = {
if registers_lock if before_merge_segment_ids.iter().all(|seg_id|
registers_lock
.uncommitted .uncommitted
.contains_all(before_merge_segment_ids) .contains_key(seg_id))
{ {
&mut registers_lock.uncommitted for segment_id in before_merge_segment_ids {
} else if registers_lock registers_lock.uncommitted.remove(&segment_id);
.committed
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager");
return;
} }
}; registers_lock.uncommitted.insert(after_merge_segment_entry.segment_id(),
for segment_id in before_merge_segment_ids { after_merge_segment_entry);
target_register.remove_segment(segment_id); } else {
registers_lock.committed.receive_merge(&before_merge_segment_ids, &after_merge_segment_entry);
registers_lock.soft_committed.receive_merge(&before_merge_segment_ids, &after_merge_segment_entry)
} }
target_register.add_segment_entry(after_merge_segment_entry);
} }
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> { pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {

View File

@@ -1,7 +1,7 @@
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor; use indexer::delete_queue::DeleteCursor;
use crate::indexer::segment_entry::SegmentEntry; use indexer::segment_entry::SegmentEntry;
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt::{self, Debug, Formatter}; use std::fmt::{self, Debug, Formatter};
@@ -16,11 +16,12 @@ use std::fmt::{self, Debug, Formatter};
/// merge candidates. /// merge candidates.
#[derive(Default)] #[derive(Default)]
pub struct SegmentRegister { pub struct SegmentRegister {
segment_states: HashMap<SegmentId, SegmentEntry>, segment_states: HashMap<SegmentId, SegmentMeta>,
opstamp_constraint: u64,
} }
impl Debug for SegmentRegister { impl Debug for SegmentRegister {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "SegmentRegister(")?; write!(f, "SegmentRegister(")?;
for k in self.segment_states.keys() { for k in self.segment_states.keys() {
write!(f, "{}, ", k.short_uuid_string())?; write!(f, "{}, ", k.short_uuid_string())?;
@@ -41,23 +42,28 @@ impl SegmentRegister {
) -> Vec<SegmentMeta> { ) -> Vec<SegmentMeta> {
self.segment_states self.segment_states
.values() .values()
.filter(|segment_entry| !in_merge_segment_ids.contains(&segment_entry.segment_id())) .filter(|segment_meta| !in_merge_segment_ids.contains(&segment_meta.id()))
.map(|segment_entry| segment_entry.meta().clone()) .cloned()
.collect() .collect()
} }
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
self.segment_states.values().cloned().collect()
}
pub fn segment_metas(&self) -> Vec<SegmentMeta> { pub fn segment_metas(&self) -> Vec<SegmentMeta> {
let mut segment_ids: Vec<SegmentMeta> = self let mut segment_metas: Vec<SegmentMeta> = self
.segment_states .segment_states
.values() .values()
.map(|segment_entry| segment_entry.meta().clone()) .cloned()
.collect(); .collect();
segment_ids.sort_by_key(SegmentMeta::id); segment_metas.sort_by_key(|meta| meta.id());
segment_ids segment_metas
}
pub fn segment_entries(&self, delete_cursor: &DeleteCursor) -> Vec<SegmentEntry> {
self.segment_states
.values()
.map(|segment_meta| {
SegmentEntry::new(segment_meta.clone(), delete_cursor.clone(), None, self.opstamp_constraint)
})
.collect()
} }
pub fn contains_all(&self, segment_ids: &[SegmentId]) -> bool { pub fn contains_all(&self, segment_ids: &[SegmentId]) -> bool {
@@ -66,35 +72,86 @@ impl SegmentRegister {
.all(|segment_id| self.segment_states.contains_key(segment_id)) .all(|segment_id| self.segment_states.contains_key(segment_id))
} }
pub fn add_segment_entry(&mut self, segment_entry: SegmentEntry) { pub fn receive_merge(&mut self,
before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: &SegmentEntry) {
if after_merge_segment_entry.opstamp() != self.opstamp_constraint {
return;
}
if !self.contains_all(before_merge_segment_ids) {
return;
}
for segment_id in before_merge_segment_ids {
self.segment_states.remove(segment_id);
}
self.register_segment_entry(after_merge_segment_entry.clone());
}
/// Registers a `SegmentEntry`.
///
/// If a segment entry associated to this `SegmentId` is already there,
/// override it with the new `SegmentEntry`.
pub fn register_segment_entry(&mut self, segment_entry: SegmentEntry) {
if self.opstamp_constraint != segment_entry.opstamp() {
panic!(format!(
"Invalid segment. Expect opstamp {}, got {}.",
self.opstamp_constraint,
segment_entry.opstamp()
));
}
if segment_entry.meta().num_docs() == 0 {
return;
}
let segment_id = segment_entry.segment_id(); let segment_id = segment_entry.segment_id();
self.segment_states.insert(segment_id, segment_entry); // Check that we are ok with deletes.
self.segment_states.insert(segment_id, segment_entry.meta().clone());
}
pub fn set_commit(&mut self, opstamp: u64, segment_entries: Vec<SegmentEntry>) {
self.segment_states.clear();
self.opstamp_constraint = opstamp;
for segment_entry in segment_entries {
self.register_segment_entry(segment_entry);
}
} }
pub fn remove_segment(&mut self, segment_id: &SegmentId) { pub fn remove_segment(&mut self, segment_id: &SegmentId) {
self.segment_states.remove(segment_id); self.segment_states.remove(&segment_id);
} }
pub fn get(&self, segment_id: &SegmentId) -> Option<SegmentEntry> { pub fn get(&self, segment_id: &SegmentId, delete_cursor: &DeleteCursor) -> Option<SegmentEntry> {
self.segment_states.get(segment_id).cloned() self.segment_states
.get(&segment_id)
.map(|segment_meta|
SegmentEntry::new(
segment_meta.clone(),
delete_cursor.clone(),
None,
self.opstamp_constraint
))
} }
pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister { pub fn new(
segment_metas: Vec<SegmentMeta>,
opstamp: u64,
) -> SegmentRegister {
let mut segment_states = HashMap::new(); let mut segment_states = HashMap::new();
for segment_meta in segment_metas { for segment_meta in segment_metas {
let segment_id = segment_meta.id(); segment_states.insert(segment_meta.id(), segment_meta);
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None); }
segment_states.insert(segment_id, segment_entry); SegmentRegister {
segment_states,
opstamp_constraint: opstamp,
} }
SegmentRegister { segment_states }
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::core::{SegmentId, SegmentMetaInventory}; use core::SegmentId;
use crate::indexer::delete_queue::*; use core::SegmentMeta;
use indexer::delete_queue::*;
fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> { fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
segment_register segment_register
@@ -106,7 +163,6 @@ mod tests {
#[test] #[test]
fn test_segment_register() { fn test_segment_register() {
let inventory = SegmentMetaInventory::default();
let delete_queue = DeleteQueue::new(); let delete_queue = DeleteQueue::new();
let mut segment_register = SegmentRegister::default(); let mut segment_register = SegmentRegister::default();
@@ -115,23 +171,24 @@ mod tests {
let segment_id_merged = SegmentId::generate_random(); let segment_id_merged = SegmentId::generate_random();
{ {
let segment_meta = inventory.new_segment_meta(segment_id_a, 0u32); let segment_meta = SegmentMeta::new(segment_id_a, 1u32);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None); let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None, 0u64);
segment_register.add_segment_entry(segment_entry); segment_register.register_segment_entry(segment_entry);
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]); assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
{ {
let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32); let segment_meta = SegmentMeta::new(segment_id_b, 2u32);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None); let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None, 0u64);
segment_register.add_segment_entry(segment_entry); segment_register.register_segment_entry(segment_entry);
} }
segment_register.remove_segment(&segment_id_a);
segment_register.remove_segment(&segment_id_b);
{ {
let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32); let segment_meta_merged = SegmentMeta::new(segment_id_merged, 3u32);
let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None); let segment_entry =
segment_register.add_segment_entry(segment_entry); SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None, 0u64);
segment_register.receive_merge(&[segment_id_a, segment_id_b], &segment_entry);
segment_register.register_segment_entry(segment_entry);
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]); assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
} }
} }

View File

@@ -1,11 +1,11 @@
use crate::Result; use Result;
use crate::core::Segment; use core::Segment;
use crate::core::SegmentComponent; use core::SegmentComponent;
use crate::fastfield::FastFieldSerializer; use fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer; use fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer; use postings::InvertedIndexSerializer;
use crate::store::StoreWriter; use store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`. /// the data accumulated and sorted by the `SegmentWriter`.

View File

@@ -1,31 +1,29 @@
use super::segment_manager::{get_mergeable_segments, SegmentManager}; use super::segment_manager::{get_mergeable_segments, SegmentManager};
use crate::core::Index; use core::Index;
use crate::core::IndexMeta; use core::IndexMeta;
use crate::core::Segment; use core::Segment;
use crate::core::SegmentId; use core::SegmentId;
use crate::core::SegmentMeta; use core::SegmentMeta;
use crate::core::SerializableSegment; use core::SerializableSegment;
use crate::core::META_FILEPATH; use core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone}; use directory::{Directory, DirectoryClone};
use crate::error::TantivyError; use error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::schema::Schema;
use crate::Opstamp;
use crate::Result;
use futures::oneshot; use futures::oneshot;
use futures::sync::oneshot::Receiver; use futures::sync::oneshot::Receiver;
use futures::Future; use futures::Future;
use futures_cpupool::Builder as CpuPoolBuilder; use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture; use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool; use futures_cpupool::CpuPool;
use indexer::delete_queue::DeleteCursor;
use indexer::index_writer::advance_deletes;
use indexer::merge_operation::MergeOperationInventory;
use indexer::merger::IndexMerger;
use indexer::stamper::Stamper;
use indexer::MergeOperation;
use indexer::SegmentEntry;
use indexer::SegmentSerializer;
use indexer::{DefaultMergePolicy, MergePolicy};
use schema::Schema;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashMap; use std::collections::HashMap;
@@ -33,24 +31,23 @@ use std::collections::HashSet;
use std::io::Write; use std::io::Write;
use std::mem; use std::mem;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::thread; use std::thread;
use std::thread::JoinHandle; use std::thread::JoinHandle;
use std::time::Duration; use Result;
/// Save the index meta file. /// Save the index meta file.
/// This operation is atomic : /// This operation is atomic :
/// Either /// Either
/// - it fails, in which case an error is returned, // - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched, /// and the `meta.json` remains untouched,
/// - it succeeds, and `meta.json` is written /// - it success, and `meta.json` is written
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -71,8 +68,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<(
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut Directory) -> Result<()> {
info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
writeln!(&mut buffer)?; writeln!(&mut buffer)?;
@@ -127,9 +123,9 @@ fn perform_merge(
let num_docs = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs); let segment_meta = SegmentMeta::new(merged_segment.id(), num_docs);
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None); let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None, target_opstamp);
Ok(after_merge_segment_entry) Ok(after_merge_segment_entry)
} }
@@ -144,9 +140,10 @@ struct InnerSegmentUpdater {
pool: CpuPool, pool: CpuPool,
index: Index, index: Index,
segment_manager: SegmentManager, segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>, merge_policy: RwLock<Arc<Box<MergePolicy>>>,
merging_thread_id: AtomicUsize, merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>, merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
generation: AtomicUsize,
killed: AtomicBool, killed: AtomicBool,
stamper: Stamper, stamper: Stamper,
merge_operations: MergeOperationInventory, merge_operations: MergeOperationInventory,
@@ -158,8 +155,11 @@ impl SegmentUpdater {
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
) -> Result<SegmentUpdater> { ) -> Result<SegmentUpdater> {
let index_meta = index.load_metas()?;
let segments = index.searchable_segment_metas()?; let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor); let opstamp = index_meta.opstamp;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor, opstamp);
let pool = CpuPoolBuilder::new() let pool = CpuPoolBuilder::new()
.name_prefix("segment_updater") .name_prefix("segment_updater")
.pool_size(1) .pool_size(1)
@@ -173,17 +173,18 @@ impl SegmentUpdater {
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))), merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
merging_thread_id: AtomicUsize::default(), merging_thread_id: AtomicUsize::default(),
merging_threads: RwLock::new(HashMap::new()), merging_threads: RwLock::new(HashMap::new()),
generation: AtomicUsize::default(),
killed: AtomicBool::new(false), killed: AtomicBool::new(false),
stamper, stamper,
merge_operations: Default::default(), merge_operations: Default::default(),
}))) })))
} }
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> { pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> {
self.0.merge_policy.read().unwrap().clone() self.0.merge_policy.read().unwrap().clone()
} }
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy); let arc_merge_policy = Arc::new(merge_policy);
*self.0.merge_policy.write().unwrap() = arc_merge_policy; *self.0.merge_policy.write().unwrap() = arc_merge_policy;
} }
@@ -200,25 +201,18 @@ impl SegmentUpdater {
self.0.pool.spawn_fn(move || Ok(f(me_clone))) self.0.pool.spawn_fn(move || Ok(f(me_clone)))
} }
pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool { pub fn add_segment(&self, generation: usize, segment_entry: SegmentEntry) -> bool {
let max_num_threads_opt = self.0.merge_policy.read().unwrap().maximum_num_threads(); if generation >= self.0.generation.load(Ordering::Acquire) {
if let Some(max_num_threads) = max_num_threads_opt { self.run_async(|segment_updater| {
while self.0.merge_operations.num_merge_operations() >= max_num_threads_opt { segment_updater.0.segment_manager.add_segment(segment_entry);
std::thread::sleep(Duration::from_secs(1u64)); segment_updater.consider_merge_options();
} true
} })
self.run_async(|segment_updater| { .forget();
segment_updater.0.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options();
true true
}) } else {
.forget(); false
true }
}
/// Orders `SegmentManager` to remove all segments
pub(crate) fn remove_all_segments(&self) {
self.0.segment_manager.remove_all_segments();
} }
pub fn kill(&mut self) { pub fn kill(&mut self) {
@@ -231,9 +225,9 @@ impl SegmentUpdater {
/// Apply deletes up to the target opstamp to all segments. /// Apply deletes up to the target opstamp to all segments.
/// ///
/// The method returns copies of the segment entries, /// Tne method returns copies of the segment entries,
/// updated with the delete information. /// updated with the delete information.
fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> { fn purge_deletes(&self, target_opstamp: u64) -> Result<Vec<SegmentEntry>> {
let mut segment_entries = self.0.segment_manager.segment_entries(); let mut segment_entries = self.0.segment_manager.segment_entries();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = self.0.index.segment(segment_entry.meta().clone()); let segment = self.0.index.segment(segment_entry.meta().clone());
@@ -242,7 +236,7 @@ impl SegmentUpdater {
Ok(segment_entries) Ok(segment_entries)
} }
pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) { pub fn save_metas(&self, opstamp: u64, commit_message: Option<String>) {
if self.is_alive() { if self.is_alive() {
let index = &self.0.index; let index = &self.0.index;
let directory = index.directory(); let directory = index.directory();
@@ -274,39 +268,45 @@ impl SegmentUpdater {
} }
} }
pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> { pub fn garbage_collect_files(&self) -> Result<()> {
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
}) })
} .wait()
/// List the files that are useful to the index.
///
/// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector.
fn list_files(&self) -> HashSet<PathBuf> {
let mut files = HashSet::new();
files.insert(META_FILEPATH.to_path_buf());
for segment_meta in self.0.index.list_all_segment_metas() {
files.extend(segment_meta.list_files());
}
files
} }
fn garbage_collect_files_exec(&self) { fn garbage_collect_files_exec(&self) {
info!("Running garbage collection"); info!("Running garbage collection");
let mut index = self.0.index.clone(); let mut index = self.0.index.clone();
index.directory_mut().garbage_collect(|| self.list_files()); index
.directory_mut()
.garbage_collect(|| self.0.segment_manager.list_files());
} }
pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> { pub fn commit(&self, opstamp: u64, payload: Option<String>, soft: bool) -> Result<()> {
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
if segment_updater.is_alive() { if segment_updater.is_alive() {
let segment_entries = segment_updater let segment_entries = segment_updater
.purge_deletes(opstamp) .purge_deletes(opstamp)
.expect("Failed purge deletes"); .expect("Failed purge deletes");
segment_updater.0.segment_manager.commit(segment_entries); if soft {
segment_updater.save_metas(opstamp, payload); // Soft commit.
//
// The list `segment_entries` above is what we might want to use as searchable
// segment. However, we do not want to mark them as committed, and we want
// to keep the current set of committed segment.
segment_updater.0.segment_manager.soft_commit(opstamp, segment_entries);
// ... We do not save the meta file.
} else {
// Hard_commit. We register the new segment entries as committed.
segment_updater
.0
.segment_manager
.commit(opstamp, segment_entries);
// TODO error handling.
segment_updater.save_metas(opstamp, payload);
segment_updater.0.index.directory().flush().unwrap();
}
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
} }
@@ -464,41 +464,38 @@ impl SegmentUpdater {
) -> Result<()> { ) -> Result<()> {
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
info!("End merge {:?}", after_merge_segment_entry.meta()); info!("End merge {:?}", after_merge_segment_entry.meta());
{ let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone(); if let Some(delete_operation) = delete_cursor.get() {
if let Some(delete_operation) = delete_cursor.get() { let committed_opstamp = segment_updater.load_metas().opstamp;
let committed_opstamp = segment_updater.load_metas().opstamp; if delete_operation.opstamp < committed_opstamp {
if delete_operation.opstamp < committed_opstamp { let index = &segment_updater.0.index;
let index = &segment_updater.0.index; let segment = index.segment(after_merge_segment_entry.meta().clone());
let segment = index.segment(after_merge_segment_entry.meta().clone()); if let Err(e) =
if let Err(e) = advance_deletes( advance_deletes(segment, &mut after_merge_segment_entry, committed_opstamp)
segment, {
&mut after_merge_segment_entry, error!(
committed_opstamp, "Merge of {:?} was cancelled (advancing deletes failed): {:?}",
) { merge_operation.segment_ids(),
error!( e
"Merge of {:?} was cancelled (advancing deletes failed): {:?}", );
merge_operation.segment_ids(), if cfg!(test) {
e panic!("Merge failed.");
);
if cfg!(test) {
panic!("Merge failed.");
}
// ... cancel merge
// `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge.
return;
} }
// ... cancel merge
// `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge.
return;
} }
} }
let previous_metas = segment_updater.load_metas(); }
segment_updater segment_updater
.0 .0
.segment_manager .segment_manager
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry); .end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone()); info!("save metas");
} // we drop all possible handle to a now useless `SegmentMeta`. let previous_metas = segment_updater.load_metas();
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
}) })
.wait() .wait()
@@ -545,9 +542,9 @@ impl SegmentUpdater {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::indexer::merge_policy::tests::MergeWheneverPossible; use indexer::merge_policy::tests::MergeWheneverPossible;
use crate::schema::*; use schema::*;
use crate::Index; use Index;
#[test] #[test]
fn test_delete_during_merge() { fn test_delete_during_merge() {
@@ -672,31 +669,4 @@ mod tests {
assert!(index.searchable_segment_metas().unwrap().is_empty()); assert!(index.searchable_segment_metas().unwrap().is_empty());
assert!(reader.searcher().segment_readers().is_empty()); assert!(reader.searcher().segment_readers().is_empty());
} }
#[test]
fn test_remove_all_segments() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for _ in 0..100 {
index_writer.add_document(doc!(text_field=>"a"));
index_writer.add_document(doc!(text_field=>"b"));
}
assert!(index_writer.commit().is_ok());
}
index_writer.segment_updater().remove_all_segments();
let seg_vec = index_writer
.segment_updater()
.0
.segment_manager
.segment_entries();
assert!(seg_vec.is_empty());
}
} }

View File

@@ -1,44 +1,24 @@
use super::operation::AddOperation; use super::operation::AddOperation;
use crate::core::Segment; use core::Segment;
use crate::core::SerializableSegment; use core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter; use fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer; use indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size; use postings::MultiFieldPostingsWriter;
use crate::postings::MultiFieldPostingsWriter; use schema::FieldType;
use crate::schema::FieldType; use schema::Schema;
use crate::schema::Schema; use schema::Term;
use crate::schema::Term; use schema::Value;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
use crate::TantivyError;
use std::io; use std::io;
use std::str; use std::str;
use tokenizer::BoxedTokenizer;
/// Computes the initial size of the hash table. use tokenizer::FacetTokenizer;
/// use tokenizer::{TokenStream, Tokenizer};
/// Returns a number of bit `b`, such that the recommended initial table size is 2^b. use DocId;
fn initial_table_size(per_thread_memory_budget: usize) -> Result<usize> { use Result;
let table_memory_upper_bound = per_thread_memory_budget / 3;
if let Some(limit) = (10..)
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound)
.last()
{
Ok(limit.min(19)) // we cap it at 2^19 = 512K.
} else {
Err(TantivyError::InvalidArgument(
format!("per thread memory budget (={}) is too small. Raise the memory budget or lower the number of threads.", per_thread_memory_budget)))
}
}
/// A `SegmentWriter` is in charge of creating segment index from a /// A `SegmentWriter` is in charge of creating segment index from a
/// set of documents. /// documents.
/// ///
/// They creates the postings list in anonymous memory. /// They creates the postings list in anonymous memory.
/// The segment is layed on disk when the segment gets `finalized`. /// The segment is layed on disk when the segment gets `finalized`.
@@ -48,8 +28,8 @@ pub struct SegmentWriter {
segment_serializer: SegmentSerializer, segment_serializer: SegmentSerializer,
fast_field_writers: FastFieldsWriter, fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter, fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>, doc_opstamps: Vec<u64>,
tokenizers: Vec<Option<BoxedTokenizer>>, tokenizers: Vec<Option<Box<BoxedTokenizer>>>,
} }
impl SegmentWriter { impl SegmentWriter {
@@ -63,17 +43,18 @@ impl SegmentWriter {
/// - segment: The segment being written /// - segment: The segment being written
/// - schema /// - schema
pub fn for_segment( pub fn for_segment(
memory_budget: usize, table_bits: usize,
mut segment: Segment, mut segment: Segment,
schema: &Schema, schema: &Schema,
) -> Result<SegmentWriter> { ) -> Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?; let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_bits);
let tokenizers = schema let tokenizers =
.fields() schema
.map( .fields()
|(_, field_entry): (Field, &FieldEntry)| match field_entry.field_type() { .iter()
.map(|field_entry| field_entry.field_type())
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options FieldType::Str(ref text_options) => text_options
.get_indexing_options() .get_indexing_options()
.and_then(|text_index_option| { .and_then(|text_index_option| {
@@ -81,9 +62,8 @@ impl SegmentWriter {
segment.index().tokenizers().get(tokenizer_name) segment.index().tokenizers().get(tokenizer_name)
}), }),
_ => None, _ => None,
}, })
) .collect();
.collect();
Ok(SegmentWriter { Ok(SegmentWriter {
max_doc: 0, max_doc: 0,
multifield_postings, multifield_postings,
@@ -159,7 +139,7 @@ impl SegmentWriter {
} }
FieldType::Str(_) => { FieldType::Str(_) => {
let num_tokens = if let Some(ref mut tokenizer) = let num_tokens = if let Some(ref mut tokenizer) =
self.tokenizers[field.field_id() as usize] self.tokenizers[field.0 as usize]
{ {
let texts: Vec<&str> = field_values let texts: Vec<&str> = field_values
.iter() .iter()
@@ -213,17 +193,6 @@ impl SegmentWriter {
} }
} }
} }
FieldType::F64(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
let term = Term::from_field_f64(
field_value.field(),
field_value.value().f64_value(),
);
self.multifield_postings.subscribe(doc_id, &term);
}
}
}
FieldType::Bytes => { FieldType::Bytes => {
// Do nothing. Bytes only supports fast fields. // Do nothing. Bytes only supports fast fields.
} }
@@ -283,16 +252,3 @@ impl SerializableSegment for SegmentWriter {
Ok(max_doc) Ok(max_doc)
} }
} }
#[cfg(test)]
mod tests {
use super::initial_table_size;
#[test]
fn test_hashmap_size() {
assert_eq!(initial_table_size(100_000).unwrap(), 11);
assert_eq!(initial_table_size(1_000_000).unwrap(), 14);
assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
}
}

View File

@@ -1,39 +1,76 @@
use crate::Opstamp;
use std::ops::Range; use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::atomic::Ordering;
use std::sync::Arc; use std::sync::Arc;
/// Stamper provides Opstamps, which is just an auto-increment id to label // AtomicU64 have not landed in stable.
/// an operation. // For the moment let's just use AtomicUsize on
/// // x86/64 bit platform, and a mutex on other platform.
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`. #[cfg(target_arch = "x86_64")]
mod archicture_impl {
use std::sync::atomic::{AtomicUsize, Ordering};
#[derive(Default)]
pub struct AtomicU64Ersatz(AtomicUsize);
impl AtomicU64Ersatz {
pub fn new(first_opstamp: u64) -> AtomicU64Ersatz {
AtomicU64Ersatz(AtomicUsize::new(first_opstamp as usize))
}
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
self.0.fetch_add(val as usize, order) as u64
}
}
}
#[cfg(not(target_arch = "x86_64"))]
mod archicture_impl {
use std::sync::atomic::Ordering;
/// Under other architecture, we rely on a mutex.
use std::sync::RwLock;
#[derive(Default)]
pub struct AtomicU64Ersatz(RwLock<u64>);
impl AtomicU64Ersatz {
pub fn new(first_opstamp: u64) -> AtomicU64Ersatz {
AtomicU64Ersatz(RwLock::new(first_opstamp))
}
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
let previous_val = *lock;
*lock = previous_val + incr;
previous_val
}
}
}
use self::archicture_impl::AtomicU64Ersatz;
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub struct Stamper(Arc<AtomicU64>); pub struct Stamper(Arc<AtomicU64Ersatz>);
impl Stamper { impl Stamper {
pub fn new(first_opstamp: Opstamp) -> Stamper { pub fn new(first_opstamp: u64) -> Stamper {
Stamper(Arc::new(AtomicU64::new(first_opstamp))) Stamper(Arc::new(AtomicU64Ersatz::new(first_opstamp)))
} }
pub fn stamp(&self) -> Opstamp { pub fn stamp(&self) -> u64 {
self.0.fetch_add(1u64, Ordering::SeqCst) as u64 self.0.fetch_add(1u64, Ordering::SeqCst) as u64
} }
/// Given a desired count `n`, `stamps` returns an iterator that /// Given a desired count `n`, `stamps` returns an iterator that
/// will supply `n` number of u64 stamps. /// will supply `n` number of u64 stamps.
pub fn stamps(&self, n: u64) -> Range<Opstamp> { pub fn stamps(&self, n: u64) -> Range<u64> {
let start = self.0.fetch_add(n, Ordering::SeqCst); let start = self.0.fetch_add(n, Ordering::SeqCst);
Range { Range {
start, start,
end: start + n, end: start + n,
} }
} }
/// Reverts the stamper to a given `Opstamp` value and returns it
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
self.0.store(to_opstamp, Ordering::SeqCst);
to_opstamp
}
} }
#[cfg(test)] #[cfg(test)]
@@ -55,18 +92,4 @@ mod test {
assert_eq!(stamper.stamps(3u64), (12..15)); assert_eq!(stamper.stamps(3u64), (12..15));
assert_eq!(stamper.stamp(), 15u64); assert_eq!(stamper.stamp(), 15u64);
} }
#[test]
fn test_stamper_revert() {
let stamper = Stamper::new(7u64);
assert_eq!(stamper.stamp(), 7u64);
assert_eq!(stamper.stamp(), 8u64);
let stamper_clone = stamper.clone();
assert_eq!(stamper_clone.stamp(), 9u64);
stamper.revert(6);
assert_eq!(stamper.stamp(), 6);
assert_eq!(stamper_clone.stamp(), 7);
}
} }

View File

@@ -3,6 +3,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))] #![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))] #![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)] #![warn(missing_docs)]
#![recursion_limit = "80"]
//! # `tantivy` //! # `tantivy`
//! //!
@@ -10,17 +11,26 @@
//! Think `Lucene`, but in Rust. //! Think `Lucene`, but in Rust.
//! //!
//! ```rust //! ```rust
//! # extern crate tempdir;
//! #
//! #[macro_use]
//! extern crate tantivy;
//!
//! // ...
//!
//! # use std::path::Path; //! # use std::path::Path;
//! # use tempfile::TempDir; //! # use tempdir::TempDir;
//! # use tantivy::Index;
//! # use tantivy::schema::*;
//! # use tantivy::{Score, DocAddress};
//! # use tantivy::collector::TopDocs; //! # use tantivy::collector::TopDocs;
//! # use tantivy::query::QueryParser; //! # use tantivy::query::QueryParser;
//! # use tantivy::schema::*;
//! # use tantivy::{doc, DocAddress, Index, Score};
//! # //! #
//! # fn main() { //! # fn main() {
//! # // Let's create a temporary directory for the //! # // Let's create a temporary directory for the
//! # // sake of this example //! # // sake of this example
//! # if let Ok(dir) = TempDir::new() { //! # if let Ok(dir) = TempDir::new("tantivy_example_dir") {
//! # run_example(dir.path()).unwrap(); //! # run_example(dir.path()).unwrap();
//! # dir.close().unwrap(); //! # dir.close().unwrap();
//! # } //! # }
@@ -95,8 +105,11 @@
//! //!
//! A good place for you to get started is to check out //! A good place for you to get started is to check out
//! the example code ( //! the example code (
//! [literate programming](https://tantivy-search.github.io/examples/basic_search.html) / //! [literate programming](http://fulmicoton.com/tantivy-examples/simple_search.html) /
//! [source code](https://github.com/tantivy-search/tantivy/blob/master/examples/basic_search.rs)) //! [source code](https://github.com/fulmicoton/tantivy/blob/master/examples/simple_search.rs))
#[macro_use]
extern crate lazy_static;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
@@ -110,9 +123,57 @@ extern crate log;
#[macro_use] #[macro_use]
extern crate failure; extern crate failure;
#[cfg(feature = "mmap")]
extern crate atomicwrites;
extern crate base64;
extern crate bit_set;
extern crate bitpacking;
extern crate byteorder;
extern crate combine;
extern crate crossbeam;
extern crate fnv;
extern crate futures;
extern crate futures_cpupool;
extern crate htmlescape;
extern crate itertools;
extern crate levenshtein_automata;
#[cfg(feature = "mmap")]
extern crate memmap;
extern crate num_cpus;
extern crate owning_ref;
extern crate regex;
extern crate rust_stemmers;
extern crate scoped_pool;
extern crate serde;
extern crate stable_deref_trait;
extern crate tantivy_fst;
extern crate tempdir;
extern crate tempfile;
extern crate uuid;
#[cfg(test)]
#[macro_use]
extern crate matches;
#[cfg(windows)]
extern crate winapi;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
#[macro_use]
extern crate maplit;
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
extern crate test; extern crate test;
#[macro_use]
extern crate downcast_rs;
#[macro_use]
extern crate fail;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
#[cfg(test)] #[cfg(test)]
mod functional_test; mod functional_test;
@@ -120,11 +181,14 @@ mod functional_test;
#[macro_use] #[macro_use]
mod macros; mod macros;
pub use crate::error::TantivyError; pub use error::TantivyError;
#[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")] #[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
pub use crate::error::TantivyError as Error; pub use error::TantivyError as Error;
pub use chrono;
extern crate census;
pub extern crate chrono;
extern crate owned_read;
/// Tantivy result. /// Tantivy result.
pub type Result<T> = std::result::Result<T, error::TantivyError>; pub type Result<T> = std::result::Result<T, error::TantivyError>;
@@ -161,15 +225,15 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use core::SegmentComponent;
pub use crate::core::SegmentComponent; pub use core::{Index, Searcher, Segment, SegmentId, SegmentMeta};
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use core::{InvertedIndexReader, SegmentReader};
pub use crate::core::{InvertedIndexReader, SegmentReader}; pub use directory::Directory;
pub use crate::directory::Directory; pub use indexer::IndexWriter;
pub use crate::indexer::IndexWriter; pub use postings::Postings;
pub use crate::postings::Postings; pub use schema::{Document, Term};
pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term}; pub use common::{i64_to_u64, u64_to_i64};
/// Expose the current version of tantivy, as well /// Expose the current version of tantivy, as well
/// whether it was compiled with the simd compression. /// whether it was compiled with the simd compression.
@@ -179,10 +243,10 @@ pub fn version() -> &'static str {
/// Defines tantivy's merging strategy /// Defines tantivy's merging strategy
pub mod merge_policy { pub mod merge_policy {
pub use crate::indexer::DefaultMergePolicy; pub use indexer::DefaultMergePolicy;
pub use crate::indexer::LogMergePolicy; pub use indexer::LogMergePolicy;
pub use crate::indexer::MergePolicy; pub use indexer::MergePolicy;
pub use crate::indexer::NoMergePolicy; pub use indexer::NoMergePolicy;
} }
/// A `u32` identifying a document within a segment. /// A `u32` identifying a document within a segment.
@@ -190,16 +254,6 @@ pub mod merge_policy {
/// as they are added in the segment. /// as they are added in the segment.
pub type DocId = u32; pub type DocId = u32;
/// A u64 assigned to every operation incrementally
///
/// All operations modifying the index receives an monotonic Opstamp.
/// The resulting state of the index is consistent with the opstamp ordering.
///
/// For instance, a commit with opstamp `32_423` will reflect all Add and Delete operations
/// with an opstamp `<= 32_423`. A delete operation with opstamp n will no affect a document added
/// with opstamp `n+1`.
pub type Opstamp = u64;
/// A f32 that represents the relevance of the document to the query /// A f32 that represents the relevance of the document to the query
/// ///
/// This is modelled internally as a `f32`. The /// This is modelled internally as a `f32`. The
@@ -212,13 +266,15 @@ pub type Score = f32;
pub type SegmentLocalId = u32; pub type SegmentLocalId = u32;
impl DocAddress { impl DocAddress {
/// Return the segment ordinal id that identifies the segment /// Return the segment ordinal.
/// hosting the document in the `Searcher` it is called from. /// The segment ordinal is an id identifying the segment
/// hosting the document. It is only meaningful, in the context
/// of a searcher.
pub fn segment_ord(self) -> SegmentLocalId { pub fn segment_ord(self) -> SegmentLocalId {
self.0 self.0
} }
/// Return the segment-local `DocId` /// Return the segment local `DocId`
pub fn doc(self) -> DocId { pub fn doc(self) -> DocId {
self.1 self.1
} }
@@ -227,31 +283,31 @@ impl DocAddress {
/// `DocAddress` contains all the necessary information /// `DocAddress` contains all the necessary information
/// to identify a document given a `Searcher` object. /// to identify a document given a `Searcher` object.
/// ///
/// It consists of an id identifying its segment, and /// It consists in an id identifying its segment, and
/// a segment-local `DocId`. /// its segment-local `DocId`.
/// ///
/// The id used for the segment is actually an ordinal /// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`. /// in the list of segment hold by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId); pub struct DocAddress(pub SegmentLocalId, pub DocId);
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE; use collector::tests::TestCollector;
use crate::core::SegmentReader; use core::SegmentReader;
use crate::docset::DocSet; use docset::DocSet;
use crate::query::BooleanQuery; use query::BooleanQuery;
use crate::schema::*;
use crate::DocAddress;
use crate::Index;
use crate::IndexWriter;
use crate::Postings;
use crate::ReloadPolicy;
use rand::distributions::Bernoulli; use rand::distributions::Bernoulli;
use rand::distributions::Uniform; use rand::distributions::Uniform;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use schema::*;
use DocAddress;
use Index;
use IndexWriter;
use Postings;
use ReloadPolicy;
pub fn assert_nearly_equals(expected: f32, val: f32) { pub fn assert_nearly_equals(expected: f32, val: f32) {
assert!( assert!(
@@ -276,7 +332,7 @@ mod tests {
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> { pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
StdRng::from_seed([seed_val; 32]) StdRng::from_seed([seed_val; 32])
.sample_iter(&Bernoulli::new(ratio).unwrap()) .sample_iter(&Bernoulli::new(ratio))
.take(n as usize) .take(n as usize)
.enumerate() .enumerate()
.filter_map(|(val, keep)| if keep { Some(val as u32) } else { None }) .filter_map(|(val, keep)| if keep { Some(val as u32) } else { None })
@@ -414,7 +470,7 @@ mod tests {
} }
} }
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool { fn advance_undeleted(docset: &mut DocSet, reader: &SegmentReader) -> bool {
while docset.advance() { while docset.advance() {
if !reader.is_deleted(docset.doc()) { if !reader.is_deleted(docset.doc()) {
return true; return true;
@@ -613,30 +669,6 @@ mod tests {
assert!(!postings.advance()); assert!(!postings.advance());
} }
#[test]
fn test_indexed_f64() {
let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_f64_field("value", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let val = std::f64::consts::PI;
index_writer.add_document(doc!(value_field => val));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let term = Term::from_field_f64(value_field, val);
let mut postings = searcher
.segment_reader(0)
.inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)
.unwrap();
assert!(postings.advance());
assert_eq!(postings.doc(), 0);
assert!(!postings.advance());
}
#[test] #[test]
fn test_indexedfield_not_in_documents() { fn test_indexedfield_not_in_documents() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -749,7 +781,7 @@ mod tests {
let searcher = reader.searcher(); let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap(); let topdocs = searcher.search(&query, &TestCollector).unwrap();
topdocs.docs().to_vec() topdocs.docs().to_vec()
}; };
assert_eq!( assert_eq!(
@@ -829,7 +861,6 @@ mod tests {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST); let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST); let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
let fast_field_float = schema_builder.add_f64_field("float", FAST);
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let stored_int_field = schema_builder.add_u64_field("text", STORED); let stored_int_field = schema_builder.add_u64_field("text", STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -837,8 +868,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
{ {
let document = let document = doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64);
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
index_writer.add_document(document); index_writer.add_document(document);
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
@@ -846,40 +876,29 @@ mod tests {
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0); let segment_reader: &SegmentReader = searcher.segment_reader(0);
{ {
let fast_field_reader_opt = segment_reader.fast_fields().u64(text_field); let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field);
assert!(fast_field_reader_opt.is_none()); assert!(fast_field_reader_res.is_err());
} }
{ {
let fast_field_reader_opt = segment_reader.fast_fields().u64(stored_int_field); let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(stored_int_field);
assert!(fast_field_reader_opt.is_none()); assert!(fast_field_reader_res.is_err());
} }
{ {
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_signed); let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(fast_field_signed);
assert!(fast_field_reader_opt.is_none()); assert!(fast_field_reader_res.is_err());
} }
{ {
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_float); let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
assert!(fast_field_reader_opt.is_none()); assert!(fast_field_reader_res.is_ok());
} let fast_field_reader = fast_field_reader_res.unwrap();
{
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_unsigned);
assert!(fast_field_reader_opt.is_some());
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4u64)
}
{
let fast_field_reader_opt = segment_reader.fast_fields().i64(fast_field_signed);
assert!(fast_field_reader_opt.is_some());
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4i64) assert_eq!(fast_field_reader.get(0), 4i64)
} }
{ {
let fast_field_reader_opt = segment_reader.fast_fields().f64(fast_field_float); let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
assert!(fast_field_reader_opt.is_some()); assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_opt.unwrap(); let fast_field_reader = fast_field_reader_res.unwrap();
assert_eq!(fast_field_reader.get(0), 4f64) assert_eq!(fast_field_reader.get(0), 4i64)
} }
} }
} }

View File

@@ -22,9 +22,11 @@
/// ///
/// # Example /// # Example
/// ///
/// ```rust /// ```
/// #[macro_use]
/// extern crate tantivy;
///
/// use tantivy::schema::{Schema, TEXT, FAST}; /// use tantivy::schema::{Schema, TEXT, FAST};
/// use tantivy::doc;
/// ///
/// //... /// //...
/// ///
@@ -65,7 +67,7 @@ macro_rules! doc(
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use crate::schema::{Schema, FAST, TEXT}; use schema::{Schema, FAST, TEXT};
#[test] #[test]
fn test_doc_basic() { fn test_doc_basic() {

Some files were not shown because too many files have changed in this diff Show More