Compare commits


23 Commits

Author SHA1 Message Date
Paul Masurel
80c25ae9f3 Release 0.10 2019-07-11 19:10:12 +09:00
Paul Masurel
4867be3d3b Kompass master (#590)
* Use once_cell in place of lazy_static

* Minor changes
2019-07-10 19:24:54 +09:00
Paul Masurel
697c7e721d Only compile bitpacker4x (#589) 2019-07-10 18:53:46 +09:00
Paul Masurel
3e368d92cb Issue/479 (#578)
* Sort by field relying on tweaked score
* Sort by u64/i64 get independent methods.
2019-07-07 17:12:31 +09:00
Paul Masurel
0bc2c64a53 2018 (#585)
* removing macro import for fail-rs

* Downcast-rs

* matches
2019-07-07 17:09:04 +09:00
Paul Masurel
35236c8634 Seek not required in Directory's write anymore (#584) 2019-07-03 10:12:33 +09:00
Paul Masurel
462774b15c Tiqb feature/2018 (#583)
* rust 2018

* Added CHANGELOG comment
2019-07-01 10:01:46 +09:00
Paul Masurel
185a5b8d31 updating rand (#582) 2019-06-29 13:11:42 +09:00
petr-tik
73d7791479 Add instructions for contributors (#574) 2019-06-27 09:59:07 +09:00
Kirill Zaborsky
f52b1e68d1 Fix typo (#573) 2019-06-27 09:57:37 +09:00
Paul Masurel
3e0907fe05 Fixed CHANGELOG and disable one test on windows (#577) 2019-06-27 09:48:53 +09:00
dependabot-preview[bot]
ab4a8916d3 Update bitpacking requirement from 0.6 to 0.7 (#575)
Updates the requirements on bitpacking to permit the latest version.

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-27 09:39:26 +09:00
Antoine Catton
bcd7386fc5 Add crates.io shield to the README (#572) 2019-06-18 11:19:06 +09:00
Paul Masurel
c23a7c992b Closes #552 (#570)
The different handles to `SegmentMeta` are closed before calling gc on
end_merge.
2019-06-16 14:12:13 +09:00
Paul Masurel
2a88094ec4 Disabling travis on OSX (#571) 2019-06-16 14:12:01 +09:00
Paul Masurel
ca3cfddab4 adding cond (#568) 2019-06-16 11:59:26 +09:00
Paul Masurel
7bd9f9773b trying to fix doc upload (#567) 2019-06-16 11:22:51 +09:00
Paul Masurel
e2da92fcb5 Petr tik n510 clear index (#566)
* Enables clearing the index

Closes #510

* Adds an examples to clear and rebuild index

* Addressing code review

Moved the example from examples/ to docstring above `clear`

* Corrected minor typos and missed/duplicate words

* Added stamper.revert method to be used for rollback

Added type alias for Opstamp

Moved to AtomicU64 on stable rust (since 1.34)

* Change the method name and doc-string

* Remove rollback from delete_all_documents

test_add_then_delete_all_documents fails with --test-threads 2

* Passes all the tests with any number of test-threads

(ran locally 5 times)

* Addressed code review

Deleted comments with debug info
changed ReloadPolicy to Manual

* Removing useless garbage_collect call and updated CHANGELOG
2019-06-12 09:40:03 +09:00
petr-tik
876e1451c4 Resume uploading docs to gh-pages (#565)
* Fixes #546

Generate docs and upload them. Need GH_TOKEN env var to be set in travis settings

* Investigate what TRAVIS* env vars are set
2019-06-12 09:30:09 +09:00
dependabot-preview[bot]
a37d2f9777 Update winapi requirement from 0.2 to 0.3 (#537)
* Update winapi requirement from 0.2 to 0.3

Updates the requirements on [winapi](https://github.com/retep998/winapi-rs) to permit the latest version.
- [Release notes](https://github.com/retep998/winapi-rs/releases)
- [Commits](https://github.com/retep998/winapi-rs/commits/0.3.7)

Signed-off-by: dependabot[bot] <support@dependabot.com>

* Fixing upgrading winapi (hopefully).
2019-06-06 10:23:13 +09:00
Paul Masurel
4822940b19 Issue/36 (#559)
* Added explanation

* Explain

* Splitting weight and idf

* Added comments

Closes #36
2019-06-06 10:03:54 +09:00
Paul Masurel
d590f4c6b0 Comments for IndexMeta (#560) 2019-06-06 09:24:31 +09:00
Paul Masurel
edfa619519 Update .travis.yml 2019-05-29 16:45:56 +09:00
168 changed files with 2898 additions and 2168 deletions

View File

@@ -10,7 +10,7 @@ env:
global:
- CRATE_NAME=tantivy
- TRAVIS_CARGO_NIGHTLY_FEATURE=""
-- secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=
+# - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=
addons:
apt:
@@ -38,12 +38,12 @@ matrix:
# Linux
#- env: TARGET=aarch64-unknown-linux-gnu
#- env: TARGET=i686-unknown-linux-gnu
-- env: TARGET=x86_64-unknown-linux-gnu CODECOV=1
+- env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 UPLOAD_DOCS=1
# - env: TARGET=x86_64-unknown-linux-musl CODECOV=1
# OSX
-- env: TARGET=x86_64-apple-darwin
-  os: osx
+#- env: TARGET=x86_64-apple-darwin
+#  os: osx
before_install:
- set -e
@@ -52,6 +52,7 @@ before_install:
install:
- sh ci/install.sh
- source ~/.cargo/env || true
+- env | grep "TRAVIS"
before_script:
- export PATH=$HOME/.cargo/bin:$PATH
@@ -61,12 +62,14 @@ before_script:
script:
- bash ci/script.sh
-after_success:
-- cargo doc-upload
before_deploy:
- sh ci/before_deploy.sh
+after_success:
+# Needs GH_TOKEN env var to be set in travis settings
+- if [[ -v GH_TOKEN ]]; then echo "GH TOKEN IS SET"; else echo "GH TOKEN NOT SET"; fi
+- if [[ -v UPLOAD_DOCS ]]; then cargo doc; cargo doc-upload; else echo "doc upload disabled."; fi
cache: cargo
before_cache:
# Travis can't cache files that are not readable by "others"

View File

@@ -3,11 +3,17 @@ Tantivy 0.10.0
*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*
+- Added an API to easily tweak or entirely replace the
+  default score. See `TopDocs::tweak_score` and `TopScore::custom_score` (@pmasurel)
- Added an ASCII folding filter (@drusellers)
- Bugfix in `query.count` in presence of deletes (@pmasurel)
+- Added `.explain(...)` in `Query` and `Weight` to (@pmasurel)
+- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
+  All segments are simply removed.
Minor
---------
+- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
  Calling .freq() or .doc() when .advance() has never been called
  on segment postings should panic from now on.
@@ -19,11 +25,12 @@ Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
only require a read lock. (@pmasurel)
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
+- Bugfix - Files get deleted slightly earlier
+- Compilation resources improved (@fdb-hiroshima)
## How to update?
-Your existing indexes are usable as is, but you may need some
-trivial updates.
+Your program should be usable as is.
### Fast fields
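The `delete_all_documents` entry above corresponds to the "clear index" PR (#566) in the commit list. As a rough sketch of how a caller might use the new 0.10 call (the setup boilerplate is illustrative, and the `Result`-returning signature of `delete_all_documents` is an assumption based on the changelog wording):

```rust
#[macro_use]
extern crate tantivy;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    writer.add_document(doc!(title => "stale document"));
    writer.commit()?;

    // New in 0.10: drop every segment at once instead of deleting term by term.
    writer.delete_all_documents()?;
    writer.commit()?;

    // The index is now empty and can be rebuilt with fresh documents.
    writer.add_document(doc!(title => "fresh document"));
    writer.commit()?;
    Ok(())
}
```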

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
-version = "0.10.0-dev"
+version = "0.10.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -10,11 +10,12 @@ homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
+edition = "2018"
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
-lazy_static = "1"
+once_cell = "0.2"
regex = "1.0"
tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true}
@@ -42,7 +43,7 @@ owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.1"
downcast-rs = { version="1.0" }
-bitpacking = "0.6"
+bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2"
fnv = "1.0.6"
owned-read = "0.4"
@@ -54,10 +55,10 @@ murmurhash32 = "0.2"
chrono = "0.4"
[target.'cfg(windows)'.dependencies]
-winapi = "0.2"
+winapi = "0.3"
[dev-dependencies]
-rand = "0.6"
+rand = "0.7"
maplit = "1"
matches = "0.1.8"
time = "0.1.42"

View File

@@ -4,6 +4,7 @@
[![Join the chat at https://gitter.im/tantivy-search/tantivy](https://badges.gitter.im/tantivy-search/tantivy.svg)](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Build status](https://ci.appveyor.com/api/projects/status/r7nb13kj23u8m9pj/branch/master?svg=true)](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
+[![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton)
![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
@@ -64,7 +65,6 @@ performance for different type of queries / collection.
library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
are within the scope of tantivy.
-
# Supported OS and compiler
Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
@@ -80,30 +80,61 @@ It will walk you through getting a wikipedia search engine up and running in a f
- [For the last released version](https://docs.rs/tantivy/)
- [For the last master branch](https://tantivy-search.github.io/tantivy/tantivy/index.html)
-# Compiling
-## Development
+# How can I support this project?
+There are many ways to support this project.
+- Use tantivy and tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
+- Report bugs
+- Write a blog post
+- Help with documentation by asking questions or submitting PRs
+- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
+- Talk about tantivy around you
+- Drop a word on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
+# Contributing code
+We use the GitHub Pull Request workflow - reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
+## Clone and build locally
Tantivy compiles on stable rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run:
+```bash
git clone https://github.com/tantivy-search/tantivy.git
cd tantivy
cargo build
+```
-## Running tests
+## Run tests
Some tests will not run with just `cargo test` because of `fail-rs`.
-To run the tests exhaustively, run `./run-tests.sh`.
+To run the tests exhaustively, run `./run-tests.sh`
-# How can I support this project ?
-There are many ways to support this project.
-- If you use tantivy, tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
-- Report bugs
-- Write a blog post
-- Complete documentation
-- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
-- Talk about tantivy around you
-- Drop a word on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
+## Debug
+You might find it useful to step through the programme with a debugger.
+### A failing test
+Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build`, so that the `target/` dir still exists. Use this bash one-liner to find the name of the most recent debug build of tantivy and run it under rust-gdb:
+```bash
+find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
+```
+Now that you are in rust-gdb, you can set breakpoints on lines and methods that match your source code and run the debug executable with the flags that you normally pass to `cargo test`, like this:
+```bash
+(gdb) run --test-threads 1 --test $NAME_OF_TEST
+```
+### An example
+By default, rustc compiles everything in the `examples/` dir in debug mode. This makes it easy for you to write examples to reproduce bugs.
+```bash
+rust-gdb target/debug/examples/$EXAMPLE_NAME
+(gdb) run
+```

View File

@@ -10,8 +10,6 @@
// - search for the best document matchings "sea whale"
// - retrieve the best document original content.
-
-extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -7,8 +7,6 @@
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.
-
-extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -10,8 +10,6 @@
// - search for the best document matchings "sea whale"
// - retrieve the best document original content.
-
-extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -23,8 +23,6 @@
// index a single document?), but aims at demonstrating the mechanism that makes indexing
// from several threads possible.
-
-extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -4,7 +4,6 @@
// your hit result.
// Snippet are an extracted of a target document, and returned in HTML format.
// The keyword searched by the user are highlighted with a `<b>` tag.
-extern crate tempdir;
// ---
// Importing tantivy...

View File

@@ -9,8 +9,6 @@
// - add a few stop words
// - index few documents in our index
-
-extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -1,4 +1,4 @@
-extern crate tantivy;
+use tantivy;
use tantivy::schema::*;
// # Document from json

View File

@@ -1,10 +1,10 @@
use super::Collector;
-use collector::SegmentCollector;
-use DocId;
-use Result;
-use Score;
-use SegmentLocalId;
-use SegmentReader;
+use crate::collector::SegmentCollector;
+use crate::DocId;
+use crate::Result;
+use crate::Score;
+use crate::SegmentLocalId;
+use crate::SegmentReader;
/// `CountCollector` collector only counts how many
/// documents match the query.
@@ -94,8 +94,8 @@ impl SegmentCollector for SegmentCountCollector {
#[cfg(test)]
mod tests {
use super::{Count, SegmentCountCollector};
-use collector::Collector;
-use collector::SegmentCollector;
+use crate::collector::Collector;
+use crate::collector::SegmentCollector;
#[test]
fn test_count_collect_does_not_requires_scoring() {
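The pattern repeated across this file and the following ones is the mechanical part of the "Switched to Rust 2018" change from the changelog: crate-internal imports gain a `crate::` prefix. A tiny illustrative sketch (the module name is made up, not tantivy's):

```rust
// In edition 2015 a crate-local path could be written from the root implicitly:
//     use collector::SegmentCollector;
// In edition 2018 the same import must be anchored with `crate::` (or `self::`/`super::`):
mod collector {
    pub struct SegmentCollector;
}

use crate::collector::SegmentCollector;

fn main() {
    let _collector = SegmentCollector;
}
```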

View File

@@ -0,0 +1,126 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector};
use crate::Result;
use crate::{DocAddress, DocId, Score, SegmentReader};
pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
custom_scorer: TCustomScorer,
collector: TopCollector<TScore>,
}
impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
where
TScore: Clone + PartialOrd,
{
pub fn new(
custom_scorer: TCustomScorer,
limit: usize,
) -> CustomScoreTopCollector<TCustomScorer, TScore> {
CustomScoreTopCollector {
custom_scorer,
collector: TopCollector::with_limit(limit),
}
}
}
/// A custom segment scorer makes it possible to define any kind of score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
pub trait CustomSegmentScorer<TScore>: 'static {
/// Computes the score of a specific `doc`.
fn score(&self, doc: DocId) -> TScore;
}
/// `CustomScorer` makes it possible to define any kind of score.
///
/// The `CustomerScorer` itself does not make much of the computation itself.
/// Instead, it helps constructing `Self::Child` instances that will compute
/// the score at a segment scale.
pub trait CustomScorer<TScore>: Sync {
/// Type of the associated [`CustomSegmentScorer`](./trait.CustomSegmentScorer.html).
type Child: CustomSegmentScorer<TScore>;
/// Builds a child scorer for a specific segment. The child scorer is associated to
/// a specific segment.
fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
where
TCustomScorer: CustomScorer<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;
type Child = CustomScoreTopSegmentCollector<TCustomScorer::Child, TScore>;
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> Result<Self::Child> {
let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(CustomScoreTopSegmentCollector {
segment_collector,
segment_scorer,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct CustomScoreTopSegmentCollector<T, TScore>
where
TScore: 'static + PartialOrd + Clone + Send + Sync + Sized,
T: CustomSegmentScorer<TScore>,
{
segment_collector: TopSegmentCollector<TScore>,
segment_scorer: T,
}
impl<T, TScore> SegmentCollector for CustomScoreTopSegmentCollector<T, TScore>
where
TScore: 'static + PartialOrd + Clone + Send + Sync,
T: 'static + CustomSegmentScorer<TScore>,
{
type Fruit = Vec<(TScore, DocAddress)>;
fn collect(&mut self, doc: DocId, _score: Score) {
let score = self.segment_scorer.score(doc);
self.segment_collector.collect(doc, score);
}
fn harvest(self) -> Vec<(TScore, DocAddress)> {
self.segment_collector.harvest()
}
}
impl<F, TScore, T> CustomScorer<TScore> for F
where
F: 'static + Send + Sync + Fn(&SegmentReader) -> T,
T: CustomSegmentScorer<TScore>,
{
type Child = T;
fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
Ok((self)(segment_reader))
}
}
impl<F, TScore> CustomSegmentScorer<TScore> for F
where
F: 'static + Sync + Send + Fn(DocId) -> TScore,
{
fn score(&self, doc: DocId) -> TScore {
(self)(doc)
}
}
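The two traits above are normally satisfied by the closure blanket impls at the bottom of this new file, but they can also be implemented by hand. A hedged sketch of what that might look like from the caller's side (the `ByRating` types and field names are illustrative, not part of this PR):

```rust
use tantivy::collector::{CustomScorer, CustomSegmentScorer};
use tantivy::fastfield::FastFieldReader;
use tantivy::schema::Field;
use tantivy::{DocId, Result, SegmentReader, TantivyError};

struct ByRating {
    field: Field,
}

struct SegmentByRating {
    reader: FastFieldReader<u64>,
}

impl CustomScorer<u64> for ByRating {
    type Child = SegmentByRating;
    // Gather per-segment state (here, a u64 fast field reader) once per segment.
    fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
        let reader = segment_reader
            .fast_fields()
            .u64(self.field)
            .ok_or_else(|| TantivyError::SchemaError("not a u64 fast field".to_string()))?;
        Ok(SegmentByRating { reader })
    }
}

impl CustomSegmentScorer<u64> for SegmentByRating {
    // The per-document score is simply the fast field value.
    fn score(&self, doc: DocId) -> u64 {
        self.reader.get(doc)
    }
}

fn main() {
    // Construct-only smoke test; a real program would pass this to TopDocs::custom_score.
    let _scorer = ByRating { field: Field(0) };
}
```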

View File

@@ -1,9 +1,15 @@
-use collector::Collector;
-use collector::SegmentCollector;
-use docset::SkipResult;
-use fastfield::FacetReader;
-use schema::Facet;
-use schema::Field;
+use crate::collector::Collector;
+use crate::collector::SegmentCollector;
+use crate::docset::SkipResult;
+use crate::fastfield::FacetReader;
+use crate::schema::Facet;
+use crate::schema::Field;
+use crate::DocId;
+use crate::Result;
+use crate::Score;
+use crate::SegmentLocalId;
+use crate::SegmentReader;
+use crate::TantivyError;
use std::cmp::Ordering;
use std::collections::btree_map;
use std::collections::BTreeMap;
@@ -12,12 +18,6 @@ use std::collections::BinaryHeap;
use std::collections::Bound;
use std::iter::Peekable;
use std::{u64, usize};
-use DocId;
-use Result;
-use Score;
-use SegmentLocalId;
-use SegmentReader;
-use TantivyError;
struct Hit<'a> {
count: u64,
@@ -27,13 +27,13 @@ struct Hit<'a> {
impl<'a> Eq for Hit<'a> {}
impl<'a> PartialEq<Hit<'a>> for Hit<'a> {
-fn eq(&self, other: &Hit) -> bool {
+fn eq(&self, other: &Hit<'_>) -> bool {
self.count == other.count
}
}
impl<'a> PartialOrd<Hit<'a>> for Hit<'a> {
-fn partial_cmp(&self, other: &Hit) -> Option<Ordering> {
+fn partial_cmp(&self, other: &Hit<'_>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
@@ -398,7 +398,7 @@ impl<'a> Iterator for FacetChildIterator<'a> {
}
impl FacetCounts {
-pub fn get<T>(&self, facet_from: T) -> FacetChildIterator
+pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
where
Facet: From<T>,
{
@@ -412,7 +412,8 @@ impl FacetCounts {
let facet_after = Facet::from_encoded_string(facet_after_bytes);
Bound::Excluded(facet_after)
};
-let underlying: btree_map::Range<_, _> = self.facet_counts.range((left_bound, right_bound));
+let underlying: btree_map::Range<'_, _, _> =
+    self.facet_counts.range((left_bound, right_bound));
FacetChildIterator { underlying }
}
@@ -453,12 +454,12 @@ impl FacetCounts {
#[cfg(test)]
mod tests {
use super::{FacetCollector, FacetCounts};
-use core::Index;
-use query::AllQuery;
+use crate::core::Index;
+use crate::query::AllQuery;
+use crate::schema::{Document, Facet, Field, Schema};
use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
-use schema::{Document, Facet, Field, Schema};
use std::iter;
#[test]

View File

@@ -66,7 +66,7 @@ let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
The `Collector` trait is implemented for up to 4 collectors.
If you have more than 4 collectors, you can either group them into
-tuples of tuples `(a,(b,(c,d)))`, or rely on `MultiCollector`'s.
+tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html).
# Combining several collectors dynamically
@@ -85,12 +85,12 @@ See the `custom_collector` example.
*/
-use downcast_rs;
-use DocId;
-use Result;
-use Score;
-use SegmentLocalId;
-use SegmentReader;
+use crate::DocId;
+use crate::Result;
+use crate::Score;
+use crate::SegmentLocalId;
+use crate::SegmentReader;
+use downcast_rs::impl_downcast;
mod count_collector;
pub use self::count_collector::Count;
@@ -103,8 +103,11 @@ mod top_collector;
mod top_score_collector;
pub use self::top_score_collector::TopDocs;
-mod top_field_collector;
-pub use self::top_field_collector::TopDocsByField;
+mod custom_score_top_collector;
+pub use self::custom_score_top_collector::{CustomScorer, CustomSegmentScorer};
+mod tweak_score_top_collector;
+pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
mod facet_collector;
pub use self::facet_collector::FacetCollector;
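The module doc quoted in this hunk (the `(a,(b,(c,d)))` remark) refers to the fact that a tuple of collectors is itself a collector. A small sketch of that usage, assuming a searcher and query built elsewhere:

```rust
use tantivy::collector::{Count, TopDocs};
use tantivy::query::Query;
use tantivy::{DocAddress, Result, Score, Searcher};

// A 2-tuple of collectors is itself a `Collector`; its fruit is the tuple of fruits,
// matching the `(usize, Vec<(Score, DocAddress)>)` destructuring shown in the hunk header.
fn count_and_top_ten(
    searcher: &Searcher,
    query: &dyn Query,
) -> Result<(usize, Vec<(Score, DocAddress)>)> {
    searcher.search(query, &(Count, TopDocs::with_limit(10)))
}
```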

View File

@@ -1,30 +1,30 @@
use super::Collector;
use super::SegmentCollector;
-use collector::Fruit;
+use crate::collector::Fruit;
+use crate::DocId;
+use crate::Result;
+use crate::Score;
+use crate::SegmentLocalId;
+use crate::SegmentReader;
+use crate::TantivyError;
use std::marker::PhantomData;
use std::ops::Deref;
-use DocId;
-use Result;
-use Score;
-use SegmentLocalId;
-use SegmentReader;
-use TantivyError;
pub struct MultiFruit {
-sub_fruits: Vec<Option<Box<Fruit>>>,
+sub_fruits: Vec<Option<Box<dyn Fruit>>>,
}
pub struct CollectorWrapper<TCollector: Collector>(TCollector);
impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
-type Fruit = Box<Fruit>;
-type Child = Box<BoxableSegmentCollector>;
+type Fruit = Box<dyn Fruit>;
+type Child = Box<dyn BoxableSegmentCollector>;
fn for_segment(
&self,
segment_local_id: u32,
reader: &SegmentReader,
-) -> Result<Box<BoxableSegmentCollector>> {
+) -> Result<Box<dyn BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child)))
}
@@ -33,7 +33,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
self.0.requires_scoring()
}
-fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> {
+fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<dyn Fruit>> {
let typed_fruit: Vec<TCollector::Fruit> = children
.into_iter()
.map(|untyped_fruit| {
@@ -50,21 +50,21 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
}
}
-impl SegmentCollector for Box<BoxableSegmentCollector> {
-type Fruit = Box<Fruit>;
+impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
+type Fruit = Box<dyn Fruit>;
fn collect(&mut self, doc: u32, score: f32) {
self.as_mut().collect(doc, score);
}
-fn harvest(self) -> Box<Fruit> {
+fn harvest(self) -> Box<dyn Fruit> {
BoxableSegmentCollector::harvest_from_box(self)
}
}
pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: f32);
-fn harvest_from_box(self: Box<Self>) -> Box<Fruit>;
+fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
}
pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector);
@@ -76,7 +76,7 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
self.0.collect(doc, score);
}
-fn harvest_from_box(self: Box<Self>) -> Box<Fruit> {
+fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit> {
Box::new(self.0.harvest())
}
}
@@ -157,8 +157,9 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
#[allow(clippy::type_complexity)]
#[derive(Default)]
pub struct MultiCollector<'a> {
-collector_wrappers:
-Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>,
+collector_wrappers: Vec<
+Box<dyn Collector<Child = Box<dyn BoxableSegmentCollector>, Fruit = Box<dyn Fruit>> + 'a>,
+>,
}
impl<'a> MultiCollector<'a> {
@@ -207,7 +208,7 @@ impl<'a> Collector for MultiCollector<'a> {
}
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
-let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len())
+let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len())
.map(|_| Vec::with_capacity(segments_multifruits.len()))
.collect::<Vec<_>>();
for segment_multifruit in segments_multifruits {
@@ -230,7 +231,7 @@ impl<'a> Collector for MultiCollector<'a> {
}
pub struct MultiCollectorChild {
-children: Vec<Box<BoxableSegmentCollector>>,
+children: Vec<Box<dyn BoxableSegmentCollector>>,
}
impl SegmentCollector for MultiCollectorChild {
@@ -257,12 +258,12 @@ impl SegmentCollector for MultiCollectorChild {
mod tests {
use super::*;
-use collector::{Count, TopDocs};
-use query::TermQuery;
-use schema::IndexRecordOption;
-use schema::{Schema, TEXT};
-use Index;
-use Term;
+use crate::collector::{Count, TopDocs};
+use crate::query::TermQuery;
+use crate::schema::IndexRecordOption;
+use crate::schema::{Schema, TEXT};
+use crate::Index;
+use crate::Term;
#[test]
fn test_multi_collector() {
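Alongside the `crate::` imports, the other edition-2018 cleanup in this file is spelling trait objects as `dyn Trait`. A minimal illustrative sketch (the `Fruit` trait here is a stand-in, not tantivy's):

```rust
trait Fruit {}
impl Fruit for usize {}

// Edition 2015 allowed the bare trait object `Box<Fruit>`;
// edition 2018 expects the explicit `dyn` form used throughout this diff.
fn boxed_fruit() -> Box<dyn Fruit> {
    Box::new(0usize)
}

fn main() {
    let _fruit = boxed_fruit();
}
```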

View File

@@ -1,12 +1,12 @@
use super::*;
-use core::SegmentReader;
-use fastfield::BytesFastFieldReader;
-use fastfield::FastFieldReader;
-use schema::Field;
-use DocAddress;
-use DocId;
-use Score;
-use SegmentLocalId;
+use crate::core::SegmentReader;
+use crate::fastfield::BytesFastFieldReader;
+use crate::fastfield::FastFieldReader;
+use crate::schema::Field;
+use crate::DocAddress;
+use crate::DocId;
+use crate::Score;
+use crate::SegmentLocalId;
/// Stores all of the doc ids.
/// This collector is only used for tests.

View File

@@ -1,11 +1,11 @@
+use crate::DocAddress;
+use crate::DocId;
+use crate::Result;
+use crate::SegmentLocalId;
+use crate::SegmentReader;
use serde::export::PhantomData;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
-use DocAddress;
-use DocId;
-use Result;
-use SegmentLocalId;
-use SegmentReader;
/// Contains a feature (field, score, etc.) of a document along with the document address.
///
@@ -177,9 +177,8 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
#[cfg(test)]
mod tests {
-use super::{TopCollector, TopSegmentCollector};
-use DocAddress;
-use Score;
+use super::TopSegmentCollector;
+use crate::DocAddress;
#[test]
fn test_top_collector_not_at_capacity() {
@@ -215,10 +214,4 @@ mod tests {
]
);
}
-#[test]
-#[should_panic]
-fn test_top_0() {
-let _collector: TopCollector<Score> = TopCollector::with_limit(0);
-}
}

View File

@@ -1,271 +0,0 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use std::marker::PhantomData;
use DocAddress;
use Result;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
/// The Top Field Collector keeps track of the K documents
/// sorted by a fast field in the index
///
/// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, Field, FAST, TEXT};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
///
/// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("title", TEXT);
/// # let rating = schema_builder.add_u64_field("rating", FAST);
/// # let schema = schema_builder.build();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit()?;
/// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs,
/// # vec![(97u64, DocAddress(0u32, 1)),
/// # (80u64, DocAddress(0u32, 3))]);
/// # Ok(())
/// # }
/// #
/// /// Searches the document matching the given query, and
/// /// collects the top 10 documents, order by the `field`
/// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &Query,
/// sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
///
/// // This is where we build our collector!
/// let top_docs_by_rating = TopDocs::with_limit(2).order_by_field(sort_by_field);
///
/// // ... and here is our documents. Not this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for each documents.
/// searcher.search(query, &top_docs_by_rating)
/// }
/// ```
pub struct TopDocsByField<T> {
collector: TopCollector<T>,
field: Field,
}
impl<T: FastValue + PartialOrd + Clone> TopDocsByField<T> {
/// Creates a top field collector, with a number of documents equal to "limit".
///
/// The given field name must be a fast field, otherwise the collector have an error while
/// collecting results.
///
/// This constructor is crate-private. Client are supposed to call
/// build `TopDocsByField` object using the `TopDocs` API.
///
/// e.g.:
/// `TopDocs::with_limit(2).order_by_field(sort_by_field)`
///
/// # Panics
/// The method panics if limit is 0
pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> {
TopDocsByField {
collector: TopCollector::with_limit(limit),
field,
}
}
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByField<T> {
type Fruit = Vec<(T, DocAddress)>;
type Child = TopFieldSegmentCollector<T>;
fn for_segment(
&self,
segment_local_id: SegmentLocalId,
reader: &SegmentReader,
) -> Result<TopFieldSegmentCollector<T>> {
let collector = self.collector.for_segment(segment_local_id, reader)?;
let reader = reader.fast_fields().u64(self.field).ok_or_else(|| {
let field_name = reader.schema().get_field_name(self.field);
TantivyError::SchemaError(format!("Failed to find fast field reader {:?}", field_name))
})?;
Ok(TopFieldSegmentCollector {
collector,
reader,
_type: PhantomData,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<Vec<(T, DocAddress)>>,
) -> Result<Vec<(T, DocAddress)>> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct TopFieldSegmentCollector<T> {
collector: TopSegmentCollector<u64>,
reader: FastFieldReader<u64>,
_type: PhantomData<T>,
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
for TopFieldSegmentCollector<T>
{
type Fruit = Vec<(T, DocAddress)>;
fn collect(&mut self, doc: u32, _score: f32) {
let field_value = self.reader.get(doc);
self.collector.collect(doc, field_value);
}
fn harvest(self) -> Vec<(T, DocAddress)> {
self.collector
.harvest()
.into_iter()
.map(|(val, doc_address)| (T::from_u64(val), doc_address))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::TopDocsByField;
use collector::Collector;
use collector::TopDocs;
use query::Query;
use query::QueryParser;
use schema::Field;
use schema::IntOptions;
use schema::{Schema, FAST, TEXT};
use DocAddress;
use Index;
use IndexWriter;
use TantivyError;
const TITLE: &str = "title";
const SIZE: &str = "size";
#[test]
fn test_top_collector_not_at_capacity() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, query) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
index_writer.add_document(doc!(
title => "growler of beer",
size => 64u64,
));
index_writer.add_document(doc!(
title => "pint of beer",
size => 16u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
top_docs,
vec![
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))
]
);
}
#[test]
#[should_panic]
fn test_field_does_not_exist() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
.expect("should panic");
}
#[test]
fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, IntOptions::default());
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0);
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size);
assert_matches!(
top_collector
.for_segment(0, segment)
.map(|_| ())
.unwrap_err(),
TantivyError::SchemaError(_)
);
}
fn index(
query: &str,
query_field: Field,
schema: Schema,
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer);
index_writer.commit().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]);
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
}
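This deleted `TopDocsByField` collector is superseded by `TopDocs::order_by_u64_field` (see the `top_score_collector` changes below). A hedged sketch of the migration for callers, assuming the 0.10 method name shown in that diff:

```rust
use tantivy::collector::{Collector, TopDocs};
use tantivy::schema::Field;
use tantivy::DocAddress;

// 0.9:  TopDocs::with_limit(10).order_by_field(rating_field) returned a TopDocsByField<u64>.
// 0.10: the u64-specific method returns an opaque collector with the same fruit type.
fn top_ten_by_rating(rating_field: Field) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
    TopDocs::with_limit(10).order_by_u64_field(rating_field)
}

fn main() {
    // Field(0) is a placeholder; in a real program the field comes from the schema.
    let _collector = top_ten_by_rating(Field(0));
}
```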

View File

@@ -1,16 +1,18 @@
use super::Collector;
-use collector::top_collector::TopCollector;
-use collector::top_collector::TopSegmentCollector;
-use collector::SegmentCollector;
-use collector::TopDocsByField;
-use fastfield::FastValue;
-use schema::Field;
-use DocAddress;
-use DocId;
-use Result;
-use Score;
-use SegmentLocalId;
-use SegmentReader;
+use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
+use crate::collector::top_collector::TopCollector;
+use crate::collector::top_collector::TopSegmentCollector;
+use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
+use crate::collector::{
+    CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
+};
+use crate::schema::Field;
+use crate::DocAddress;
+use crate::DocId;
+use crate::Result;
+use crate::Score;
+use crate::SegmentLocalId;
+use crate::SegmentReader;
/// The Top Score Collector keeps track of the K documents
/// sorted by their score.
@@ -77,13 +79,311 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field.
///
-/// (By default, `TopDocs` collects the top-K documents sorted by
-/// the similarity score.)
-pub fn order_by_field<T: PartialOrd + FastValue + Clone>(
-    self,
-    field: Field,
-) -> TopDocsByField<T> {
-    TopDocsByField::new(field, self.0.limit())
-}
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("title", TEXT);
/// # let rating = schema_builder.add_u64_field("rating", FAST);
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit()?;
/// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs,
/// # vec![(97u64, DocAddress(0u32, 1)),
/// # (80u64, DocAddress(0u32, 3))]);
/// # Ok(())
/// # }
///
///
/// /// Searches the document matching the given query, and
/// /// collects the top 10 documents, order by the u64-`field`
/// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &Query,
/// sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
///
/// // This is where we build our topdocs collector
/// //
/// // Note the generics parameter that needs to match the
/// // type `sort_by_field`.
/// let top_docs_by_rating = TopDocs
/// ::with_limit(10)
/// .order_by_u64_field(sort_by_field);
///
/// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for
/// // each documents.
/// //
/// // The vec is sorted decreasingly by `sort_by_field`, and has a
/// // length of 10, or less if not enough documents matched the
/// // query.
/// let resulting_docs: Vec<(u64, DocAddress)> =
/// searcher.search(query, &top_docs_by_rating)?;
///
/// Ok(resulting_docs)
/// }
/// ```
///
/// # Panics
///
/// May panic if the field requested is not a fast field.
///
pub fn order_by_u64_field(
self,
field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
self.custom_score(move |segment_reader: &SegmentReader| {
let ff_reader = segment_reader
.fast_fields()
.u64(field)
.expect("Field requested is not a i64/u64 fast field.");
move |doc: DocId| ff_reader.get(doc)
})
}
/// Ranks the documents using a custom score.
///
/// This method offers a convenient way to tweak or replace
/// the documents score. As suggested by the prototype you can
/// manually define your own [`ScoreTweaker`](./trait.ScoreTweaker.html)
/// and pass it as an argument, but there is a much simpler way to
/// tweak your score: you can use a closure as in the following
/// example.
///
/// # Example
///
/// Typically, you will want to rely on one or more fast fields,
/// to alter the original relevance `Score`.
///
/// For instance, in the following, we assume that we are implementing
/// an e-commerce website that has a fast field called `popularity`
/// that rates whether a product is typically often bought by users.
///
/// In the following example will will tweak our ranking a bit by
/// boosting popular products a notch.
///
/// In more serious application, this tweaking could involved running a
/// learning-to-rank model over various features
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{Index, DocAddress, DocId, Score};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
/// # schema_builder.add_text_field("product_name", TEXT);
/// # schema_builder.add_u64_field("popularity", FAST);
/// # schema_builder.build()
/// # }
/// #
/// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap();
/// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
///
/// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs
/// ::with_limit(10)
/// .tweak_score(move |segment_reader: &SegmentReader| {
/// // The argument is a function that returns our scoring
/// // function.
/// //
/// // The point of this "mother" function is to gather all
/// // of the segment level information we need for scoring.
/// // Typically, fast_fields.
/// //
/// // In our case, we will get a reader for the popularity
/// // fast field.
/// let popularity_reader =
/// segment_reader.fast_fields().u64(popularity).unwrap();
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
/// popularity_boost_score * original_score
/// }
/// });
/// # let reader = index.reader()?;
/// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<(Score, DocAddress)> =
/// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ```
///
/// # See also
/// [custom_score(...)](#method.custom_score).
pub fn tweak_score<TScore, TScoreSegmentTweaker, TScoreTweaker>(
self,
score_tweaker: TScoreTweaker,
) -> impl Collector<Fruit = Vec<(TScore, DocAddress)>>
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
{
TweakedScoreTopCollector::new(score_tweaker, self.0.limit())
}
/// Ranks the documents using a custom score.
///
/// This method offers a convenient way to use a different score.
///
/// As suggested by the prototype you can manually define your
/// own [`CustomScorer`](./trait.CustomScorer.html)
/// and pass it as an argument, but there is a much simpler way to
/// tweak your score: you can use a closure as in the following
/// example.
///
/// # Limitation
///
/// This method only makes it possible to compute the score from a given
/// `DocId`, fastfield values for the doc and any information you could
/// have precomputed beforehands. It does not make it possible for instance
/// to compute something like TfIdf as it does not have access to the list of query
/// terms present in the document, nor the term frequencies for the different terms.
///
/// It can be used if your search engine relies on a learning-to-rank model for instance,
/// which does not rely on the term frequencies or positions as features.
///
/// # Example
///
/// ```rust
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{Index, DocAddress, DocId};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
/// # schema_builder.add_text_field("product_name", TEXT);
/// # schema_builder.add_u64_field("popularity", FAST);
/// # schema_builder.add_u64_field("boosted", FAST);
/// # schema_builder.build()
/// # }
/// #
/// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap();
/// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// let boosted: Field = index.schema().get_field("boosted").unwrap();
/// # index_writer.add_document(doc!(boosted=>1u64, product_name => "The Diary of Muadib", popularity => 1u64));
/// # index_writer.add_document(doc!(boosted=>0u64, product_name => "A Dairy Cow", popularity => 10u64));
/// # index_writer.add_document(doc!(boosted=>0u64, product_name => "The Diary of a Young Girl", popularity => 15u64));
/// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
///
/// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs
/// ::with_limit(10)
/// .custom_score(move |segment_reader: &SegmentReader| {
/// // The argument is a function that returns our scoring
/// // function.
/// //
/// // The point of this "mother" function is to gather all
/// // of the segment level information we need for scoring.
/// // Typically, fast_fields.
/// //
/// // In our case, we will get a reader for the popularity
/// // fast field and a boosted field.
/// //
/// // We want to get boosted items score, and when we get
/// // a tie, return the item with the highest popularity.
/// //
/// // Note that this is implemented by using a `(u64, u64)`
/// // as a score.
/// let popularity_reader =
/// segment_reader.fast_fields().u64(popularity).unwrap();
/// let boosted_reader =
/// segment_reader.fast_fields().u64(boosted).unwrap();
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get(doc);
/// let boosted: u64 = boosted_reader.get(doc);
/// // Score do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order
/// // for free.
/// (boosted, popularity)
/// }
/// });
/// # let reader = index.reader()?;
/// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<((u64, u64), DocAddress)> =
/// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ```
///
/// # See also
/// [tweak_score(...)](#method.tweak_score).
pub fn custom_score<TScore, TCustomSegmentScorer, TCustomScorer>(
self,
custom_score: TCustomScorer,
) -> impl Collector<Fruit = Vec<(TScore, DocAddress)>>
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
{
CustomScoreTopCollector::new(custom_score, self.0.limit())
}
}
@@ -128,12 +428,13 @@ impl SegmentCollector for TopScoreSegmentCollector {
#[cfg(test)]
mod tests {
use super::TopDocs;
-use query::QueryParser;
-use schema::Schema;
-use schema::TEXT;
-use DocAddress;
-use Index;
-use Score;
+use crate::collector::Collector;
+use crate::query::{Query, QueryParser};
+use crate::schema::{Field, Schema, FAST, STORED, TEXT};
+use crate::DocAddress;
+use crate::Index;
+use crate::IndexWriter;
+use crate::Score;
fn make_index() -> Index {
let mut schema_builder = Schema::builder();
@@ -200,4 +501,97 @@ mod tests {
TopDocs::with_limit(0);
}
const TITLE: &str = "title";
const SIZE: &str = "size";
#[test]
fn test_top_field_collector_not_at_capacity() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, query) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
index_writer.add_document(doc!(
title => "growler of beer",
size => 64u64,
));
index_writer.add_document(doc!(
title => "pint of beer",
size => 16u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
top_docs,
vec![
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))
]
);
}
#[test]
#[should_panic]
fn test_field_does_not_exist() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
.expect("should panic");
}
#[test]
#[should_panic(expected = "Field requested is not a i64/u64 fast field")]
fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, STORED);
let schema = schema_builder.build();
let (index, _) = index("beer", title, schema, |index_writer| {
index_writer.add_document(doc!(
title => "bottle of beer",
size => 12u64,
));
});
let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
assert!(top_collector.for_segment(0, segment).is_ok());
}
fn index(
query: &str,
query_field: Field,
schema: Schema,
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer);
index_writer.commit().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]);
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
} }

View File

@@ -0,0 +1,129 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector};
use crate::DocAddress;
use crate::{DocId, Result, Score, SegmentReader};
pub(crate) struct TweakedScoreTopCollector<TScoreTweaker, TScore = Score> {
score_tweaker: TScoreTweaker,
collector: TopCollector<TScore>,
}
impl<TScoreTweaker, TScore> TweakedScoreTopCollector<TScoreTweaker, TScore>
where
TScore: Clone + PartialOrd,
{
pub fn new(
score_tweaker: TScoreTweaker,
limit: usize,
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
TweakedScoreTopCollector {
score_tweaker,
collector: TopCollector::with_limit(limit),
}
}
}
/// A `ScoreSegmentTweaker` makes it possible to modify the default score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
pub trait ScoreSegmentTweaker<TScore>: 'static {
/// Tweak the given `score` for the document `doc`.
fn score(&self, doc: DocId, score: Score) -> TScore;
}
/// `ScoreTweaker` makes it possible to tweak the score
/// emitted by the scorer into another one.
///
/// The `ScoreTweaker` itself does not do much of the computation.
/// Instead, it helps construct `Self::Child` instances that will compute
/// the score at the segment scale.
pub trait ScoreTweaker<TScore>: Sync {
/// Type of the associated [`ScoreSegmentTweaker`](./trait.ScoreSegmentTweaker.html).
type Child: ScoreSegmentTweaker<TScore>;
/// Builds a child tweaker for a specific segment. The child scorer is
/// associated with that segment.
fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where
TScoreTweaker: ScoreTweaker<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;
type Child = TopTweakedScoreSegmentCollector<TScoreTweaker::Child, TScore>;
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> Result<Self::Child> {
let segment_scorer = self.score_tweaker.segment_tweaker(segment_reader)?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(TopTweakedScoreSegmentCollector {
segment_collector,
segment_scorer,
})
}
fn requires_scoring(&self) -> bool {
true
}
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct TopTweakedScoreSegmentCollector<TSegmentScoreTweaker, TScore>
where
TScore: 'static + PartialOrd + Clone + Send + Sync + Sized,
TSegmentScoreTweaker: ScoreSegmentTweaker<TScore>,
{
segment_collector: TopSegmentCollector<TScore>,
segment_scorer: TSegmentScoreTweaker,
}
impl<TSegmentScoreTweaker, TScore> SegmentCollector
for TopTweakedScoreSegmentCollector<TSegmentScoreTweaker, TScore>
where
TScore: 'static + PartialOrd + Clone + Send + Sync,
TSegmentScoreTweaker: 'static + ScoreSegmentTweaker<TScore>,
{
type Fruit = Vec<(TScore, DocAddress)>;
fn collect(&mut self, doc: DocId, score: Score) {
let score = self.segment_scorer.score(doc, score);
self.segment_collector.collect(doc, score);
}
fn harvest(self) -> Vec<(TScore, DocAddress)> {
self.segment_collector.harvest()
}
}
impl<F, TScore, TSegmentScoreTweaker> ScoreTweaker<TScore> for F
where
F: 'static + Send + Sync + Fn(&SegmentReader) -> TSegmentScoreTweaker,
TSegmentScoreTweaker: ScoreSegmentTweaker<TScore>,
{
type Child = TSegmentScoreTweaker;
fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
Ok((self)(segment_reader))
}
}
impl<F, TScore> ScoreSegmentTweaker<TScore> for F
where
F: 'static + Sync + Send + Fn(DocId, Score) -> TScore,
{
fn score(&self, doc: DocId, score: Score) -> TScore {
(self)(doc, score)
}
}
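The two blanket impls above are what let plain closures act as a `ScoreTweaker` / `ScoreSegmentTweaker` pair. Below is a minimal sketch of how they are typically driven through `TopDocs::tweak_score` (referenced in the `custom_score` docs earlier); the schema, field names and boost formula are illustrative, and the `tweak_score` signature is assumed to mirror `custom_score` above.

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Document, Schema, FAST, TEXT};
use tantivy::{DocAddress, DocId, Index, Score, SegmentReader};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let popularity = schema_builder.add_u64_field("popularity", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    let mut doc = Document::default();
    doc.add_text(title, "bottle of beer");
    doc.add_u64(popularity, 10u64);
    writer.add_document(doc);
    writer.commit()?;

    // Outer closure: a `ScoreTweaker` gathering segment-level data (fast fields).
    // Inner closure: a `ScoreSegmentTweaker` computing the per-document score.
    let collector = TopDocs::with_limit(10).tweak_score(move |segment_reader: &SegmentReader| {
        let popularity_reader = segment_reader.fast_fields().u64(popularity).unwrap();
        move |doc: DocId, original_score: Score| {
            // Multiply the original score by a popularity-based boost.
            original_score * (1.0 + popularity_reader.get(doc) as Score)
        }
    });

    let query = QueryParser::for_index(&index, vec![title]).parse_query("beer")?;
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let hits: Vec<(Score, DocAddress)> = searcher.search(&*query, &collector)?;
    assert_eq!(hits.len(), 1);
    Ok(())
}
```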

View File

@@ -5,7 +5,7 @@ use std::u64;
pub(crate) struct TinySet(u64); pub(crate) struct TinySet(u64);
impl fmt::Debug for TinySet { impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.into_iter().collect::<Vec<u32>>().fmt(f) self.into_iter().collect::<Vec<u32>>().fmt(f)
} }
} }
@@ -204,12 +204,12 @@ mod tests {
use super::BitSet; use super::BitSet;
use super::TinySet; use super::TinySet;
use docset::DocSet; use crate::docset::DocSet;
use query::BitSetDocSet; use crate::query::BitSetDocSet;
use crate::tests;
use crate::tests::generate_nonunique_unsorted;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::collections::HashSet; use std::collections::HashSet;
use tests;
use tests::generate_nonunique_unsorted;
#[test] #[test]
fn test_tiny_set() { fn test_tiny_set() {

View File

@@ -1,11 +1,11 @@
use common::BinarySerializable; use crate::common::BinarySerializable;
use common::CountingWriter; use crate::common::CountingWriter;
use common::VInt; use crate::common::VInt;
use directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use directory::WritePtr; use crate::directory::WritePtr;
use schema::Field; use crate::schema::Field;
use space_usage::FieldUsage; use crate::space_usage::FieldUsage;
use space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::Write; use std::io::Write;
use std::io::{self, Read}; use std::io::{self, Read};
@@ -185,10 +185,10 @@ impl CompositeFile {
mod test { mod test {
use super::{CompositeFile, CompositeWrite}; use super::{CompositeFile, CompositeWrite};
use common::BinarySerializable; use crate::common::BinarySerializable;
use common::VInt; use crate::common::VInt;
use directory::{Directory, RAMDirectory}; use crate::directory::{Directory, RAMDirectory};
use schema::Field; use crate::schema::Field;
use std::io::Write; use std::io::Write;
use std::path::Path; use std::path::Path;

View File

@@ -1,6 +1,6 @@
use crate::common::Endianness;
use crate::common::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt}; use byteorder::{ReadBytesExt, WriteBytesExt};
use common::Endianness;
use common::VInt;
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::io::Read; use std::io::Read;
@@ -136,7 +136,7 @@ impl BinarySerializable for String {
pub mod test { pub mod test {
use super::*; use super::*;
use common::VInt; use crate::common::VInt;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() { pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();

View File

@@ -30,16 +30,16 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
let val = u64::from(val); let val = u64::from(val);
const STOP_BIT: u64 = 128u64; const STOP_BIT: u64 = 128u64;
match val { match val {
0...STOP_1 => (val | STOP_BIT, 1), 0..=STOP_1 => (val | STOP_BIT, 1),
START_2...STOP_2 => ( START_2..=STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)), (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
2, 2,
), ),
START_3...STOP_3 => ( START_3..=STOP_3 => (
(val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)), (val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)),
3, 3,
), ),
START_4...STOP_4 => ( START_4..=STOP_4 => (
(val & MASK_1) (val & MASK_1)
| ((val & MASK_2) << 1) | ((val & MASK_2) << 1)
| ((val & MASK_3) << 2) | ((val & MASK_3) << 2)
@@ -171,8 +171,8 @@ mod tests {
use super::serialize_vint_u32; use super::serialize_vint_u32;
use super::VInt; use super::VInt;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use common::BinarySerializable;
fn aux_test_vint(val: u64) { fn aux_test_vint(val: u64) {
let mut v = [14u8; 10]; let mut v = [14u8; 10];
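The `0..=STOP_1`-style arms above implement a stop-bit (varint) encoding: each byte carries 7 payload bits, and the final byte is flagged with a stop bit. The standalone sketch below shows the same idea in its simplest form; it is illustrative only and does not reproduce tantivy's exact byte layout.

```rust
// Illustrative stop-bit (varint) encoder, in the spirit of serialize_vint_u32.
fn encode_varint(mut val: u32, out: &mut Vec<u8>) {
    loop {
        let byte = (val & 0x7f) as u8;
        val >>= 7;
        if val == 0 {
            out.push(byte | 0x80); // stop bit set on the last byte
            break;
        }
        out.push(byte);
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_varint(300, &mut buf);
    // 300 = 0b1_0010_1100 -> low 7 bits are 0x2c, then 0x02 with the stop bit.
    assert_eq!(buf, vec![0x2c, 0x82]);
}
```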

View File

@@ -1,6 +1,6 @@
use crate::Result;
use crossbeam::channel; use crossbeam::channel;
use scoped_pool::{Pool, ThreadConfig}; use scoped_pool::{Pool, ThreadConfig};
use Result;
/// Search executor, whether the search request is single-threaded or multi-threaded. /// Search executor, whether the search request is single-threaded or multi-threaded.
/// ///

View File

@@ -1,44 +1,44 @@
use super::segment::create_segment; use super::segment::create_segment;
use super::segment::Segment; use super::segment::Segment;
use core::Executor; use crate::core::Executor;
use core::IndexMeta; use crate::core::IndexMeta;
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use core::META_FILEPATH; use crate::core::META_FILEPATH;
use directory::ManagedDirectory; use crate::directory::ManagedDirectory;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use directory::MmapDirectory; use crate::directory::MmapDirectory;
use directory::INDEX_WRITER_LOCK; use crate::directory::INDEX_WRITER_LOCK;
use directory::{Directory, RAMDirectory}; use crate::directory::{Directory, RAMDirectory};
use error::DataCorruption; use crate::error::DataCorruption;
use error::TantivyError; use crate::error::TantivyError;
use indexer::index_writer::open_index_writer; use crate::indexer::index_writer::open_index_writer;
use indexer::index_writer::HEAP_SIZE_MIN; use crate::indexer::index_writer::HEAP_SIZE_MIN;
use indexer::segment_updater::save_new_metas; use crate::indexer::segment_updater::save_new_metas;
use crate::reader::IndexReader;
use crate::reader::IndexReaderBuilder;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::TokenizerManager;
use crate::IndexWriter;
use crate::Result;
use num_cpus; use num_cpus;
use reader::IndexReader;
use reader::IndexReaderBuilder;
use schema::Field;
use schema::FieldType;
use schema::Schema;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::fmt; use std::fmt;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use std::path::Path; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use tokenizer::BoxedTokenizer;
use tokenizer::TokenizerManager;
use IndexWriter;
use Result;
fn load_metas(directory: &Directory) -> Result<IndexMeta> { fn load_metas(directory: &dyn Directory) -> Result<IndexMeta> {
let meta_data = directory.atomic_read(&META_FILEPATH)?; let meta_data = directory.atomic_read(&META_FILEPATH)?;
let meta_string = String::from_utf8_lossy(&meta_data); let meta_string = String::from_utf8_lossy(&meta_data);
serde_json::from_str(&meta_string) serde_json::from_str(&meta_string)
.map_err(|e| { .map_err(|e| {
DataCorruption::new( DataCorruption::new(
META_FILEPATH.clone(), META_FILEPATH.to_path_buf(),
format!("Meta file cannot be deserialized. {:?}.", e), format!("Meta file cannot be deserialized. {:?}.", e),
) )
}) })
@@ -169,11 +169,11 @@ impl Index {
} }
/// Helper to access the tokenizer associated to a specific field. /// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<BoxedTokenizer>> { pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<dyn BoxedTokenizer>> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers(); let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<Box<BoxedTokenizer>> = match field_type { let tokenizer_name_opt: Option<Box<dyn BoxedTokenizer>> = match field_type {
FieldType::Str(text_options) => text_options FieldType::Str(text_options) => text_options
.get_indexing_options() .get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string()) .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -346,22 +346,22 @@ impl Index {
} }
impl fmt::Debug for Index { impl fmt::Debug for Index {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Index({:?})", self.directory) write!(f, "Index({:?})", self.directory)
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use directory::RAMDirectory; use crate::directory::RAMDirectory;
use schema::Field; use crate::schema::Field;
use schema::{Schema, INDEXED, TEXT}; use crate::schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use Index;
use IndexReader;
use IndexWriter;
use ReloadPolicy;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -537,4 +537,38 @@ mod tests {
} }
assert_eq!(count, 2); assert_eq!(count, 2);
} }
// This test will not pass on Windows, because Windows
// prevents deleting files that are mmapped.
#[cfg(not(target_os = "windows"))]
#[test]
fn garbage_collect_works_as_intended() {
let directory = RAMDirectory::create();
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let index = Index::create(directory.clone(), schema).unwrap();
let mut writer = index.writer_with_num_threads(8, 24_000_000).unwrap();
for i in 0u64..8_000u64 {
writer.add_document(doc!(field => i));
}
writer.commit().unwrap();
let mem_right_after_commit = directory.total_mem_usage();
thread::sleep(Duration::from_millis(1_000));
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 8_000);
writer.wait_merging_threads().unwrap();
let mem_right_after_merge_finished = directory.total_mem_usage();
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit);
}
} }
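For reference, `tokenizer_for_field` (changed above to return `Box<dyn BoxedTokenizer>`) is typically used as in the hedged sketch below; the field name is illustrative and error handling is minimal.

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // TEXT fields use the "default" tokenizer unless configured otherwise.
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    // Returns the boxed tokenizer registered for this field.
    let _tokenizer = index.tokenizer_for_field(title)?;
    Ok(())
}
```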

View File

@@ -1,8 +1,8 @@
use core::SegmentMeta; use crate::core::SegmentMeta;
use schema::Schema; use crate::schema::Schema;
use crate::Opstamp;
use serde_json; use serde_json;
use std::fmt; use std::fmt;
use Opstamp;
/// Meta information about the `Index`. /// Meta information about the `Index`.
/// ///
@@ -14,14 +14,27 @@ use Opstamp;
/// ///
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct IndexMeta { pub struct IndexMeta {
/// List of the `SegmentMeta` information associated with each finalized segment of the index.
pub segments: Vec<SegmentMeta>, pub segments: Vec<SegmentMeta>,
/// Index `Schema`
pub schema: Schema, pub schema: Schema,
/// Opstamp associated with the last `commit` operation.
pub opstamp: Opstamp, pub opstamp: Opstamp,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
/// Payload associated with the last commit.
///
/// Upon commit, clients can optionally attach a small `String` payload
/// to help identify this commit.
/// This payload is entirely unused by tantivy.
pub payload: Option<String>, pub payload: Option<String>,
} }
impl IndexMeta { impl IndexMeta {
/// Creates an `IndexMeta` object representing a brand new `Index`
/// with the given schema.
///
/// This new index does not contain any segments.
/// Its opstamp will be set to `0u64`.
pub fn with_schema(schema: Schema) -> IndexMeta { pub fn with_schema(schema: Schema) -> IndexMeta {
IndexMeta { IndexMeta {
segments: vec![], segments: vec![],
@@ -33,7 +46,7 @@ impl IndexMeta {
} }
impl fmt::Debug for IndexMeta { impl fmt::Debug for IndexMeta {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!( write!(
f, f,
"{}", "{}",
@@ -47,7 +60,7 @@ impl fmt::Debug for IndexMeta {
mod tests { mod tests {
use super::IndexMeta; use super::IndexMeta;
use schema::{Schema, TEXT}; use crate::schema::{Schema, TEXT};
use serde_json; use serde_json;
#[test] #[test]

View File

@@ -1,13 +1,13 @@
use common::BinarySerializable; use crate::common::BinarySerializable;
use directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::positions::PositionReader;
use crate::postings::TermInfo;
use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::FieldType;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::termdict::TermDictionary;
use owned_read::OwnedRead; use owned_read::OwnedRead;
use positions::PositionReader;
use postings::TermInfo;
use postings::{BlockSegmentPostings, SegmentPostings};
use schema::FieldType;
use schema::IndexRecordOption;
use schema::Term;
use termdict::TermDictionary;
/// The inverted index reader is in charge of accessing /// The inverted index reader is in charge of accessing
/// the inverted index associated to a specific field. /// the inverted index associated to a specific field.

View File

@@ -21,18 +21,16 @@ pub use self::segment_id::SegmentId;
pub use self::segment_meta::SegmentMeta; pub use self::segment_meta::SegmentMeta;
pub use self::segment_reader::SegmentReader; pub use self::segment_reader::SegmentReader;
use std::path::PathBuf; use once_cell::sync::Lazy;
use std::path::Path;
lazy_static! { /// The meta file contains all the information about the list of segments and the schema
/// of the index.
pub static META_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new("meta.json"));
/// The meta file contains all the information about the list of segments and the schema /// The managed file contains a list of files that were created by the tantivy
/// of the index. /// and will therefore be garbage collected when they are deemed useless by tantivy.
pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json"); ///
/// Removing this file is safe, but will prevent the garbage collection of all of the file that
/// The managed file contains a list of files that were created by the tantivy /// are currently in the directory
/// and will therefore be garbage collected when they are deemed useless by tantivy. pub static MANAGED_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new(".managed.json"));
///
/// Removing this file is safe, but will prevent the garbage collection of all of the file that
/// are currently in the directory
pub static ref MANAGED_FILEPATH: PathBuf = PathBuf::from(".managed.json");
}
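The hunk above is representative of the once_cell migration applied across the crate: `lazy_static!` blocks become plain `Lazy` statics. A minimal before/after sketch, assuming `once_cell` as a dependency (names are illustrative):

```rust
use once_cell::sync::Lazy;
use std::path::Path;

// Before (lazy_static): a macro-generated static behind a `Deref`.
//
//     lazy_static! {
//         static ref META_PATH: PathBuf = PathBuf::from("meta.json");
//     }

// After (once_cell): a plain static, initialized lazily on first access.
static META_PATH: Lazy<&'static Path> = Lazy::new(|| Path::new("meta.json"));

fn main() {
    // Dereferencing works just like it did with lazy_static.
    assert_eq!(*META_PATH, Path::new("meta.json"));
}
```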

View File

@@ -1,26 +1,26 @@
use collector::Collector; use crate::collector::Collector;
use collector::SegmentCollector; use crate::collector::SegmentCollector;
use core::Executor; use crate::core::Executor;
use core::InvertedIndexReader; use crate::core::InvertedIndexReader;
use core::SegmentReader; use crate::core::SegmentReader;
use query::Query; use crate::query::Query;
use query::Scorer; use crate::query::Scorer;
use query::Weight; use crate::query::Weight;
use schema::Document; use crate::schema::Document;
use schema::Schema; use crate::schema::Schema;
use schema::{Field, Term}; use crate::schema::{Field, Term};
use space_usage::SearcherSpaceUsage; use crate::space_usage::SearcherSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermMerger;
use crate::DocAddress;
use crate::Index;
use crate::Result;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use store::StoreReader;
use termdict::TermMerger;
use DocAddress;
use Index;
use Result;
fn collect_segment<C: Collector>( fn collect_segment<C: Collector>(
collector: &C, collector: &C,
weight: &Weight, weight: &dyn Weight,
segment_ord: u32, segment_ord: u32,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> Result<C::Fruit> { ) -> Result<C::Fruit> {
@@ -28,7 +28,7 @@ fn collect_segment<C: Collector>(
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?; let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
if let Some(delete_bitset) = segment_reader.delete_bitset() { if let Some(delete_bitset) = segment_reader.delete_bitset() {
scorer.for_each(&mut |doc, score| { scorer.for_each(&mut |doc, score| {
if !delete_bitset.is_deleted(doc) { if delete_bitset.is_alive(doc) {
segment_collector.collect(doc, score); segment_collector.collect(doc, score);
} }
}); });
@@ -132,7 +132,7 @@ impl Searcher {
/// ///
/// Finally, the Collector merges each of the child collectors into itself for result usability /// Finally, the Collector merges each of the child collectors into itself for result usability
/// by the caller. /// by the caller.
pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> { pub fn search<C: Collector>(&self, query: &dyn Query, collector: &C) -> Result<C::Fruit> {
let executor = self.index.search_executor(); let executor = self.index.search_executor();
self.search_with_executor(query, collector, executor) self.search_with_executor(query, collector, executor)
} }
@@ -151,7 +151,7 @@ impl Searcher {
/// hurt it. It will however, decrease the average response time. /// hurt it. It will however, decrease the average response time.
pub fn search_with_executor<C: Collector>( pub fn search_with_executor<C: Collector>(
&self, &self,
query: &Query, query: &dyn Query,
collector: &C, collector: &C,
executor: &Executor, executor: &Executor,
) -> Result<C::Fruit> { ) -> Result<C::Fruit> {
@@ -203,7 +203,7 @@ impl FieldSearcher {
/// Returns a Stream over all of the sorted unique terms of /// Returns a Stream over all of the sorted unique terms of
/// for the given field. /// for the given field.
pub fn terms(&self) -> TermMerger { pub fn terms(&self) -> TermMerger<'_> {
let term_streamers: Vec<_> = self let term_streamers: Vec<_> = self
.inv_index_readers .inv_index_readers
.iter() .iter()
@@ -214,7 +214,7 @@ impl FieldSearcher {
} }
impl fmt::Debug for Searcher { impl fmt::Debug for Searcher {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let segment_ids = self let segment_ids = self
.segment_readers .segment_readers
.iter() .iter()

View File

@@ -1,17 +1,17 @@
use super::SegmentComponent; use super::SegmentComponent;
use core::Index; use crate::core::Index;
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use directory::Directory; use crate::directory::Directory;
use directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use schema::Schema; use crate::schema::Schema;
use crate::Opstamp;
use crate::Result;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::result; use std::result;
use Opstamp;
use Result;
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
@@ -21,7 +21,7 @@ pub struct Segment {
} }
impl fmt::Debug for Segment { impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Segment({:?})", self.id().uuid_string()) write!(f, "Segment({:?})", self.id().uuid_string())
} }
} }

View File

@@ -2,6 +2,8 @@ use std::cmp::{Ord, Ordering};
use std::fmt; use std::fmt;
use uuid::Uuid; use uuid::Uuid;
#[cfg(test)]
use once_cell::sync::Lazy;
#[cfg(test)] #[cfg(test)]
use std::sync::atomic; use std::sync::atomic;
@@ -17,10 +19,10 @@ use std::sync::atomic;
pub struct SegmentId(Uuid); pub struct SegmentId(Uuid);
#[cfg(test)] #[cfg(test)]
lazy_static! { static AUTO_INC_COUNTER: Lazy<atomic::AtomicUsize> = Lazy::new(|| atomic::AtomicUsize::default());
static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default();
static ref ZERO_ARRAY: [u8; 8] = [0u8; 8]; #[cfg(test)]
} const ZERO_ARRAY: [u8; 8] = [0u8; 8];
// During tests, we generate the segment id in a autoincrement manner // During tests, we generate the segment id in a autoincrement manner
// for consistency of segment id between run. // for consistency of segment id between run.
@@ -30,7 +32,7 @@ lazy_static! {
#[cfg(test)] #[cfg(test)]
fn create_uuid() -> Uuid { fn create_uuid() -> Uuid {
let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst); let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*ZERO_ARRAY).unwrap() Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY).unwrap()
} }
#[cfg(not(test))] #[cfg(not(test))]
@@ -62,7 +64,7 @@ impl SegmentId {
} }
impl fmt::Debug for SegmentId { impl fmt::Debug for SegmentId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Seg({:?})", self.short_uuid_string()) write!(f, "Seg({:?})", self.short_uuid_string())
} }
} }

View File

@@ -1,15 +1,14 @@
use super::SegmentComponent; use super::SegmentComponent;
use crate::core::SegmentId;
use crate::Opstamp;
use census::{Inventory, TrackedObject}; use census::{Inventory, TrackedObject};
use core::SegmentId; use once_cell::sync::Lazy;
use serde; use serde;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use Opstamp;
lazy_static! { static INVENTORY: Lazy<Inventory<InnerSegmentMeta>> = Lazy::new(Inventory::new);
static ref INVENTORY: Inventory<InnerSegmentMeta> = { Inventory::new() };
}
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
struct DeleteMeta { struct DeleteMeta {
@@ -27,7 +26,7 @@ pub struct SegmentMeta {
} }
impl fmt::Debug for SegmentMeta { impl fmt::Debug for SegmentMeta {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
self.tracked.fmt(f) self.tracked.fmt(f)
} }
} }

View File

@@ -1,26 +1,27 @@
use common::CompositeFile; use crate::common::CompositeFile;
use common::HasLen; use crate::common::HasLen;
use core::InvertedIndexReader; use crate::core::InvertedIndexReader;
use core::Segment; use crate::core::Segment;
use core::SegmentComponent; use crate::core::SegmentComponent;
use core::SegmentId; use crate::core::SegmentId;
use directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use fastfield::FacetReader; use crate::fastfield::FacetReader;
use fastfield::FastFieldReaders; use crate::fastfield::FastFieldReaders;
use fieldnorm::FieldNormReader; use crate::fieldnorm::FieldNormReader;
use schema::Field; use crate::schema::Field;
use schema::FieldType; use crate::schema::FieldType;
use schema::Schema; use crate::schema::Schema;
use space_usage::SegmentSpaceUsage; use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::DocId;
use crate::Result;
use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use store::StoreReader;
use termdict::TermDictionary;
use DocId;
use Result;
/// Entry point to access all of the datastructures of the `Segment` /// Entry point to access all of the datastructures of the `Segment`
/// ///
@@ -243,10 +244,9 @@ impl SegmentReader {
let postings_source = postings_source_opt.unwrap(); let postings_source = postings_source_opt.unwrap();
let termdict_source = self let termdict_source = self.termdict_composite.open_read(field).expect(
.termdict_composite "Failed to open field term dictionary in composite file. Is the field indexed?",
.open_read(field) );
.expect("Failed to open field term dictionary in composite file. Is the field indexed");
let positions_source = self let positions_source = self
.positions_composite .positions_composite
@@ -296,7 +296,7 @@ impl SegmentReader {
} }
/// Returns an iterator that will iterate over the alive document ids /// Returns an iterator that will iterate over the alive document ids
pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator { pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
SegmentReaderAliveDocsIterator::new(&self) SegmentReaderAliveDocsIterator::new(&self)
} }
@@ -320,7 +320,7 @@ impl SegmentReader {
} }
impl fmt::Debug for SegmentReader { impl fmt::Debug for SegmentReader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SegmentReader({:?})", self.segment_id) write!(f, "SegmentReader({:?})", self.segment_id)
} }
} }
@@ -373,9 +373,9 @@ impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use core::Index; use crate::core::Index;
use schema::{Schema, Term, STORED, TEXT}; use crate::schema::{Schema, Term, STORED, TEXT};
use DocId; use crate::DocId;
#[test] #[test]
fn test_alive_docs_iterator() { fn test_alive_docs_iterator() {

View File

@@ -1,9 +1,9 @@
use directory::directory_lock::Lock; use crate::directory::directory_lock::Lock;
use directory::error::LockError; use crate::directory::error::LockError;
use directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::WatchCallback; use crate::directory::WatchCallback;
use directory::WatchHandle; use crate::directory::WatchHandle;
use directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -48,10 +48,10 @@ impl RetryPolicy {
/// ///
/// It is transparently associated to a lock file, that gets deleted /// It is transparently associated to a lock file, that gets deleted
/// on `Drop.` The lock is released automatically on `Drop`. /// on `Drop.` The lock is released automatically on `Drop`.
pub struct DirectoryLock(Box<Drop + Send + Sync + 'static>); pub struct DirectoryLock(Box<dyn Drop + Send + Sync + 'static>);
struct DirectoryLockGuard { struct DirectoryLockGuard {
directory: Box<Directory>, directory: Box<dyn Directory>,
path: PathBuf, path: PathBuf,
} }
@@ -76,7 +76,7 @@ enum TryAcquireLockError {
fn try_acquire_lock( fn try_acquire_lock(
filepath: &Path, filepath: &Path,
directory: &mut Directory, directory: &mut dyn Directory,
) -> Result<DirectoryLock, TryAcquireLockError> { ) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e { let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists, OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
@@ -210,14 +210,14 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// DirectoryClone /// DirectoryClone
pub trait DirectoryClone { pub trait DirectoryClone {
/// Clones the directory and boxes the clone /// Clones the directory and boxes the clone
fn box_clone(&self) -> Box<Directory>; fn box_clone(&self) -> Box<dyn Directory>;
} }
impl<T> DirectoryClone for T impl<T> DirectoryClone for T
where where
T: 'static + Directory + Clone, T: 'static + Directory + Clone,
{ {
fn box_clone(&self) -> Box<Directory> { fn box_clone(&self) -> Box<dyn Directory> {
Box::new(self.clone()) Box::new(self.clone())
} }
} }

View File

@@ -1,3 +1,4 @@
use once_cell::sync::Lazy;
use std::path::PathBuf; use std::path::PathBuf;
/// A directory lock. /// A directory lock.
@@ -28,29 +29,27 @@ pub struct Lock {
pub is_blocking: bool, pub is_blocking: bool,
} }
lazy_static! { /// Only one process should be able to write tantivy's index at a time.
/// Only one process should be able to write tantivy's index at a time. /// This lock file, when present, is in charge of preventing other processes to open an IndexWriter.
/// This lock file, when present, is in charge of preventing other processes to open an IndexWriter. ///
/// /// If the process is killed and this file remains, it is safe to remove it manually.
/// If the process is killed and this file remains, it is safe to remove it manually. ///
/// /// Failing to acquire this lock usually means a misuse of tantivy's API,
/// Failing to acquire this lock usually means a misuse of tantivy's API, /// (creating more than one instance of the `IndexWriter`), are a spurious
/// (creating more than one instance of the `IndexWriter`), are a spurious /// lock file remaining after a crash. In the latter case, removing the file after
/// lock file remaining after a crash. In the latter case, removing the file after /// checking no process running tantivy is running is safe.
/// checking no process running tantivy is running is safe. pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
pub static ref INDEX_WRITER_LOCK: Lock = Lock { filepath: PathBuf::from(".tantivy-writer.lock"),
filepath: PathBuf::from(".tantivy-writer.lock"), is_blocking: false,
is_blocking: false });
}; /// The meta lock file is here to protect the segment files being opened by
/// The meta lock file is here to protect the segment files being opened by /// `IndexReader::reload()` from being garbage collected.
/// `IndexReader::reload()` from being garbage collected. /// It makes it possible for another process to safely consume
/// It makes it possible for another process to safely consume /// our index in-writing. Ideally, we may have prefered `RWLock` semantics
/// our index in-writing. Ideally, we may have prefered `RWLock` semantics /// here, but it is difficult to achieve on Windows.
/// here, but it is difficult to achieve on Windows. ///
/// /// Opening segment readers is a very fast process.
/// Opening segment readers is a very fast process. pub static META_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
pub static ref META_LOCK: Lock = Lock { filepath: PathBuf::from(".tantivy-meta.lock"),
filepath: PathBuf::from(".tantivy-meta.lock"), is_blocking: true,
is_blocking: true });
};
}
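The doc comments above describe the contract of `INDEX_WRITER_LOCK`: at most one `IndexWriter` per index. A small sketch of that behavior, assuming the second acquisition fails immediately because the lock is non-blocking (the exact error variant is not asserted here):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // The first writer takes the writer lock through the directory.
    let _writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    // A second writer on the same index cannot acquire the lock and fails.
    assert!(index.writer_with_num_threads(1, 3_000_000).is_err());
}
```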

View File

@@ -6,7 +6,7 @@ use std::path::PathBuf;
/// Error while trying to acquire a directory lock. /// Error while trying to acquire a directory lock.
#[derive(Debug, Fail)] #[derive(Debug, Fail)]
pub enum LockError { pub enum LockError {
/// Failed to acquired a lock as it is already hold by another /// Failed to acquired a lock as it is already held by another
/// client. /// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period. /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call. /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
@@ -33,7 +33,7 @@ impl Into<io::Error> for IOError {
} }
impl fmt::Display for IOError { impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.path { match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err), Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err), None => write!(f, "io error occurred: '{}'", self.err),
@@ -46,7 +46,7 @@ impl StdError for IOError {
"io error occurred" "io error occurred"
} }
fn cause(&self) -> Option<&StdError> { fn cause(&self) -> Option<&dyn StdError> {
Some(&self.err) Some(&self.err)
} }
} }
@@ -84,7 +84,7 @@ impl From<io::Error> for OpenDirectoryError {
} }
impl fmt::Display for OpenDirectoryError { impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {
OpenDirectoryError::DoesNotExist(ref path) => { OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path) write!(f, "the underlying directory '{:?}' does not exist", path)
@@ -106,7 +106,7 @@ impl StdError for OpenDirectoryError {
"error occurred while opening a directory" "error occurred while opening a directory"
} }
fn cause(&self) -> Option<&StdError> { fn cause(&self) -> Option<&dyn StdError> {
None None
} }
} }
@@ -129,7 +129,7 @@ impl From<IOError> for OpenWriteError {
} }
impl fmt::Display for OpenWriteError { impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {
OpenWriteError::FileAlreadyExists(ref path) => { OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path) write!(f, "the file '{:?}' already exists", path)
@@ -148,7 +148,7 @@ impl StdError for OpenWriteError {
"error occurred while opening a file for writing" "error occurred while opening a file for writing"
} }
fn cause(&self) -> Option<&StdError> { fn cause(&self) -> Option<&dyn StdError> {
match *self { match *self {
OpenWriteError::FileAlreadyExists(_) => None, OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err), OpenWriteError::IOError(ref err) => Some(err),
@@ -173,7 +173,7 @@ impl From<IOError> for OpenReadError {
} }
impl fmt::Display for OpenReadError { impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {
OpenReadError::FileDoesNotExist(ref path) => { OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path) write!(f, "the file '{:?}' does not exist", path)
@@ -192,7 +192,7 @@ impl StdError for OpenReadError {
"error occurred while opening a file for reading" "error occurred while opening a file for reading"
} }
fn cause(&self) -> Option<&StdError> { fn cause(&self) -> Option<&dyn StdError> {
match *self { match *self {
OpenReadError::FileDoesNotExist(_) => None, OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err), OpenReadError::IOError(ref err) => Some(err),
@@ -217,7 +217,7 @@ impl From<IOError> for DeleteError {
} }
impl fmt::Display for DeleteError { impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {
DeleteError::FileDoesNotExist(ref path) => { DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path) write!(f, "the file '{:?}' does not exist", path)
@@ -234,7 +234,7 @@ impl StdError for DeleteError {
"error occurred while deleting a file" "error occurred while deleting a file"
} }
fn cause(&self) -> Option<&StdError> { fn cause(&self) -> Option<&dyn StdError> {
match *self { match *self {
DeleteError::FileDoesNotExist(_) => None, DeleteError::FileDoesNotExist(_) => None,
DeleteError::IOError(ref err) => Some(err), DeleteError::IOError(ref err) => Some(err),

View File

@@ -1,11 +1,13 @@
use core::MANAGED_FILEPATH; use crate::core::MANAGED_FILEPATH;
use directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use directory::DirectoryLock; use crate::directory::DirectoryLock;
use directory::Lock; use crate::directory::Lock;
use directory::META_LOCK; use crate::directory::META_LOCK;
use directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use directory::{WatchCallback, WatchHandle}; use crate::directory::{WatchCallback, WatchHandle};
use error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
use std::io; use std::io;
@@ -14,8 +16,6 @@ use std::path::{Path, PathBuf};
use std::result; use std::result;
use std::sync::RwLockWriteGuard; use std::sync::RwLockWriteGuard;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use Directory;
use Result;
/// Returns true iff the file is "managed". /// Returns true iff the file is "managed".
/// Non-managed file are not subject to garbage collection. /// Non-managed file are not subject to garbage collection.
@@ -39,7 +39,7 @@ fn is_managed(path: &Path) -> bool {
/// useful anymore. /// useful anymore.
#[derive(Debug)] #[derive(Debug)]
pub struct ManagedDirectory { pub struct ManagedDirectory {
directory: Box<Directory>, directory: Box<dyn Directory>,
meta_informations: Arc<RwLock<MetaInformation>>, meta_informations: Arc<RwLock<MetaInformation>>,
} }
@@ -51,8 +51,8 @@ struct MetaInformation {
/// Saves the file containing the list of existing files /// Saves the file containing the list of existing files
/// that were created by tantivy. /// that were created by tantivy.
fn save_managed_paths( fn save_managed_paths(
directory: &mut Directory, directory: &mut dyn Directory,
wlock: &RwLockWriteGuard<MetaInformation>, wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> { ) -> io::Result<()> {
let mut w = serde_json::to_vec(&wlock.managed_paths)?; let mut w = serde_json::to_vec(&wlock.managed_paths)?;
writeln!(&mut w)?; writeln!(&mut w)?;
@@ -69,7 +69,7 @@ impl ManagedDirectory {
let managed_files: HashSet<PathBuf> = serde_json::from_str(&managed_files_json) let managed_files: HashSet<PathBuf> = serde_json::from_str(&managed_files_json)
.map_err(|e| { .map_err(|e| {
DataCorruption::new( DataCorruption::new(
MANAGED_FILEPATH.clone(), MANAGED_FILEPATH.to_path_buf(),
format!("Managed file cannot be deserialized: {:?}. ", e), format!("Managed file cannot be deserialized: {:?}. ", e),
) )
})?; })?;
@@ -264,15 +264,14 @@ mod tests {
mod mmap_specific { mod mmap_specific {
use super::super::*; use super::super::*;
use once_cell::sync::Lazy;
use std::path::Path; use std::path::Path;
use tempdir::TempDir; use tempdir::TempDir;
lazy_static! { static TEST_PATH1: Lazy<&'static Path> = Lazy::new(|| Path::new("some_path_for_test"));
static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test"); static TEST_PATH2: Lazy<&'static Path> = Lazy::new(|| Path::new("some_path_for_test2"));
static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
}
use directory::MmapDirectory; use crate::directory::MmapDirectory;
use std::io::Write; use std::io::Write;
#[test] #[test]

View File

@@ -1,23 +1,25 @@
extern crate fs2; use fs2;
extern crate notify; use notify;
use self::fs2::FileExt; use self::fs2::FileExt;
use self::notify::RawEvent; use self::notify::RawEvent;
use self::notify::RecursiveMode; use self::notify::RecursiveMode;
use self::notify::Watcher; use self::notify::Watcher;
use crate::core::META_FILEPATH;
use crate::directory::error::LockError;
use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::WritePtr;
use atomicwrites; use atomicwrites;
use core::META_FILEPATH;
use directory::error::LockError;
use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use directory::read_only_source::BoxedData;
use directory::Directory;
use directory::DirectoryLock;
use directory::Lock;
use directory::ReadOnlySource;
use directory::WatchCallback;
use directory::WatchCallbackList;
use directory::WatchHandle;
use directory::WritePtr;
use memmap::Mmap; use memmap::Mmap;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::From; use std::convert::From;
@@ -254,7 +256,7 @@ impl MmapDirectoryInner {
} }
impl fmt::Debug for MmapDirectory { impl fmt::Debug for MmapDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "MmapDirectory({:?})", self.inner.root_path) write!(f, "MmapDirectory({:?})", self.inner.root_path)
} }
} }
@@ -320,7 +322,7 @@ impl MmapDirectory {
#[cfg(windows)] #[cfg(windows)]
{ {
use std::os::windows::fs::OpenOptionsExt; use std::os::windows::fs::OpenOptionsExt;
use winapi::winbase; use winapi::um::winbase;
open_opts open_opts
.write(true) .write(true)
@@ -525,13 +527,13 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use schema::{Schema, SchemaBuilder, TEXT}; use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index;
use crate::ReloadPolicy;
use std::fs; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use Index;
use ReloadPolicy;
#[test] #[test]
fn test_open_non_existant_path() { fn test_open_non_existant_path() {

View File

@@ -24,22 +24,18 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList; pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle}; pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{BufWriter, Seek, Write}; use std::io::{BufWriter, Write};
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
pub(crate) use self::managed_directory::ManagedDirectory; pub(crate) use self::managed_directory::ManagedDirectory;
/// Synonym of Seek + Write
pub trait SeekableWrite: Seek + Write {}
impl<T: Seek + Write> SeekableWrite for T {}
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write
/// and Seek. /// and Seek.
pub type WritePtr = BufWriter<Box<SeekableWrite>>; pub type WritePtr = BufWriter<Box<dyn Write>>;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;

View File

@@ -1,8 +1,9 @@
use core::META_FILEPATH; use crate::core::META_FILEPATH;
use directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use directory::WritePtr; use crate::directory::WritePtr;
use directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle}; use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write}; use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
@@ -103,10 +104,14 @@ impl InnerDirectory {
fn watch(&mut self, watch_handle: WatchCallback) -> WatchHandle { fn watch(&mut self, watch_handle: WatchCallback) -> WatchHandle {
self.watch_router.subscribe(watch_handle) self.watch_router.subscribe(watch_handle)
} }
fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum()
}
} }
impl fmt::Debug for RAMDirectory { impl fmt::Debug for RAMDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "RAMDirectory") write!(f, "RAMDirectory")
} }
} }
@@ -126,6 +131,12 @@ impl RAMDirectory {
pub fn create() -> RAMDirectory { pub fn create() -> RAMDirectory {
Self::default() Self::default()
} }
/// Returns the sum of the sizes of the different files
/// in the RAMDirectory.
pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage()
}
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
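The new `total_mem_usage` helper (used by the garbage-collection test earlier in this changeset) simply sums the lengths of the in-memory files. A hedged usage sketch, assuming that flushing the write handle is enough to persist the buffered bytes into the RAM directory (as the directory tests further below suggest):

```rust
use std::io::Write;
use std::path::Path;
use tantivy::directory::RAMDirectory;
use tantivy::Directory;

fn main() {
    let mut directory = RAMDirectory::create();
    {
        let mut write = directory.open_write(Path::new("hello.txt")).unwrap();
        write.write_all(b"hello").unwrap();
        // Without this flush, the RAMDirectory would not see the data.
        write.flush().unwrap();
    }
    // 5 bytes were written; buffering details are an assumption here.
    assert!(directory.total_mem_usage() >= 5);
}
```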

View File

@@ -1,9 +1,9 @@
use common::HasLen; use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref}; use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
pub type BoxedData = Box<Deref<Target = [u8]> + Send + Sync + 'static>; pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Read object that represents files in tantivy. /// Read object that represents files in tantivy.
/// ///

View File

@@ -1,5 +1,6 @@
use super::*; use super::*;
use std::io::{Seek, SeekFrom, Write}; use once_cell::sync::Lazy;
use std::io::Write;
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
@@ -9,9 +10,7 @@ use std::thread;
use std::time; use std::time;
use std::time::Duration; use std::time::Duration;
lazy_static! { static TEST_PATH: Lazy<&'static Path> = Lazy::new(|| Path::new("some_path_for_test"));
static ref TEST_PATH: &'static Path = Path::new("some_path_for_test");
}
#[test] #[test]
fn test_ram_directory() { fn test_ram_directory() {
@@ -34,7 +33,7 @@ fn ram_directory_panics_if_flush_forgotten() {
assert!(write_file.write_all(&[4]).is_ok()); assert!(write_file.write_all(&[4]).is_ok());
} }
fn test_simple(directory: &mut Directory) { fn test_simple(directory: &mut dyn Directory) {
{ {
let mut write_file = directory.open_write(*TEST_PATH).unwrap(); let mut write_file = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH)); assert!(directory.exists(*TEST_PATH));
@@ -52,24 +51,7 @@ fn test_simple(directory: &mut Directory) {
assert!(!directory.exists(*TEST_PATH)); assert!(!directory.exists(*TEST_PATH));
} }
fn test_seek(directory: &mut Directory) { fn test_rewrite_forbidden(directory: &mut dyn Directory) {
{
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[4, 3, 7, 3, 5]).unwrap();
write_file.seek(SeekFrom::Start(0)).unwrap();
write_file.write_all(&[3, 1]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[3u8, 1u8, 7u8, 3u8, 5u8]);
}
assert!(directory.delete(*TEST_PATH).is_ok());
}
fn test_rewrite_forbidden(directory: &mut Directory) {
{ {
directory.open_write(*TEST_PATH).unwrap(); directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH)); assert!(directory.exists(*TEST_PATH));
@@ -80,7 +62,7 @@ fn test_rewrite_forbidden(directory: &mut Directory) {
assert!(directory.delete(*TEST_PATH).is_ok()); assert!(directory.delete(*TEST_PATH).is_ok());
} }
fn test_write_create_the_file(directory: &mut Directory) { fn test_write_create_the_file(directory: &mut dyn Directory) {
{ {
assert!(directory.open_read(*TEST_PATH).is_err()); assert!(directory.open_read(*TEST_PATH).is_err());
let _w = directory.open_write(*TEST_PATH).unwrap(); let _w = directory.open_write(*TEST_PATH).unwrap();
@@ -90,7 +72,7 @@ fn test_write_create_the_file(directory: &mut Directory) {
} }
} }
fn test_directory_delete(directory: &mut Directory) { fn test_directory_delete(directory: &mut dyn Directory) {
assert!(directory.open_read(*TEST_PATH).is_err()); assert!(directory.open_read(*TEST_PATH).is_err());
let mut write_file = directory.open_write(*TEST_PATH).unwrap(); let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[1, 2, 3, 4]).unwrap(); write_file.write_all(&[1, 2, 3, 4]).unwrap();
@@ -118,9 +100,8 @@ fn test_directory_delete(directory: &mut Directory) {
assert!(directory.delete(*TEST_PATH).is_err()); assert!(directory.delete(*TEST_PATH).is_err());
} }
fn test_directory(directory: &mut Directory) { fn test_directory(directory: &mut dyn Directory) {
test_simple(directory); test_simple(directory);
test_seek(directory);
test_rewrite_forbidden(directory); test_rewrite_forbidden(directory);
test_write_create_the_file(directory); test_write_create_the_file(directory);
test_directory_delete(directory); test_directory_delete(directory);
@@ -129,7 +110,7 @@ fn test_directory(directory: &mut Directory) {
test_watch(directory); test_watch(directory);
} }
fn test_watch(directory: &mut Directory) { fn test_watch(directory: &mut dyn Directory) {
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let watch_callback = Box::new(move || { let watch_callback = Box::new(move || {
@@ -163,7 +144,7 @@ fn test_watch(directory: &mut Directory) {
assert_eq!(10, counter.load(Ordering::SeqCst)); assert_eq!(10, counter.load(Ordering::SeqCst));
} }
fn test_lock_non_blocking(directory: &mut Directory) { fn test_lock_non_blocking(directory: &mut dyn Directory) {
{ {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
@@ -188,7 +169,7 @@ fn test_lock_non_blocking(directory: &mut Directory) {
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
} }
fn test_lock_blocking(directory: &mut Directory) { fn test_lock_blocking(directory: &mut dyn Directory) {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,

View File

@@ -3,7 +3,7 @@ use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
/// Type alias for callbacks registered when watching files of a `Directory`. /// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<Fn() -> () + Sync + Send>; pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
/// Helper struct to implement the watch method in `Directory` implementations. /// Helper struct to implement the watch method in `Directory` implementations.
/// ///
@@ -67,7 +67,7 @@ impl WatchCallbackList {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;

View File

@@ -1,9 +1,9 @@
use common::BitSet; use crate::common::BitSet;
use fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use crate::DocId;
use std::borrow::Borrow; use std::borrow::Borrow;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::cmp::Ordering; use std::cmp::Ordering;
use DocId;
/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`. /// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
#[derive(PartialEq, Eq, Debug)] #[derive(PartialEq, Eq, Debug)]
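
Most of the churn in this compare is the Rust 2018 path change visible here: in-crate imports such as `use common::BitSet;` become `use crate::common::BitSet;`, while external crates no longer need `extern crate`. A tiny standalone sketch of the rule, with throwaway modules:

```rust
// Sketch of the 2018-edition path rule: in-crate imports are anchored with
// `crate::` (or `self::`/`super::`); the modules below are throwaway examples.
mod schema {
    pub struct Field(pub u32);
}

mod fastfield {
    // 2015 edition: `use schema::Field;`
    // 2018 edition: the crate-relative path is spelled out.
    use crate::schema::Field;

    pub fn describe(field: &Field) -> String {
        format!("field #{}", field.0)
    }
}

fn main() {
    let field = schema::Field(1);
    println!("{}", fastfield::describe(&field));
}
```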

View File

@@ -2,11 +2,11 @@
use std::io; use std::io;
use directory::error::LockError; use crate::directory::error::LockError;
use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use fastfield::FastFieldNotAvailableError; use crate::fastfield::FastFieldNotAvailableError;
use query; use crate::query;
use schema; use crate::schema;
use serde_json; use serde_json;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
@@ -34,7 +34,7 @@ impl DataCorruption {
} }
impl fmt::Debug for DataCorruption { impl fmt::Debug for DataCorruption {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Data corruption: ")?; write!(f, "Data corruption: ")?;
if let Some(ref filepath) = &self.filepath { if let Some(ref filepath) = &self.filepath {
write!(f, "(in file `{:?}`)", filepath)?; write!(f, "(in file `{:?}`)", filepath)?;
@@ -77,9 +77,6 @@ pub enum TantivyError {
/// An Error appeared related to the schema. /// An Error appeared related to the schema.
#[fail(display = "Schema error: '{}'", _0)] #[fail(display = "Schema error: '{}'", _0)]
SchemaError(String), SchemaError(String),
/// Tried to access a fastfield reader for a field not configured accordingly.
#[fail(display = "Fast field not available: '{:?}'", _0)]
FastFieldError(#[cause] FastFieldNotAvailableError),
/// System error. (e.g.: We failed spawning a new thread) /// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
@@ -93,7 +90,7 @@ impl From<DataCorruption> for TantivyError {
impl From<FastFieldNotAvailableError> for TantivyError { impl From<FastFieldNotAvailableError> for TantivyError {
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError { fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
TantivyError::FastFieldError(fastfield_error) TantivyError::SchemaError(format!("{}", fastfield_error))
} }
} }
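
The dedicated `FastFieldError` variant is removed, and the `From<FastFieldNotAvailableError>` impl now folds the error into `SchemaError(String)`. A hedged sketch of that conversion pattern; `AppError` and `NotAvailable` are stand-in types, not tantivy's:

```rust
// Sketch of folding a specific error into a string-carrying variant via `From`;
// `AppError` and `NotAvailable` are stand-ins, not tantivy's real types.
use std::fmt;

#[derive(Debug)]
struct NotAvailable {
    field_name: String,
}

impl fmt::Display for NotAvailable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Fast field not available: '{:?}'", self.field_name)
    }
}

#[derive(Debug)]
enum AppError {
    SchemaError(String),
}

impl From<NotAvailable> for AppError {
    fn from(err: NotAvailable) -> AppError {
        // Instead of a dedicated variant, the message is preserved as a string.
        AppError::SchemaError(format!("{}", err))
    }
}

fn main() {
    let err: AppError = NotAvailable { field_name: "score".to_string() }.into();
    println!("{:?}", err);
}
```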

View File

@@ -6,8 +6,8 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use schema::Schema; use crate::schema::Schema;
use Index; use crate::Index;
#[test] #[test]
fn test_bytes() { fn test_bytes() {

View File

@@ -1,8 +1,8 @@
use owning_ref::OwningRef; use owning_ref::OwningRef;
use directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use DocId; use crate::DocId;
/// Reader for byte array fast fields /// Reader for byte array fast fields
/// ///

View File

@@ -1,8 +1,8 @@
use std::io; use std::io;
use fastfield::serializer::FastFieldSerializer; use crate::fastfield::serializer::FastFieldSerializer;
use schema::{Document, Field, Value}; use crate::schema::{Document, Field, Value};
use DocId; use crate::DocId;
/// Writer for byte array (as in, any number of bytes per document) fast fields /// Writer for byte array (as in, any number of bytes per document) fast fields
/// ///

View File

@@ -1,11 +1,11 @@
use crate::common::HasLen;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::space_usage::ByteCount;
use crate::DocId;
use bit_set::BitSet; use bit_set::BitSet;
use common::HasLen;
use directory::ReadOnlySource;
use directory::WritePtr;
use space_usage::ByteCount;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
use DocId;
/// Write a delete `BitSet` /// Write a delete `BitSet`
/// ///
@@ -82,8 +82,8 @@ impl HasLen for DeleteBitSet {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::directory::*;
use bit_set::BitSet; use bit_set::BitSet;
use directory::*;
use std::path::PathBuf; use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet) { fn test_delete_bitset_helper(bitset: &BitSet) {

View File

@@ -1,11 +1,11 @@
use schema::FieldEntry; use crate::schema::FieldEntry;
use std::result; use std::result;
/// `FastFieldNotAvailableError` is returned when the /// `FastFieldNotAvailableError` is returned when the
/// user requested for a fast field reader, and the field was not /// user requested for a fast field reader, and the field was not
/// defined in the schema as a fast field. /// defined in the schema as a fast field.
#[derive(Debug, Fail)] #[derive(Debug, Fail)]
#[fail(display = "field not available: '{:?}'", field_name)] #[fail(display = "Fast field not available: '{:?}'", field_name)]
pub struct FastFieldNotAvailableError { pub struct FastFieldNotAvailableError {
field_name: String, field_name: String,
} }

View File

@@ -1,9 +1,9 @@
use super::MultiValueIntFastFieldReader; use super::MultiValueIntFastFieldReader;
use schema::Facet; use crate::schema::Facet;
use crate::termdict::TermDictionary;
use crate::termdict::TermOrdinal;
use crate::DocId;
use std::str; use std::str;
use termdict::TermDictionary;
use termdict::TermOrdinal;
use DocId;
/// The facet reader makes it possible to access the list of /// The facet reader makes it possible to access the list of
/// facets associated to a given document in a specific /// facets associated to a given document in a specific

View File

@@ -33,10 +33,10 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use common; use crate::common;
use schema::Cardinality; use crate::schema::Cardinality;
use schema::FieldType; use crate::schema::FieldType;
use schema::Value; use crate::schema::Value;
mod bytes; mod bytes;
mod delete; mod delete;
@@ -49,7 +49,7 @@ mod serializer;
mod writer; mod writer;
/// Trait for types that are allowed for fast fields: (u64 or i64). /// Trait for types that are allowed for fast fields: (u64 or i64).
pub trait FastValue: Default + Clone + Copy { pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
/// Converts a value from u64 /// Converts a value from u64
/// ///
/// Internally all fast field values are encoded as u64. /// Internally all fast field values are encoded as u64.
@@ -126,27 +126,27 @@ fn value_to_u64(value: &Value) -> u64 {
mod tests { mod tests {
use super::*; use super::*;
use common::CompositeFile; use crate::common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::schema::Document;
use crate::schema::Field;
use crate::schema::Schema;
use crate::schema::FAST;
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::SeedableRng; use rand::SeedableRng;
use schema::Document;
use schema::Field;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
lazy_static! { pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
pub static ref SCHEMA: Schema = { let mut schema_builder = Schema::builder();
let mut schema_builder = Schema::builder(); schema_builder.add_u64_field("field", FAST);
schema_builder.add_u64_field("field", FAST); schema_builder.build()
schema_builder.build() });
};
pub static ref FIELD: Field = { SCHEMA.get_field("field").unwrap() }; pub static FIELD: Lazy<Field> = Lazy::new(|| SCHEMA.get_field("field").unwrap());
}
#[test] #[test]
pub fn test_fastfield() { pub fn test_fastfield() {
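
The test statics above switch from `lazy_static!` to `once_cell::sync::Lazy`. A minimal sketch of the substitution, assuming a `once_cell` dependency; the map below is only illustrative:

```rust
// Minimal sketch of replacing `lazy_static!` with `once_cell::sync::Lazy`.
// Assumes `once_cell = "1"` in Cargo.toml; the HashMap here is illustrative.
use once_cell::sync::Lazy;
use std::collections::HashMap;

// Before (lazy_static):
//   lazy_static! {
//       pub static ref DEFAULTS: HashMap<&'static str, u64> = { ... };
//   }
// After (once_cell): a plain static whose initializer runs on first access.
pub static DEFAULTS: Lazy<HashMap<&'static str, u64>> = Lazy::new(|| {
    let mut map = HashMap::new();
    map.insert("field", 0u64);
    map
});

fn main() {
    assert_eq!(DEFAULTS.get("field"), Some(&0u64));
}
```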

View File

@@ -7,16 +7,16 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
extern crate time; use time;
use self::time::Duration; use self::time::Duration;
use collector::TopDocs; use crate::collector::TopDocs;
use query::QueryParser; use crate::query::QueryParser;
use schema::Cardinality; use crate::schema::Cardinality;
use schema::Facet; use crate::schema::Facet;
use schema::IntOptions; use crate::schema::IntOptions;
use schema::Schema; use crate::schema::Schema;
use Index; use crate::Index;
#[test] #[test]
fn test_multivalued_u64() { fn test_multivalued_u64() {

View File

@@ -1,5 +1,5 @@
use fastfield::{FastFieldReader, FastValue}; use crate::fastfield::{FastFieldReader, FastValue};
use DocId; use crate::DocId;
/// Reader for a multivalued `u64` fast field. /// Reader for a multivalued `u64` fast field.
/// ///
@@ -64,8 +64,8 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use core::Index; use crate::core::Index;
use schema::{Facet, Schema}; use crate::schema::{Facet, Schema};
#[test] #[test]
fn test_multifastfield_reader() { fn test_multifastfield_reader() {

View File

@@ -1,13 +1,13 @@
use fastfield::serializer::FastSingleFieldSerializer; use crate::fastfield::serializer::FastSingleFieldSerializer;
use fastfield::value_to_u64; use crate::fastfield::value_to_u64;
use fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use itertools::Itertools; use itertools::Itertools;
use postings::UnorderedTermId;
use schema::{Document, Field};
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
use termdict::TermOrdinal;
use DocId;
/// Writer for multi-valued (as in, more than one value per document) /// Writer for multi-valued (as in, more than one value per document)
/// int fast field. /// int fast field.
@@ -116,7 +116,7 @@ impl MultiValueIntFastFieldWriter {
} }
{ {
// writing the values themselves. // writing the values themselves.
let mut value_serializer: FastSingleFieldSerializer<_>; let mut value_serializer: FastSingleFieldSerializer<'_, _>;
match mapping_opt { match mapping_opt {
Some(mapping) => { Some(mapping) => {
value_serializer = serializer.new_u64_fast_field_with_idx( value_serializer = serializer.new_u64_fast_field_with_idx(

View File

@@ -1,18 +1,18 @@
use super::FastValue; use super::FastValue;
use common::bitpacker::BitUnpacker; use crate::common::bitpacker::BitUnpacker;
use common::compute_num_bits; use crate::common::compute_num_bits;
use common::BinarySerializable; use crate::common::BinarySerializable;
use common::CompositeFile; use crate::common::CompositeFile;
use directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use owning_ref::OwningRef; use owning_ref::OwningRef;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::path::Path; use std::path::Path;
use DocId;
/// Trait for accessing a fastfield. /// Trait for accessing a fastfield.
/// ///

View File

@@ -1,11 +1,11 @@
use common::CompositeFile; use crate::common::CompositeFile;
use fastfield::BytesFastFieldReader; use crate::fastfield::BytesFastFieldReader;
use fastfield::MultiValueIntFastFieldReader; use crate::fastfield::MultiValueIntFastFieldReader;
use fastfield::{FastFieldNotAvailableError, FastFieldReader}; use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use schema::{Cardinality, Field, FieldType, Schema}; use crate::schema::{Cardinality, Field, FieldType, Schema};
use space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use crate::Result;
use std::collections::HashMap; use std::collections::HashMap;
use Result;
/// Provides access to all of the FastFieldReader. /// Provides access to all of the FastFieldReader.
/// ///

View File

@@ -1,10 +1,10 @@
use common::bitpacker::BitPacker; use crate::common::bitpacker::BitPacker;
use common::compute_num_bits; use crate::common::compute_num_bits;
use common::BinarySerializable; use crate::common::BinarySerializable;
use common::CompositeWrite; use crate::common::CompositeWrite;
use common::CountingWriter; use crate::common::CountingWriter;
use directory::WritePtr; use crate::directory::WritePtr;
use schema::Field; use crate::schema::Field;
use std::io::{self, Write}; use std::io::{self, Write};
/// `FastFieldSerializer` is in charge of serializing /// `FastFieldSerializer` is in charge of serializing
@@ -45,7 +45,7 @@ impl FastFieldSerializer {
field: Field, field: Field,
min_value: u64, min_value: u64,
max_value: u64, max_value: u64,
) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> { ) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0) self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
} }
@@ -56,7 +56,7 @@ impl FastFieldSerializer {
min_value: u64, min_value: u64,
max_value: u64, max_value: u64,
idx: usize, idx: usize,
) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> { ) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx); let field_write = self.composite_write.for_field_with_idx(field, idx);
FastSingleFieldSerializer::open(field_write, min_value, max_value) FastSingleFieldSerializer::open(field_write, min_value, max_value)
} }
@@ -66,7 +66,7 @@ impl FastFieldSerializer {
&mut self, &mut self,
field: Field, field: Field,
idx: usize, idx: usize,
) -> io::Result<FastBytesFieldSerializer<CountingWriter<WritePtr>>> { ) -> io::Result<FastBytesFieldSerializer<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx); let field_write = self.composite_write.for_field_with_idx(field, idx);
FastBytesFieldSerializer::open(field_write) FastBytesFieldSerializer::open(field_write)
} }
@@ -79,7 +79,7 @@ impl FastFieldSerializer {
} }
} }
pub struct FastSingleFieldSerializer<'a, W: Write + 'a> { pub struct FastSingleFieldSerializer<'a, W: Write> {
bit_packer: BitPacker, bit_packer: BitPacker,
write: &'a mut W, write: &'a mut W,
min_value: u64, min_value: u64,
@@ -127,7 +127,7 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> {
} }
} }
pub struct FastBytesFieldSerializer<'a, W: Write + 'a> { pub struct FastBytesFieldSerializer<'a, W: Write> {
write: &'a mut W, write: &'a mut W,
} }
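
The serializer signatures gain the explicit anonymous lifetime `'_` (for example `FastSingleFieldSerializer<'_, _>`) and drop the now-redundant `W: Write + 'a` bound. A short sketch of both idioms with stand-in types:

```rust
// Sketch of the `'_` anonymous-lifetime annotations added across this diff.
// Rust 2018's `elided_lifetimes_in_paths` lint asks for the elided lifetime
// to be written out when a type carries one; the types below are stand-ins.
#![warn(elided_lifetimes_in_paths)]
use std::fmt;
use std::io::{self, Write};

// The outlives requirement is inferred, so `W: Write + 'a` is just `W: Write`.
struct Serializer<'a, W: Write> {
    write: &'a mut W,
}

struct Registry {
    buffer: Vec<u8>,
}

impl Registry {
    // The `'_` signals that the returned type borrows from `&mut self`.
    fn serializer(&mut self) -> io::Result<Serializer<'_, Vec<u8>>> {
        Ok(Serializer { write: &mut self.buffer })
    }
}

impl fmt::Debug for Registry {
    // Same idea for `Formatter`: the elided lifetime is spelled `Formatter<'_>`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Registry({} bytes)", self.buffer.len())
    }
}

fn main() -> io::Result<()> {
    let mut registry = Registry { buffer: Vec::new() };
    registry.serializer()?.write.write_all(&[1, 2, 3])?;
    println!("{:?}", registry);
    Ok(())
}
```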

View File

@@ -1,13 +1,13 @@
use super::multivalued::MultiValueIntFastFieldWriter; use super::multivalued::MultiValueIntFastFieldWriter;
use common; use crate::common;
use common::BinarySerializable; use crate::common::BinarySerializable;
use common::VInt; use crate::common::VInt;
use fastfield::{BytesFastFieldWriter, FastFieldSerializer}; use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use schema::{Cardinality, Document, Field, FieldType, Schema}; use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
use termdict::TermOrdinal;
/// The fastfieldswriter regroup all of the fast field writers. /// The fastfieldswriter regroup all of the fast field writers.
pub struct FastFieldsWriter { pub struct FastFieldsWriter {

View File

@@ -1,6 +1,6 @@
use super::{fieldnorm_to_id, id_to_fieldnorm}; use super::{fieldnorm_to_id, id_to_fieldnorm};
use directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use DocId; use crate::DocId;
/// Reads the fieldnorm associated to a document. /// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to /// The fieldnorm represents the length associated to

View File

@@ -1,6 +1,6 @@
use common::CompositeWrite; use crate::common::CompositeWrite;
use directory::WritePtr; use crate::directory::WritePtr;
use schema::Field; use crate::schema::Field;
use std::io; use std::io;
use std::io::Write; use std::io::Write;

View File

@@ -1,9 +1,9 @@
use DocId; use crate::DocId;
use super::fieldnorm_to_id; use super::fieldnorm_to_id;
use super::FieldNormsSerializer; use super::FieldNormsSerializer;
use schema::Field; use crate::schema::Field;
use schema::Schema; use crate::schema::Schema;
use std::io; use std::io;
/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte

View File

@@ -1,10 +1,10 @@
use rand::thread_rng; use rand::thread_rng;
use std::collections::HashSet; use std::collections::HashSet;
use crate::schema::*;
use crate::Index;
use crate::Searcher;
use rand::Rng; use rand::Rng;
use schema::*;
use Index;
use Searcher;
fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) { fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
assert!(searcher.segment_readers().len() < 20); assert!(searcher.segment_readers().len() < 20);

View File

@@ -1,8 +1,8 @@
use super::operation::DeleteOperation; use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem; use std::mem;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use Opstamp;
// The DeleteQueue is similar in conceptually to a multiple // The DeleteQueue is similar in conceptually to a multiple
// consumer single producer broadcast channel. // consumer single producer broadcast channel.
@@ -250,7 +250,7 @@ impl DeleteCursor {
mod tests { mod tests {
use super::{DeleteOperation, DeleteQueue}; use super::{DeleteOperation, DeleteQueue};
use schema::{Field, Term}; use crate::schema::{Field, Term};
#[test] #[test]
fn test_deletequeue() { fn test_deletequeue() {

View File

@@ -1,6 +1,6 @@
use crate::DocId;
use crate::Opstamp;
use std::sync::Arc; use std::sync::Arc;
use DocId;
use Opstamp;
// Doc to opstamp is used to identify which // Doc to opstamp is used to identify which
// document should be deleted. // document should be deleted.

View File

@@ -1,37 +1,37 @@
use super::operation::{AddOperation, UserOperation}; use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater; use super::segment_updater::SegmentUpdater;
use super::PreparedCommit; use super::PreparedCommit;
use crate::core::Index;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
use crate::indexer::operation::DeleteOperation;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergePolicy;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentWriter;
use crate::postings::compute_table_size;
use crate::schema::Document;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Opstamp;
use crate::Result;
use bit_set::BitSet; use bit_set::BitSet;
use core::Index;
use core::Segment;
use core::SegmentComponent;
use core::SegmentId;
use core::SegmentMeta;
use core::SegmentReader;
use crossbeam::channel; use crossbeam::channel;
use directory::DirectoryLock;
use docset::DocSet;
use error::TantivyError;
use fastfield::write_delete_bitset;
use futures::{Canceled, Future}; use futures::{Canceled, Future};
use indexer::delete_queue::{DeleteCursor, DeleteQueue};
use indexer::doc_opstamp_mapping::DocToOpstampMapping;
use indexer::operation::DeleteOperation;
use indexer::stamper::Stamper;
use indexer::MergePolicy;
use indexer::SegmentEntry;
use indexer::SegmentWriter;
use postings::compute_table_size;
use schema::Document;
use schema::IndexRecordOption;
use schema::Term;
use std::mem; use std::mem;
use std::ops::Range; use std::ops::Range;
use std::sync::Arc; use std::sync::Arc;
use std::thread; use std::thread;
use std::thread::JoinHandle; use std::thread::JoinHandle;
use Opstamp;
use Result;
// Size of the margin for the heap. A segment is closed when the remaining memory // Size of the margin for the heap. A segment is closed when the remaining memory
// in the heap goes below MARGIN_IN_BYTES. // in the heap goes below MARGIN_IN_BYTES.
@@ -268,7 +268,7 @@ fn index_documents(
memory_budget: usize, memory_budget: usize,
segment: &Segment, segment: &Segment,
generation: usize, generation: usize,
document_iterator: &mut Iterator<Item = Vec<AddOperation>>, document_iterator: &mut dyn Iterator<Item = Vec<AddOperation>>,
segment_updater: &mut SegmentUpdater, segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor, mut delete_cursor: DeleteCursor,
) -> Result<bool> { ) -> Result<bool> {
@@ -332,7 +332,8 @@ fn index_documents(
} }
impl IndexWriter { impl IndexWriter {
/// The index writer /// If there are some merging threads, blocks until they all finish their work and
/// then drop the `IndexWriter`.
pub fn wait_merging_threads(mut self) -> Result<()> { pub fn wait_merging_threads(mut self) -> Result<()> {
// this will stop the indexing thread, // this will stop the indexing thread,
// dropping the last reference to the segment_updater. // dropping the last reference to the segment_updater.
@@ -383,7 +384,6 @@ impl IndexWriter {
/// Spawns a new worker thread for indexing. /// Spawns a new worker thread for indexing.
/// The thread consumes documents from the pipeline. /// The thread consumes documents from the pipeline.
///
fn add_indexing_worker(&mut self) -> Result<()> { fn add_indexing_worker(&mut self) -> Result<()> {
let document_receiver_clone = self.operation_receiver.clone(); let document_receiver_clone = self.operation_receiver.clone();
let mut segment_updater = self.segment_updater.clone(); let mut segment_updater = self.segment_updater.clone();
@@ -440,12 +440,12 @@ impl IndexWriter {
} }
/// Accessor to the merge policy. /// Accessor to the merge policy.
pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> { pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.segment_updater.get_merge_policy() self.segment_updater.get_merge_policy()
} }
/// Set the merge policy. /// Set the merge policy.
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy); self.segment_updater.set_merge_policy(merge_policy);
} }
@@ -462,6 +462,52 @@ impl IndexWriter {
self.segment_updater.garbage_collect_files() self.segment_updater.garbage_collect_files()
} }
/// Deletes all documents from the index
///
/// Requires `commit`ing
/// Enables users to rebuild the index,
/// by clearing and resubmitting necessary documents
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::query::QueryParser;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::*;
/// use tantivy::Index;
///
/// fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT | STORED);
/// let schema = schema_builder.build();
///
/// let index = Index::create_in_ram(schema.clone());
///
/// let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
/// index_writer.add_document(doc!(title => "The modern Promotheus"));
/// index_writer.commit()?;
///
/// let clear_res = index_writer.delete_all_documents().unwrap();
/// // have to commit, otherwise deleted terms remain available
/// index_writer.commit()?;
///
/// let searcher = index.reader()?.searcher();
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query_promo = query_parser.parse_query("Promotheus")?;
/// let top_docs_promo = searcher.search(&query_promo, &TopDocs::with_limit(1))?;
///
/// assert!(top_docs_promo.is_empty());
/// Ok(())
/// }
/// ```
pub fn delete_all_documents(&mut self) -> Result<Opstamp> {
// Delete segments
self.segment_updater.remove_all_segments();
// Return new stamp - reverted stamp
self.stamper.revert(self.committed_opstamp);
Ok(self.committed_opstamp)
}
/// Merges a given list of segments /// Merges a given list of segments
/// ///
/// `segment_ids` is required to be non-empty. /// `segment_ids` is required to be non-empty.
@@ -489,19 +535,22 @@ impl IndexWriter {
/// Rollback to the last commit /// Rollback to the last commit
/// ///
/// This cancels all of the update that /// This cancels all of the updates that
/// happened before after the last commit. /// happened after the last commit.
/// After calling rollback, the index is in the same /// After calling rollback, the index is in the same
/// state as it was after the last commit. /// state as it was after the last commit.
/// ///
/// The opstamp at the last commit is returned. /// The opstamp at the last commit is returned.
pub fn rollback(&mut self) -> Result<Opstamp> { pub fn rollback(&mut self) -> Result<Opstamp> {
info!("Rolling back to opstamp {}", self.committed_opstamp); info!("Rolling back to opstamp {}", self.committed_opstamp);
self.rollback_impl()
}
/// Private, implementation of rollback
fn rollback_impl(&mut self) -> Result<Opstamp> {
// marks the segment updater as killed. From now on, all // marks the segment updater as killed. From now on, all
// segment updates will be ignored. // segment updates will be ignored.
self.segment_updater.kill(); self.segment_updater.kill();
let document_receiver = self.operation_receiver.clone(); let document_receiver = self.operation_receiver.clone();
// take the directory lock to create a new index_writer. // take the directory lock to create a new index_writer.
@@ -554,7 +603,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit` /// It is also possible to add a payload to the `commit`
/// using this API. /// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html) /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> Result<PreparedCommit> { pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
// Here, because we join all of the worker threads, // Here, because we join all of the worker threads,
// all of the segment update for this commit have been // all of the segment update for this commit have been
// sent. // sent.
@@ -724,15 +773,15 @@ mod tests {
use super::super::operation::UserOperation; use super::super::operation::UserOperation;
use super::initial_table_size; use super::initial_table_size;
use collector::TopDocs; use crate::collector::TopDocs;
use directory::error::LockError; use crate::directory::error::LockError;
use error::*; use crate::error::*;
use indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use query::TermQuery; use crate::query::TermQuery;
use schema::{self, IndexRecordOption}; use crate::schema::{self, IndexRecordOption};
use Index; use crate::Index;
use ReloadPolicy; use crate::ReloadPolicy;
use Term; use crate::Term;
#[test] #[test]
fn test_operations_group() { fn test_operations_group() {
@@ -1049,4 +1098,145 @@ mod tests {
assert_eq!(num_docs_containing("b"), 0); assert_eq!(num_docs_containing("b"), 0);
fail::cfg("RAMDirectory::atomic_write", "off").unwrap(); fail::cfg("RAMDirectory::atomic_write", "off").unwrap();
} }
#[test]
fn test_add_then_delete_all_documents() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
let num_docs_containing = |s: &str| {
reader.reload().unwrap();
let searcher = reader.searcher();
let term = Term::from_field_text(text_field, s);
searcher.doc_freq(&term)
};
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
let add_tstamp = index_writer.add_document(doc!(text_field => "a"));
let commit_tstamp = index_writer.commit().unwrap();
assert!(commit_tstamp > add_tstamp);
index_writer.delete_all_documents().unwrap();
index_writer.commit().unwrap();
// Search for documents with the same term that we added
assert_eq!(num_docs_containing("a"), 0);
}
#[test]
fn test_delete_all_documents_rollback_correct_stamp() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
let add_tstamp = index_writer.add_document(doc!(text_field => "a"));
// commit documents - they are now available
let first_commit = index_writer.commit();
assert!(first_commit.is_ok());
let first_commit_tstamp = first_commit.unwrap();
assert!(first_commit_tstamp > add_tstamp);
// delete_all_documents the index
let clear_tstamp = index_writer.delete_all_documents().unwrap();
assert_eq!(clear_tstamp, add_tstamp);
// commit the clear command - now documents aren't available
let second_commit = index_writer.commit();
assert!(second_commit.is_ok());
let second_commit_tstamp = second_commit.unwrap();
// add new documents again
for _ in 0..100 {
index_writer.add_document(doc!(text_field => "b"));
}
// rollback to last commit, when index was empty
let rollback = index_writer.rollback();
assert!(rollback.is_ok());
let rollback_tstamp = rollback.unwrap();
assert_eq!(rollback_tstamp, second_commit_tstamp);
// working with an empty index == no documents
let term_b = Term::from_field_text(text_field, "b");
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
}
#[test]
fn test_delete_all_documents_then_add() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
// writing the segment
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
let res = index_writer.delete_all_documents();
assert!(res.is_ok());
assert!(index_writer.commit().is_ok());
// add one simple doc
index_writer.add_document(doc!(text_field => "a"));
assert!(index_writer.commit().is_ok());
let term_a = Term::from_field_text(text_field, "a");
// expect the document with that term to be in the index
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
}
#[test]
fn test_delete_all_documents_and_rollback() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
// add one simple doc
index_writer.add_document(doc!(text_field => "a"));
let comm = index_writer.commit();
assert!(comm.is_ok());
let commit_tstamp = comm.unwrap();
// clear but don't commit!
let clear_tstamp = index_writer.delete_all_documents().unwrap();
// clear_tstamp should reset to before the last commit
assert!(clear_tstamp < commit_tstamp);
// rollback
let _rollback_tstamp = index_writer.rollback().unwrap();
// Find original docs in the index
let term_a = Term::from_field_text(text_field, "a");
// expect the document with that term to be in the index
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
}
#[test]
fn test_delete_all_documents_empty_index() {
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
let clear = index_writer.delete_all_documents();
let commit = index_writer.commit();
assert!(clear.is_ok());
assert!(commit.is_ok());
}
#[test]
fn test_delete_all_documents_index_twice() {
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
let clear = index_writer.delete_all_documents();
let commit = index_writer.commit();
assert!(clear.is_ok());
assert!(commit.is_ok());
let clear_again = index_writer.delete_all_documents();
let commit_again = index_writer.commit();
assert!(clear_again.is_ok());
assert!(commit_again.is_ok());
}
} }
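
Among the changes above, the merge-policy accessors now spell the trait object as `Box<dyn MergePolicy>` behind `Arc`/`RwLock`. A self-contained sketch of that get/set pattern; `Policy` and `Updater` are stand-ins, not tantivy types:

```rust
// Sketch of the Arc<Box<dyn Trait>> get/set pattern used for the merge policy;
// `Policy` and `Updater` stand in for MergePolicy and the segment updater.
use std::sync::{Arc, RwLock};

trait Policy: Send + Sync {
    fn budget(&self) -> usize;
}

struct DefaultPolicy;
impl Policy for DefaultPolicy {
    fn budget(&self) -> usize {
        8
    }
}

struct NoMerge;
impl Policy for NoMerge {
    fn budget(&self) -> usize {
        0
    }
}

struct Updater {
    policy: RwLock<Arc<Box<dyn Policy>>>,
}

impl Updater {
    fn new() -> Updater {
        let default_policy: Box<dyn Policy> = Box::new(DefaultPolicy);
        Updater {
            policy: RwLock::new(Arc::new(default_policy)),
        }
    }
    fn get_policy(&self) -> Arc<Box<dyn Policy>> {
        // Cloning the Arc hands out a cheap shared handle to the current policy.
        self.policy.read().unwrap().clone()
    }
    fn set_policy(&self, policy: Box<dyn Policy>) {
        *self.policy.write().unwrap() = Arc::new(policy);
    }
}

fn main() {
    let updater = Updater::new();
    assert_eq!(updater.get_policy().budget(), 8);
    updater.set_policy(Box::new(NoMerge));
    assert_eq!(updater.get_policy().budget(), 0);
}
```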

View File

@@ -1,5 +1,5 @@
use super::merge_policy::{MergeCandidate, MergePolicy}; use super::merge_policy::{MergeCandidate, MergePolicy};
use core::SegmentMeta; use crate::core::SegmentMeta;
use std::cmp; use std::cmp;
use std::f64; use std::f64;
@@ -95,8 +95,8 @@ impl Default for LogMergePolicy {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use core::{SegmentId, SegmentMeta}; use crate::core::{SegmentId, SegmentMeta};
use indexer::merge_policy::MergePolicy; use crate::indexer::merge_policy::MergePolicy;
fn test_merge_policy() -> LogMergePolicy { fn test_merge_policy() -> LogMergePolicy {
let mut log_merge_policy = LogMergePolicy::default(); let mut log_merge_policy = LogMergePolicy::default();

View File

@@ -1,7 +1,7 @@
use crate::Opstamp;
use crate::SegmentId;
use census::{Inventory, TrackedObject}; use census::{Inventory, TrackedObject};
use std::collections::HashSet; use std::collections::HashSet;
use Opstamp;
use SegmentId;
#[derive(Default)] #[derive(Default)]
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>); pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);

View File

@@ -1,5 +1,5 @@
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use std::fmt::Debug; use std::fmt::Debug;
use std::marker; use std::marker;
@@ -39,8 +39,8 @@ impl MergePolicy for NoMergePolicy {
pub mod tests { pub mod tests {
use super::*; use super::*;
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
/// `MergePolicy` useful for test purposes. /// `MergePolicy` useful for test purposes.
/// ///

View File

@@ -1,31 +1,31 @@
use common::MAX_DOC_LIMIT; use crate::common::MAX_DOC_LIMIT;
use core::Segment; use crate::core::Segment;
use core::SegmentReader; use crate::core::SegmentReader;
use core::SerializableSegment; use crate::core::SerializableSegment;
use docset::DocSet; use crate::docset::DocSet;
use fastfield::BytesFastFieldReader; use crate::fastfield::BytesFastFieldReader;
use fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use fastfield::MultiValueIntFastFieldReader; use crate::fastfield::MultiValueIntFastFieldReader;
use fieldnorm::FieldNormReader; use crate::fieldnorm::FieldNormReader;
use fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings;
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::DocId;
use crate::Result;
use crate::TantivyError;
use itertools::Itertools; use itertools::Itertools;
use postings::InvertedIndexSerializer;
use postings::Postings;
use schema::Cardinality;
use schema::FieldType;
use schema::{Field, Schema};
use std::cmp; use std::cmp;
use std::collections::HashMap; use std::collections::HashMap;
use store::StoreWriter;
use termdict::TermMerger;
use termdict::TermOrdinal;
use DocId;
use Result;
use TantivyError;
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 { fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
let mut total_tokens = 0u64; let mut total_tokens = 0u64;
@@ -692,28 +692,28 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::collector::tests::TestCollector;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document;
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
use crate::schema::IntOptions;
use crate::schema::Term;
use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED;
use crate::DocAddress;
use crate::IndexWriter;
use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use collector::tests::TestCollector;
use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use collector::{Count, FacetCollector};
use core::Index;
use futures::Future; use futures::Future;
use query::AllQuery;
use query::BooleanQuery;
use query::TermQuery;
use schema;
use schema::Cardinality;
use schema::Document;
use schema::Facet;
use schema::IndexRecordOption;
use schema::IntOptions;
use schema::Term;
use schema::TextFieldIndexing;
use schema::INDEXED;
use std::io::Cursor; use std::io::Cursor;
use DocAddress;
use IndexWriter;
use Searcher;
#[test] #[test]
fn test_index_merger_no_deletes() { fn test_index_merger_no_deletes() {

View File

@@ -1,6 +1,6 @@
use schema::Document; use crate::schema::Document;
use schema::Term; use crate::schema::Term;
use Opstamp; use crate::Opstamp;
/// Timestamped Delete operation. /// Timestamped Delete operation.
#[derive(Clone, Eq, PartialEq, Debug)] #[derive(Clone, Eq, PartialEq, Debug)]

View File

@@ -1,6 +1,6 @@
use super::IndexWriter; use super::IndexWriter;
use Opstamp; use crate::Opstamp;
use Result; use crate::Result;
/// A prepared commit /// A prepared commit
pub struct PreparedCommit<'a> { pub struct PreparedCommit<'a> {
@@ -10,7 +10,7 @@ pub struct PreparedCommit<'a> {
} }
impl<'a> PreparedCommit<'a> { impl<'a> PreparedCommit<'a> {
pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit { pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
PreparedCommit { PreparedCommit {
index_writer, index_writer,
payload: None, payload: None,

View File

@@ -1,7 +1,7 @@
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet; use bit_set::BitSet;
use core::SegmentId;
use core::SegmentMeta;
use indexer::delete_queue::DeleteCursor;
use std::fmt; use std::fmt;
/// A segment entry describes the state of /// A segment entry describes the state of
@@ -67,7 +67,7 @@ impl SegmentEntry {
} }
impl fmt::Debug for SegmentEntry { impl fmt::Debug for SegmentEntry {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "SegmentEntry({:?})", self.meta) write!(formatter, "SegmentEntry({:?})", self.meta)
} }
} }

View File

@@ -1,16 +1,16 @@
use super::segment_register::SegmentRegister; use super::segment_register::SegmentRegister;
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use core::META_FILEPATH; use crate::core::META_FILEPATH;
use error::TantivyError; use crate::error::TantivyError;
use indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::Result as TantivyResult;
use std::collections::hash_set::HashSet; use std::collections::hash_set::HashSet;
use std::fmt::{self, Debug, Formatter}; use std::fmt::{self, Debug, Formatter};
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::{RwLockReadGuard, RwLockWriteGuard}; use std::sync::{RwLockReadGuard, RwLockWriteGuard};
use Result as TantivyResult;
#[derive(Default)] #[derive(Default)]
struct SegmentRegisters { struct SegmentRegisters {
@@ -29,7 +29,7 @@ pub struct SegmentManager {
} }
impl Debug for SegmentManager { impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
let lock = self.read(); let lock = self.read();
write!( write!(
f, f,
@@ -81,7 +81,7 @@ impl SegmentManager {
/// but have not yet been deleted by the garbage collector. /// but have not yet been deleted by the garbage collector.
pub fn list_files(&self) -> HashSet<PathBuf> { pub fn list_files(&self) -> HashSet<PathBuf> {
let mut files = HashSet::new(); let mut files = HashSet::new();
files.insert(META_FILEPATH.clone()); files.insert(META_FILEPATH.to_path_buf());
for segment_meta in SegmentMeta::all() { for segment_meta in SegmentMeta::all() {
files.extend(segment_meta.list_files()); files.extend(segment_meta.list_files());
} }
@@ -91,13 +91,13 @@ impl SegmentManager {
// Lock poisoning should never happen : // Lock poisoning should never happen :
// The lock is acquired and released within this class, // The lock is acquired and released within this class,
// and the operations cannot panic. // and the operations cannot panic.
fn read(&self) -> RwLockReadGuard<SegmentRegisters> { fn read(&self) -> RwLockReadGuard<'_, SegmentRegisters> {
self.registers self.registers
.read() .read()
.expect("Failed to acquire read lock on SegmentManager.") .expect("Failed to acquire read lock on SegmentManager.")
} }
fn write(&self) -> RwLockWriteGuard<SegmentRegisters> { fn write(&self) -> RwLockWriteGuard<'_, SegmentRegisters> {
self.registers self.registers
.write() .write()
.expect("Failed to acquire write lock on SegmentManager.") .expect("Failed to acquire write lock on SegmentManager.")
@@ -118,6 +118,12 @@ impl SegmentManager {
}); });
} }
pub(crate) fn remove_all_segments(&self) {
let mut registers_lock = self.write();
registers_lock.committed.clear();
registers_lock.uncommitted.clear();
}
pub fn commit(&self, segment_entries: Vec<SegmentEntry>) { pub fn commit(&self, segment_entries: Vec<SegmentEntry>) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.committed.clear(); registers_lock.committed.clear();

View File

@@ -1,7 +1,7 @@
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use indexer::segment_entry::SegmentEntry; use crate::indexer::segment_entry::SegmentEntry;
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt::{self, Debug, Formatter}; use std::fmt::{self, Debug, Formatter};
@@ -20,7 +20,7 @@ pub struct SegmentRegister {
} }
impl Debug for SegmentRegister { impl Debug for SegmentRegister {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "SegmentRegister(")?; write!(f, "SegmentRegister(")?;
for k in self.segment_states.keys() { for k in self.segment_states.keys() {
write!(f, "{}, ", k.short_uuid_string())?; write!(f, "{}, ", k.short_uuid_string())?;
@@ -93,9 +93,9 @@ impl SegmentRegister {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use indexer::delete_queue::*; use crate::indexer::delete_queue::*;
fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> { fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
segment_register segment_register

View File

@@ -1,11 +1,11 @@
use Result; use crate::Result;
use core::Segment; use crate::core::Segment;
use core::SegmentComponent; use crate::core::SegmentComponent;
use fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use postings::InvertedIndexSerializer; use crate::postings::InvertedIndexSerializer;
use store::StoreWriter; use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`. /// the data accumulated and sorted by the `SegmentWriter`.

View File

@@ -1,29 +1,31 @@
use super::segment_manager::{get_mergeable_segments, SegmentManager}; use super::segment_manager::{get_mergeable_segments, SegmentManager};
use core::Index; use crate::core::Index;
use core::IndexMeta; use crate::core::IndexMeta;
use core::Segment; use crate::core::Segment;
use core::SegmentId; use crate::core::SegmentId;
use core::SegmentMeta; use crate::core::SegmentMeta;
use core::SerializableSegment; use crate::core::SerializableSegment;
use core::META_FILEPATH; use crate::core::META_FILEPATH;
use directory::{Directory, DirectoryClone}; use crate::directory::{Directory, DirectoryClone};
use error::TantivyError; use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::schema::Schema;
use crate::Opstamp;
use crate::Result;
use futures::oneshot; use futures::oneshot;
use futures::sync::oneshot::Receiver; use futures::sync::oneshot::Receiver;
use futures::Future; use futures::Future;
use futures_cpupool::Builder as CpuPoolBuilder; use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture; use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool; use futures_cpupool::CpuPool;
use indexer::delete_queue::DeleteCursor;
use indexer::index_writer::advance_deletes;
use indexer::merge_operation::MergeOperationInventory;
use indexer::merger::IndexMerger;
use indexer::stamper::Stamper;
use indexer::MergeOperation;
use indexer::SegmentEntry;
use indexer::SegmentSerializer;
use indexer::{DefaultMergePolicy, MergePolicy};
use schema::Schema;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashMap; use std::collections::HashMap;
@@ -36,19 +38,17 @@ use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::thread; use std::thread;
use std::thread::JoinHandle; use std::thread::JoinHandle;
use Opstamp;
use Result;
/// Save the index meta file. /// Save the index meta file.
/// This operation is atomic : /// This operation is atomic :
/// Either /// Either
// - it fails, in which case an error is returned, /// - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched, /// and the `meta.json` remains untouched,
/// - it success, and `meta.json` is written /// - it succeeds, and `meta.json` is written
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -69,7 +69,8 @@ pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> {
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut Directory) -> Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
writeln!(&mut buffer)?; writeln!(&mut buffer)?;
@@ -141,7 +142,7 @@ struct InnerSegmentUpdater {
pool: CpuPool, pool: CpuPool,
index: Index, index: Index,
segment_manager: SegmentManager, segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<MergePolicy>>>, merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
merging_thread_id: AtomicUsize, merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>, merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
generation: AtomicUsize, generation: AtomicUsize,
@@ -178,11 +179,11 @@ impl SegmentUpdater {
}))) })))
} }
pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> { pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.0.merge_policy.read().unwrap().clone() self.0.merge_policy.read().unwrap().clone()
} }
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy); let arc_merge_policy = Arc::new(merge_policy);
*self.0.merge_policy.write().unwrap() = arc_merge_policy; *self.0.merge_policy.write().unwrap() = arc_merge_policy;
} }
@@ -213,6 +214,11 @@ impl SegmentUpdater {
} }
} }
/// Orders `SegmentManager` to remove all segments
pub(crate) fn remove_all_segments(&self) {
self.0.segment_manager.remove_all_segments();
}
pub fn kill(&mut self) { pub fn kill(&mut self) {
self.0.killed.store(true, Ordering::Release); self.0.killed.store(true, Ordering::Release);
} }
@@ -223,7 +229,7 @@ impl SegmentUpdater {
/// Apply deletes up to the target opstamp to all segments. /// Apply deletes up to the target opstamp to all segments.
/// ///
/// Tne method returns copies of the segment entries, /// The method returns copies of the segment entries,
/// updated with the delete information. /// updated with the delete information.
fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> { fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
let mut segment_entries = self.0.segment_manager.segment_entries(); let mut segment_entries = self.0.segment_manager.segment_entries();
@@ -446,38 +452,41 @@ impl SegmentUpdater {
) -> Result<()> { ) -> Result<()> {
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
info!("End merge {:?}", after_merge_segment_entry.meta()); info!("End merge {:?}", after_merge_segment_entry.meta());
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone(); {
if let Some(delete_operation) = delete_cursor.get() { let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
let committed_opstamp = segment_updater.load_metas().opstamp; if let Some(delete_operation) = delete_cursor.get() {
if delete_operation.opstamp < committed_opstamp { let committed_opstamp = segment_updater.load_metas().opstamp;
let index = &segment_updater.0.index; if delete_operation.opstamp < committed_opstamp {
let segment = index.segment(after_merge_segment_entry.meta().clone()); let index = &segment_updater.0.index;
if let Err(e) = let segment = index.segment(after_merge_segment_entry.meta().clone());
advance_deletes(segment, &mut after_merge_segment_entry, committed_opstamp) if let Err(e) = advance_deletes(
{ segment,
error!( &mut after_merge_segment_entry,
"Merge of {:?} was cancelled (advancing deletes failed): {:?}", committed_opstamp,
merge_operation.segment_ids(), ) {
e error!(
); "Merge of {:?} was cancelled (advancing deletes failed): {:?}",
if cfg!(test) { merge_operation.segment_ids(),
panic!("Merge failed."); e
);
if cfg!(test) {
panic!("Merge failed.");
}
// ... cancel merge
// `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge.
return;
} }
// ... cancel merge
// `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge.
return;
} }
} }
} let previous_metas = segment_updater.load_metas();
segment_updater segment_updater
.0 .0
.segment_manager .segment_manager
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry); .end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
info!("save metas"); segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
let previous_metas = segment_updater.load_metas(); } // we drop all possible handle to a now useless `SegmentMeta`.
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
}) })
.wait() .wait()
@@ -524,9 +533,9 @@ impl SegmentUpdater {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use indexer::merge_policy::tests::MergeWheneverPossible; use crate::indexer::merge_policy::tests::MergeWheneverPossible;
use schema::*; use crate::schema::*;
use Index; use crate::Index;
#[test] #[test]
fn test_delete_during_merge() { fn test_delete_during_merge() {
@@ -651,4 +660,31 @@ mod tests {
assert!(index.searchable_segment_metas().unwrap().is_empty()); assert!(index.searchable_segment_metas().unwrap().is_empty());
assert!(reader.searcher().segment_readers().is_empty()); assert!(reader.searcher().segment_readers().is_empty());
} }
#[test]
fn test_remove_all_segments() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for _ in 0..100 {
index_writer.add_document(doc!(text_field=>"a"));
index_writer.add_document(doc!(text_field=>"b"));
}
assert!(index_writer.commit().is_ok());
}
index_writer.segment_updater().remove_all_segments();
let seg_vec = index_writer
.segment_updater()
.0
.segment_manager
.segment_entries();
assert!(seg_vec.is_empty());
}
} }
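
The reworked `end_merge` above wraps the code holding `SegmentMeta` handles in an inner block so that every handle is dropped before `garbage_collect_files_exec()` runs. A toy illustration of why the scoping matters, using `Rc` reference counts as a stand-in for the tracked handles:

```rust
// Toy illustration of the scoping idea in the reworked `end_merge`: handles
// that keep a resource alive are confined to an inner block so they are gone
// before a cleanup pass runs. `Tracked` and `garbage_collect` are stand-ins.
use std::rc::Rc;

struct Tracked {
    name: &'static str,
}

fn garbage_collect(handle: &Rc<Tracked>) {
    // Cleanup can only reclaim the resource once no other handle is alive.
    if Rc::strong_count(handle) == 1 {
        println!("collected {}", handle.name);
    } else {
        println!("{} still referenced, skipping", handle.name);
    }
}

fn main() {
    let meta = Rc::new(Tracked { name: "segment-meta" });
    {
        // All work that needs an extra handle happens inside this block...
        let working_copy = meta.clone();
        println!("merging {}", working_copy.name);
    } // ...so the extra handle is dropped here, before garbage collection.
    garbage_collect(&meta);
}
```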

View File

@@ -1,26 +1,26 @@
use super::operation::AddOperation; use super::operation::AddOperation;
use core::Segment; use crate::core::Segment;
use core::SerializableSegment; use crate::core::SerializableSegment;
use fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
use schema::FieldEntry; use crate::schema::FieldEntry;
use schema::FieldType; use crate::schema::FieldType;
use schema::Schema; use crate::schema::Schema;
use schema::Term; use crate::schema::Term;
use schema::Value; use crate::schema::Value;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
use std::io; use std::io;
use std::str; use std::str;
use tokenizer::BoxedTokenizer;
use tokenizer::FacetTokenizer;
use tokenizer::{TokenStream, Tokenizer};
use DocId;
use Opstamp;
use Result;
/// A `SegmentWriter` is in charge of creating segment index from a /// A `SegmentWriter` is in charge of creating segment index from a
/// documents. /// set of documents.
/// ///
/// They creates the postings list in anonymous memory. /// They creates the postings list in anonymous memory.
/// The segment is layed on disk when the segment gets `finalized`. /// The segment is layed on disk when the segment gets `finalized`.
@@ -31,7 +31,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter, fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter, fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>, doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<Box<BoxedTokenizer>>>, tokenizers: Vec<Option<Box<dyn BoxedTokenizer>>>,
} }
impl SegmentWriter { impl SegmentWriter {

View File

@@ -1,7 +1,7 @@
+use crate::Opstamp;
 use std::ops::Range;
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
-use Opstamp;
 /// Stamper provides Opstamps, which is just an auto-increment id to label
 /// an operation.
@@ -28,6 +28,12 @@ impl Stamper {
             end: start + n,
         }
     }
+    /// Reverts the stamper to a given `Opstamp` value and returns it
+    pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
+        self.0.store(to_opstamp, Ordering::SeqCst);
+        to_opstamp
+    }
 }
 #[cfg(test)]
@@ -50,4 +56,17 @@ mod test {
         assert_eq!(stamper.stamp(), 15u64);
     }
+    #[test]
+    fn test_stamper_revert() {
+        let stamper = Stamper::new(7u64);
+        assert_eq!(stamper.stamp(), 7u64);
+        assert_eq!(stamper.stamp(), 8u64);
+        let stamper_clone = stamper.clone();
+        assert_eq!(stamper_clone.stamp(), 9u64);
+        stamper.revert(6);
+        assert_eq!(stamper.stamp(), 6);
+        assert_eq!(stamper_clone.stamp(), 7);
+    }
 }

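`revert` simply stores the target opstamp back into the shared counter, so every clone of the `Stamper` observes the rollback, which is exactly what the new test checks. A self-contained sketch of that idea (an illustration only, not tantivy's actual module):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    // Shared auto-incrementing counter that can be rolled back.
    #[derive(Clone)]
    struct Stamper(Arc<AtomicU64>);

    impl Stamper {
        fn new(first: u64) -> Stamper {
            Stamper(Arc::new(AtomicU64::new(first)))
        }
        fn stamp(&self) -> u64 {
            self.0.fetch_add(1, Ordering::SeqCst)
        }
        fn revert(&self, to: u64) -> u64 {
            self.0.store(to, Ordering::SeqCst);
            to
        }
    }

    fn main() {
        let stamper = Stamper::new(7);
        assert_eq!(stamper.stamp(), 7);
        let clone = stamper.clone();
        assert_eq!(clone.stamp(), 8);
        stamper.revert(6);
        // Both handles share the counter, so the clone sees the reverted value.
        assert_eq!(clone.stamp(), 6);
    }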
View File

@@ -108,9 +108,6 @@
 //! [literate programming](http://fulmicoton.com/tantivy-examples/simple_search.html) /
 //! [source code](https://github.com/fulmicoton/tantivy/blob/master/examples/simple_search.rs))
-#[macro_use]
-extern crate lazy_static;
 #[macro_use]
 extern crate serde_derive;
@@ -123,57 +120,9 @@ extern crate log;
 #[macro_use]
 extern crate failure;
-#[cfg(feature = "mmap")]
-extern crate atomicwrites;
-extern crate base64;
-extern crate bit_set;
-extern crate bitpacking;
-extern crate byteorder;
-extern crate combine;
-extern crate crossbeam;
-extern crate fnv;
-extern crate futures;
-extern crate futures_cpupool;
-extern crate htmlescape;
-extern crate itertools;
-extern crate levenshtein_automata;
-#[cfg(feature = "mmap")]
-extern crate memmap;
-extern crate num_cpus;
-extern crate owning_ref;
-extern crate regex;
-extern crate rust_stemmers;
-extern crate scoped_pool;
-extern crate serde;
-extern crate stable_deref_trait;
-extern crate tantivy_fst;
-extern crate tempdir;
-extern crate tempfile;
-extern crate uuid;
-#[cfg(test)]
-#[macro_use]
-extern crate matches;
-#[cfg(windows)]
-extern crate winapi;
-#[cfg(test)]
-extern crate rand;
-#[cfg(test)]
-#[macro_use]
-extern crate maplit;
 #[cfg(all(test, feature = "unstable"))]
 extern crate test;
-#[macro_use]
-extern crate downcast_rs;
-#[macro_use]
-extern crate fail;
 #[cfg(feature = "mmap")]
 #[cfg(test)]
 mod functional_test;
@@ -181,14 +130,11 @@ mod functional_test;
 #[macro_use]
 mod macros;
-pub use error::TantivyError;
+pub use crate::error::TantivyError;
 #[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
-pub use error::TantivyError as Error;
+pub use crate::error::TantivyError as Error;
+pub use chrono;
-extern crate census;
-pub extern crate chrono;
-extern crate owned_read;
 /// Tantivy result.
 pub type Result<T> = std::result::Result<T, error::TantivyError>;
@@ -225,15 +171,15 @@ pub use self::snippet::{Snippet, SnippetGenerator};
 mod docset;
 pub use self::docset::{DocSet, SkipResult};
-pub use core::SegmentComponent;
-pub use core::{Index, Searcher, Segment, SegmentId, SegmentMeta, IndexMeta};
-pub use core::{InvertedIndexReader, SegmentReader};
-pub use directory::Directory;
-pub use indexer::IndexWriter;
-pub use postings::Postings;
-pub use schema::{Document, Term};
-pub use common::{i64_to_u64, u64_to_i64};
+pub use crate::core::SegmentComponent;
+pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
+pub use crate::core::{InvertedIndexReader, SegmentReader};
+pub use crate::directory::Directory;
+pub use crate::indexer::IndexWriter;
+pub use crate::postings::Postings;
+pub use crate::schema::{Document, Term};
+pub use crate::common::{i64_to_u64, u64_to_i64};
 /// Expose the current version of tantivy, as well
 /// whether it was compiled with the simd compression.
@@ -243,10 +189,10 @@ pub fn version() -> &'static str {
 /// Defines tantivy's merging strategy
 pub mod merge_policy {
-    pub use indexer::DefaultMergePolicy;
-    pub use indexer::LogMergePolicy;
-    pub use indexer::MergePolicy;
-    pub use indexer::NoMergePolicy;
+    pub use crate::indexer::DefaultMergePolicy;
+    pub use crate::indexer::LogMergePolicy;
+    pub use crate::indexer::MergePolicy;
+    pub use crate::indexer::NoMergePolicy;
 }
 /// A `u32` identifying a document within a segment.
@@ -304,20 +250,20 @@ pub struct DocAddress(pub SegmentLocalId, pub DocId);
 #[cfg(test)]
 mod tests {
-    use collector::tests::TestCollector;
-    use core::SegmentReader;
-    use docset::DocSet;
-    use query::BooleanQuery;
+    use crate::collector::tests::TestCollector;
+    use crate::core::SegmentReader;
+    use crate::docset::DocSet;
+    use crate::query::BooleanQuery;
+    use crate::schema::*;
+    use crate::DocAddress;
+    use crate::Index;
+    use crate::IndexWriter;
+    use crate::Postings;
+    use crate::ReloadPolicy;
     use rand::distributions::Bernoulli;
     use rand::distributions::Uniform;
    use rand::rngs::StdRng;
     use rand::{Rng, SeedableRng};
-    use schema::*;
-    use DocAddress;
-    use Index;
-    use IndexWriter;
-    use Postings;
-    use ReloadPolicy;
     pub fn assert_nearly_equals(expected: f32, val: f32) {
         assert!(
@@ -342,7 +288,7 @@ mod tests {
     pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
         StdRng::from_seed([seed_val; 32])
-            .sample_iter(&Bernoulli::new(ratio))
+            .sample_iter(&Bernoulli::new(ratio).unwrap())
             .take(n as usize)
             .enumerate()
             .filter_map(|(val, keep)| if keep { Some(val as u32) } else { None })
@@ -480,7 +426,7 @@ mod tests {
         }
     }
-    fn advance_undeleted(docset: &mut DocSet, reader: &SegmentReader) -> bool {
+    fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
         while docset.advance() {
             if !reader.is_deleted(docset.doc()) {
                 return true;

View File

@@ -67,7 +67,7 @@ macro_rules! doc(
 #[cfg(test)]
 mod test {
-    use schema::{Schema, FAST, TEXT};
+    use crate::schema::{Schema, FAST, TEXT};
     #[test]
     fn test_doc_basic() {

View File

@@ -38,8 +38,8 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
 pub mod tests {
     use super::{PositionReader, PositionSerializer};
-    use directory::ReadOnlySource;
-    use positions::COMPRESSION_BLOCK_SIZE;
+    use crate::directory::ReadOnlySource;
+    use crate::positions::COMPRESSION_BLOCK_SIZE;
     use std::iter;
     fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {

View File

@@ -1,3 +1,9 @@
+use crate::common::{BinarySerializable, FixedSize};
+use crate::directory::ReadOnlySource;
+use crate::positions::COMPRESSION_BLOCK_SIZE;
+use crate::positions::LONG_SKIP_INTERVAL;
+use crate::positions::LONG_SKIP_IN_BLOCKS;
+use crate::postings::compression::compressed_block_size;
 /// Positions works as a long sequence of compressed block.
 /// All terms are chained one after the other.
 ///
@@ -19,13 +25,7 @@
 /// so skipping a block without decompressing it is just a matter of advancing that many
 /// bytes.
 use bitpacking::{BitPacker, BitPacker4x};
-use common::{BinarySerializable, FixedSize};
-use directory::ReadOnlySource;
 use owned_read::OwnedRead;
-use positions::COMPRESSION_BLOCK_SIZE;
-use positions::LONG_SKIP_INTERVAL;
-use positions::LONG_SKIP_IN_BLOCKS;
-use postings::compression::compressed_block_size;
 struct Positions {
     bit_packer: BitPacker4x,

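The module comment explains why skipping is cheap: every 128-integer block is bit-packed with a known bit width, so its compressed size can be computed without decoding it. A small sketch of that arithmetic; the constant and helper mirror the names in the diff but are redefined locally rather than taken from tantivy:

    const COMPRESSION_BLOCK_SIZE: usize = 128;

    // Size in bytes of one bit-packed block of 128 integers.
    fn compressed_block_size(num_bits: u8) -> usize {
        num_bits as usize * COMPRESSION_BLOCK_SIZE / 8
    }

    fn main() {
        // A block whose deltas fit in 7 bits occupies 112 bytes...
        assert_eq!(compressed_block_size(7), 112);
        // ...so skipping three such blocks means advancing 336 bytes without decompressing.
        let skipped: usize = [7u8, 7, 7].iter().map(|&b| compressed_block_size(b)).sum();
        assert_eq!(skipped, 336);
    }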
View File

@@ -1,8 +1,8 @@
+use crate::common::BinarySerializable;
+use crate::common::CountingWriter;
+use crate::positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
 use bitpacking::BitPacker;
 use bitpacking::BitPacker4x;
-use common::BinarySerializable;
-use common::CountingWriter;
-use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
 use std::io::{self, Write};
 pub struct PositionSerializer<W: io::Write> {

View File

@@ -1,4 +1,4 @@
-use postings::compression::AlignedBuffer;
+use crate::postings::compression::AlignedBuffer;
 /// This modules define the logic used to search for a doc in a given
 /// block. (at most 128 docs)
@@ -8,7 +8,7 @@ use postings::compression::AlignedBuffer;
 #[cfg(target_arch = "x86_64")]
 mod sse2 {
-    use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
+    use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
     use std::arch::x86_64::__m128i as DataType;
     use std::arch::x86_64::_mm_add_epi32 as op_add;
     use std::arch::x86_64::_mm_cmplt_epi32 as op_lt;
@@ -49,7 +49,7 @@ mod sse2 {
 #[cfg(test)]
 mod test {
     use super::linear_search_sse2_128;
-    use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
+    use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
     #[test]
     fn test_linear_search_sse2_128_u32() {
@@ -140,7 +140,7 @@ impl BlockSearcher {
     ) -> usize {
         #[cfg(target_arch = "x86_64")]
         {
-            use postings::compression::COMPRESSION_BLOCK_SIZE;
+            use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
             if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
                 return sse2::linear_search_sse2_128(block_docs, target);
             }
@@ -166,7 +166,7 @@ mod tests {
     use super::exponential_search;
     use super::linear_search;
     use super::BlockSearcher;
-    use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
+    use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
     #[test]
     fn test_linear_search() {

View File

@@ -1,5 +1,5 @@
+use crate::common::FixedSize;
 use bitpacking::{BitPacker, BitPacker4x};
-use common::FixedSize;
 pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
 const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * u32::SIZE_IN_BYTES;

View File

@@ -31,7 +31,7 @@ pub use self::segment_postings::{BlockSegmentPostings, SegmentPostings};
 pub(crate) use self::stacker::compute_table_size;
-pub use common::HasLen;
+pub use crate::common::HasLen;
 pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32;
 pub(crate) type UnorderedTermId = u64;
@@ -48,24 +48,25 @@ pub(crate) enum FreqReadingOption {
 pub mod tests {
     use super::*;
-    use core::Index;
-    use core::SegmentComponent;
-    use core::SegmentReader;
-    use docset::{DocSet, SkipResult};
-    use fieldnorm::FieldNormReader;
-    use indexer::operation::AddOperation;
-    use indexer::SegmentWriter;
-    use merge_policy::NoMergePolicy;
-    use query::Scorer;
+    use crate::core::Index;
+    use crate::core::SegmentComponent;
+    use crate::core::SegmentReader;
+    use crate::docset::{DocSet, SkipResult};
+    use crate::fieldnorm::FieldNormReader;
+    use crate::indexer::operation::AddOperation;
+    use crate::indexer::SegmentWriter;
+    use crate::merge_policy::NoMergePolicy;
+    use crate::query::Scorer;
+    use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
+    use crate::schema::{Field, TextOptions};
+    use crate::schema::{IndexRecordOption, TextFieldIndexing};
+    use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
+    use crate::DocId;
+    use crate::Score;
+    use once_cell::sync::Lazy;
     use rand::rngs::StdRng;
     use rand::{Rng, SeedableRng};
-    use schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
-    use schema::{Field, TextOptions};
-    use schema::{IndexRecordOption, TextFieldIndexing};
     use std::iter;
-    use tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
-    use DocId;
-    use Score;
     #[test]
     pub fn test_position_write() {
@@ -509,53 +510,52 @@ pub mod tests {
         }
     }
-    lazy_static! {
-        pub static ref TERM_A: Term = {
-            let field = Field(0);
-            Term::from_field_text(field, "a")
-        };
-        pub static ref TERM_B: Term = {
-            let field = Field(0);
-            Term::from_field_text(field, "b")
-        };
-        pub static ref TERM_C: Term = {
-            let field = Field(0);
-            Term::from_field_text(field, "c")
-        };
-        pub static ref TERM_D: Term = {
-            let field = Field(0);
-            Term::from_field_text(field, "d")
-        };
-        pub static ref INDEX: Index = {
-            let mut schema_builder = Schema::builder();
-            let text_field = schema_builder.add_text_field("text", STRING);
-            let schema = schema_builder.build();
-            let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
-            let index = Index::create_in_ram(schema);
-            let posting_list_size = 1_000_000;
-            {
-                let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-                for _ in 0..posting_list_size {
-                    let mut doc = Document::default();
-                    if rng.gen_bool(1f64 / 15f64) {
-                        doc.add_text(text_field, "a");
-                    }
-                    if rng.gen_bool(1f64 / 10f64) {
-                        doc.add_text(text_field, "b");
-                    }
-                    if rng.gen_bool(1f64 / 5f64) {
-                        doc.add_text(text_field, "c");
-                    }
-                    doc.add_text(text_field, "d");
-                    index_writer.add_document(doc);
-                }
-                assert!(index_writer.commit().is_ok());
-            }
-            index
-        };
-    }
+    pub static TERM_A: Lazy<Term> = Lazy::new(|| {
+        let field = Field(0);
+        Term::from_field_text(field, "a")
+    });
+    pub static TERM_B: Lazy<Term> = Lazy::new(|| {
+        let field = Field(0);
+        Term::from_field_text(field, "b")
+    });
+    pub static TERM_C: Lazy<Term> = Lazy::new(|| {
+        let field = Field(0);
+        Term::from_field_text(field, "c")
+    });
+    pub static TERM_D: Lazy<Term> = Lazy::new(|| {
+        let field = Field(0);
+        Term::from_field_text(field, "d")
+    });
+    pub static INDEX: Lazy<Index> = Lazy::new(|| {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", STRING);
+        let schema = schema_builder.build();
+        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
+        let index = Index::create_in_ram(schema);
+        let posting_list_size = 1_000_000;
+        {
+            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            for _ in 0..posting_list_size {
+                let mut doc = Document::default();
+                if rng.gen_bool(1f64 / 15f64) {
+                    doc.add_text(text_field, "a");
+                }
+                if rng.gen_bool(1f64 / 10f64) {
+                    doc.add_text(text_field, "b");
+                }
+                if rng.gen_bool(1f64 / 5f64) {
+                    doc.add_text(text_field, "c");
+                }
+                doc.add_text(text_field, "d");
+                index_writer.add_document(doc);
+            }
+            assert!(index_writer.commit().is_ok());
+        }
+        index
+    });
     /// Wraps a given docset, and forward alls call but the
     /// `.skip_next(...)`. This is useful to test that a specialized
@@ -589,7 +589,7 @@ pub mod tests {
         }
     }
-    pub fn test_skip_against_unoptimized<F: Fn() -> Box<DocSet>>(
+    pub fn test_skip_against_unoptimized<F: Fn() -> Box<dyn DocSet>>(
         postings_factory: F,
         targets: Vec<u32>,
     ) {

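The `lazy_static!` block above is rewritten as plain statics of type `once_cell::sync::Lazy`, which needs no macro and dereferences to the initialized value. A minimal sketch of the same pattern on an ordinary `String` (assuming once_cell is a dependency, as it is in this diff):

    use once_cell::sync::Lazy;

    // Initialized once, on first access, like a lazy_static! item.
    static GREETING: Lazy<String> = Lazy::new(|| format!("hello, {}", "world"));

    fn main() {
        // Deref gives access to the value; later accesses reuse the cached result.
        assert_eq!(GREETING.as_str(), "hello, world");
    }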
View File

@@ -1,4 +1,4 @@
-use docset::DocSet;
+use crate::docset::DocSet;
 /// Postings (also called inverted list)
 ///

View File

@@ -1,23 +1,23 @@
 use super::stacker::{Addr, MemoryArena, TermHashMap};
-use postings::recorder::{
+use crate::postings::recorder::{
     BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
 };
-use postings::UnorderedTermId;
-use postings::{FieldSerializer, InvertedIndexSerializer};
-use schema::IndexRecordOption;
-use schema::{Field, FieldEntry, FieldType, Schema, Term};
+use crate::postings::UnorderedTermId;
+use crate::postings::{FieldSerializer, InvertedIndexSerializer};
+use crate::schema::IndexRecordOption;
+use crate::schema::{Field, FieldEntry, FieldType, Schema, Term};
+use crate::termdict::TermOrdinal;
+use crate::tokenizer::TokenStream;
+use crate::tokenizer::{Token, MAX_TOKEN_LEN};
+use crate::DocId;
+use crate::Result;
 use std::collections::HashMap;
 use std::io;
 use std::marker::PhantomData;
 use std::ops::DerefMut;
-use termdict::TermOrdinal;
-use tokenizer::TokenStream;
-use tokenizer::{Token, MAX_TOKEN_LEN};
-use DocId;
-use Result;
-fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<PostingsWriter> {
+fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter> {
     match *field_entry.field_type() {
         FieldType::Str(ref text_options) => text_options
             .get_indexing_options()
@@ -49,7 +49,7 @@ pub struct MultiFieldPostingsWriter {
     heap: MemoryArena,
     schema: Schema,
     term_index: TermHashMap,
-    per_field_postings_writers: Vec<Box<PostingsWriter>>,
+    per_field_postings_writers: Vec<Box<dyn PostingsWriter>>,
 }
 fn make_field_partition(
@@ -99,7 +99,12 @@ impl MultiFieldPostingsWriter {
         self.term_index.mem_usage() + self.heap.mem_usage()
     }
-    pub fn index_text(&mut self, doc: DocId, field: Field, token_stream: &mut TokenStream) -> u32 {
+    pub fn index_text(
+        &mut self,
+        doc: DocId,
+        field: Field,
+        token_stream: &mut dyn TokenStream,
+    ) -> u32 {
         let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
         postings_writer.index_text(
             &mut self.term_index,
@@ -138,10 +143,10 @@ impl MultiFieldPostingsWriter {
             FieldType::Str(_) | FieldType::HierarchicalFacet => {
                 // populating the (unordered term ord) -> (ordered term ord) mapping
                 // for the field.
-                let mut unordered_term_ids = term_offsets[start..stop]
+                let unordered_term_ids = term_offsets[start..stop]
                     .iter()
                     .map(|&(_, _, bucket)| bucket);
-                let mut mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
+                let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
                     .enumerate()
                     .map(|(term_ord, unord_term_id)| {
                         (unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
@@ -194,7 +199,7 @@ pub trait PostingsWriter {
     fn serialize(
         &self,
         term_addrs: &[(&[u8], Addr, UnorderedTermId)],
-        serializer: &mut FieldSerializer,
+        serializer: &mut FieldSerializer<'_>,
         term_heap: &MemoryArena,
         heap: &MemoryArena,
     ) -> io::Result<()>;
@@ -205,7 +210,7 @@ pub trait PostingsWriter {
         term_index: &mut TermHashMap,
         doc_id: DocId,
         field: Field,
-        token_stream: &mut TokenStream,
+        token_stream: &mut dyn TokenStream,
         heap: &mut MemoryArena,
     ) -> u32 {
         let mut term = Term::for_field(field);
@@ -246,7 +251,7 @@ impl<Rec: Recorder + 'static> SpecializedPostingsWriter<Rec> {
     }
     /// Builds a `SpecializedPostingsWriter` storing its data in a heap.
-    pub fn new_boxed() -> Box<PostingsWriter> {
+    pub fn new_boxed() -> Box<dyn PostingsWriter> {
         Box::new(SpecializedPostingsWriter::<Rec>::new())
     }
 }
@@ -283,7 +288,7 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
     fn serialize(
         &self,
         term_addrs: &[(&[u8], Addr, UnorderedTermId)],
-        serializer: &mut FieldSerializer,
+        serializer: &mut FieldSerializer<'_>,
         termdict_heap: &MemoryArena,
         heap: &MemoryArena,
     ) -> io::Result<()> {

View File

@@ -1,8 +1,8 @@
 use super::stacker::{ExpUnrolledLinkedList, MemoryArena};
-use common::{read_u32_vint, write_u32_vint};
-use postings::FieldSerializer;
+use crate::common::{read_u32_vint, write_u32_vint};
+use crate::postings::FieldSerializer;
+use crate::DocId;
 use std::io;
-use DocId;
 const POSITION_END: u32 = 0;
@@ -72,7 +72,7 @@ pub(crate) trait Recorder: Copy + 'static {
     fn serialize(
         &self,
         buffer_lender: &mut BufferLender,
-        serializer: &mut FieldSerializer,
+        serializer: &mut FieldSerializer<'_>,
         heap: &MemoryArena,
     ) -> io::Result<()>;
 }
@@ -108,7 +108,7 @@ impl Recorder for NothingRecorder {
     fn serialize(
         &self,
         buffer_lender: &mut BufferLender,
-        serializer: &mut FieldSerializer,
+        serializer: &mut FieldSerializer<'_>,
         heap: &MemoryArena,
     ) -> io::Result<()> {
         let buffer = buffer_lender.lend_u8();
@@ -159,7 +159,7 @@ impl Recorder for TermFrequencyRecorder {
     fn serialize(
         &self,
         buffer_lender: &mut BufferLender,
-        serializer: &mut FieldSerializer,
+        serializer: &mut FieldSerializer<'_>,
         heap: &MemoryArena,
     ) -> io::Result<()> {
         let buffer = buffer_lender.lend_u8();
@@ -208,7 +208,7 @@ impl Recorder for TFAndPositionRecorder {
     fn serialize(
         &self,
         buffer_lender: &mut BufferLender,
-        serializer: &mut FieldSerializer,
+        serializer: &mut FieldSerializer<'_>,
         heap: &MemoryArena,
     ) -> io::Result<()> {
         let (buffer_u8, buffer_positions) = buffer_lender.lend_all();

View File

@@ -1,21 +1,21 @@
-use common::BitSet;
-use common::HasLen;
-use common::{BinarySerializable, VInt};
-use docset::{DocSet, SkipResult};
+use crate::common::BitSet;
+use crate::common::HasLen;
+use crate::common::{BinarySerializable, VInt};
+use crate::docset::{DocSet, SkipResult};
+use crate::positions::PositionReader;
+use crate::postings::compression::{compressed_block_size, AlignedBuffer};
+use crate::postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
+use crate::postings::serializer::PostingsSerializer;
+use crate::postings::BlockSearcher;
+use crate::postings::FreqReadingOption;
+use crate::postings::Postings;
+use crate::postings::SkipReader;
+use crate::postings::USE_SKIP_INFO_LIMIT;
+use crate::schema::IndexRecordOption;
+use crate::DocId;
 use owned_read::OwnedRead;
-use positions::PositionReader;
-use postings::compression::{compressed_block_size, AlignedBuffer};
-use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
-use postings::serializer::PostingsSerializer;
-use postings::BlockSearcher;
-use postings::FreqReadingOption;
-use postings::Postings;
-use postings::SkipReader;
-use postings::USE_SKIP_INFO_LIMIT;
-use schema::IndexRecordOption;
 use std::cmp::Ordering;
 use tantivy_fst::Streamer;
-use DocId;
 struct PositionComputer {
     // store the amount of position int
@@ -611,17 +611,17 @@ mod tests {
     use super::BlockSegmentPostings;
     use super::BlockSegmentPostingsSkipResult;
     use super::SegmentPostings;
-    use common::HasLen;
-    use core::Index;
-    use docset::DocSet;
-    use postings::postings::Postings;
-    use schema::IndexRecordOption;
-    use schema::Schema;
-    use schema::Term;
-    use schema::INDEXED;
+    use crate::common::HasLen;
+    use crate::core::Index;
+    use crate::docset::DocSet;
+    use crate::postings::postings::Postings;
+    use crate::schema::IndexRecordOption;
+    use crate::schema::Schema;
+    use crate::schema::Term;
+    use crate::schema::INDEXED;
+    use crate::DocId;
+    use crate::SkipResult;
     use tantivy_fst::Streamer;
-    use DocId;
-    use SkipResult;
     #[test]
     fn test_empty_segment_postings() {

View File

@@ -1,18 +1,18 @@
 use super::TermInfo;
-use common::{BinarySerializable, VInt};
-use common::{CompositeWrite, CountingWriter};
-use core::Segment;
-use directory::WritePtr;
-use positions::PositionSerializer;
-use postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
-use postings::skip::SkipSerializer;
-use postings::USE_SKIP_INFO_LIMIT;
-use schema::Schema;
-use schema::{Field, FieldEntry, FieldType};
+use crate::common::{BinarySerializable, VInt};
+use crate::common::{CompositeWrite, CountingWriter};
+use crate::core::Segment;
+use crate::directory::WritePtr;
+use crate::positions::PositionSerializer;
+use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
+use crate::postings::skip::SkipSerializer;
+use crate::postings::USE_SKIP_INFO_LIMIT;
+use crate::schema::Schema;
+use crate::schema::{Field, FieldEntry, FieldType};
+use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
+use crate::DocId;
+use crate::Result;
 use std::io::{self, Write};
-use termdict::{TermDictionaryBuilder, TermOrdinal};
-use DocId;
-use Result;
 /// `InvertedIndexSerializer` is in charge of serializing
 /// postings on disk, in the
@@ -73,7 +73,7 @@ impl InvertedIndexSerializer {
     /// Open a new `PostingsSerializer` for the given segment
     pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
-        use SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
+        use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
         InvertedIndexSerializer::create(
             CompositeWrite::wrap(segment.open_write(TERMS)?),
             CompositeWrite::wrap(segment.open_write(POSTINGS)?),
@@ -91,7 +91,7 @@ impl InvertedIndexSerializer {
         &mut self,
         field: Field,
         total_num_tokens: u64,
-    ) -> io::Result<FieldSerializer> {
+    ) -> io::Result<FieldSerializer<'_>> {
         let field_entry: &FieldEntry = self.schema.get_field_entry(field);
         let term_dictionary_write = self.terms_write.for_field(field);
         let postings_write = self.postings_write.for_field(field);

View File

@@ -1,8 +1,8 @@
-use common::BinarySerializable;
+use crate::common::BinarySerializable;
+use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
+use crate::schema::IndexRecordOption;
+use crate::DocId;
 use owned_read::OwnedRead;
-use postings::compression::COMPRESSION_BLOCK_SIZE;
-use schema::IndexRecordOption;
-use DocId;
 pub struct SkipSerializer {
     buffer: Vec<u8>,

View File

@@ -1,7 +1,7 @@
 use super::{Addr, MemoryArena};
-use postings::stacker::memory_arena::load;
-use postings::stacker::memory_arena::store;
+use crate::postings::stacker::memory_arena::load;
+use crate::postings::stacker::memory_arena::store;
 use std::io;
 use std::mem;
@@ -16,8 +16,8 @@ enum CapacityResult {
 fn len_to_capacity(len: u32) -> CapacityResult {
     match len {
-        0...15 => CapacityResult::Available(FIRST_BLOCK as u32 - len),
-        16...MAX_BLOCK_LEN => {
+        0..=15 => CapacityResult::Available(FIRST_BLOCK as u32 - len),
+        16..=MAX_BLOCK_LEN => {
             let cap = 1 << (32u32 - (len - 1u32).leading_zeros());
             let available = cap - len;
             if available == 0 {

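The `0...15` to `0..=15` change swaps the long-deprecated triple-dot range pattern for the stable inclusive-range syntax; the matched values are identical. A tiny illustration with made-up bucket names:

    fn bucket(len: u32) -> &'static str {
        match len {
            // `0..=15` matches 0 through 15 inclusive, exactly like the old `0...15`.
            0..=15 => "first block",
            16..=31 => "second block",
            _ => "grown block",
        }
    }

    fn main() {
        assert_eq!(bucket(15), "first block");
        assert_eq!(bucket(16), "second block");
        assert_eq!(bucket(100), "grown block");
    }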
View File

@@ -1,11 +1,11 @@
-extern crate murmurhash32;
+use murmurhash32;
 use self::murmurhash32::murmurhash2;
 use super::{Addr, MemoryArena};
+use crate::postings::stacker::memory_arena::store;
+use crate::postings::UnorderedTermId;
 use byteorder::{ByteOrder, NativeEndian};
-use postings::stacker::memory_arena::store;
-use postings::UnorderedTermId;
 use std::iter;
 use std::mem;
 use std::slice;
@@ -154,7 +154,7 @@ impl TermHashMap {
         unordered_term_id
     }
-    pub fn iter(&self) -> Iter {
+    pub fn iter(&self) -> Iter<'_> {
         Iter {
             inner: self.occupied.iter(),
             hashmap: &self,

View File

@@ -1,4 +1,4 @@
-use common::{BinarySerializable, FixedSize};
+use crate::common::{BinarySerializable, FixedSize};
 use std::io;
 /// `TermInfo` wraps the metadata associated to a Term.
@@ -45,7 +45,7 @@ impl BinarySerializable for TermInfo {
 mod tests {
     use super::TermInfo;
-    use common::test::fixed_size_test;
+    use crate::common::test::fixed_size_test;
     #[test]
     fn test_fixed_size() {

View File

@@ -1,10 +1,11 @@
-use core::Searcher;
-use core::SegmentReader;
-use docset::DocSet;
-use query::{Query, Scorer, Weight};
-use DocId;
-use Result;
-use Score;
+use crate::core::Searcher;
+use crate::core::SegmentReader;
+use crate::docset::DocSet;
+use crate::query::explanation::does_not_match;
+use crate::query::{Explanation, Query, Scorer, Weight};
+use crate::DocId;
+use crate::Result;
+use crate::Score;
 /// Query that matches all of the documents.
 ///
@@ -13,7 +14,7 @@ use Score;
 pub struct AllQuery;
 impl Query for AllQuery {
-    fn weight(&self, _: &Searcher, _: bool) -> Result<Box<Weight>> {
+    fn weight(&self, _: &Searcher, _: bool) -> Result<Box<dyn Weight>> {
         Ok(Box::new(AllWeight))
     }
 }
@@ -22,13 +23,20 @@ impl Query for AllQuery {
 pub struct AllWeight;
 impl Weight for AllWeight {
-    fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
         Ok(Box::new(AllScorer {
             state: State::NotStarted,
             doc: 0u32,
             max_doc: reader.max_doc(),
         }))
     }
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
+        if doc >= reader.max_doc() {
+            return Err(does_not_match(doc));
+        }
+        Ok(Explanation::new("AllQuery", 1f32))
+    }
 }
 enum State {
@@ -85,9 +93,9 @@ impl Scorer for AllScorer {
 mod tests {
     use super::AllQuery;
-    use query::Query;
-    use schema::{Schema, TEXT};
-    use Index;
+    use crate::query::Query;
+    use crate::schema::{Schema, TEXT};
+    use crate::Index;
     #[test]
     fn test_all_query() {

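For context, a hedged sketch of how `AllQuery` is exercised from the public API; the field name, the 3 MB writer budget, and the use of the `Count` collector and `IndexReader` are assumptions about the 0.10 surface, not taken from this diff:

    use tantivy::collector::Count;
    use tantivy::query::AllQuery;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());

        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
        writer.add_document(doc!(text => "a"));
        writer.add_document(doc!(text => "b"));
        writer.commit()?;

        // AllQuery matches every non-deleted document in the index.
        let reader = index.reader()?;
        let count = reader.searcher().search(&AllQuery, &Count)?;
        assert_eq!(count, 2);
        Ok(())
    }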
View File

@@ -1,12 +1,14 @@
-use common::BitSet;
-use core::SegmentReader;
-use query::BitSetDocSet;
-use query::ConstScorer;
-use query::{Scorer, Weight};
-use schema::{Field, IndexRecordOption};
+use crate::common::BitSet;
+use crate::core::SegmentReader;
+use crate::query::ConstScorer;
+use crate::query::{BitSetDocSet, Explanation};
+use crate::query::{Scorer, Weight};
+use crate::schema::{Field, IndexRecordOption};
+use crate::termdict::{TermDictionary, TermStreamer};
+use crate::DocId;
+use crate::TantivyError;
+use crate::{Result, SkipResult};
 use tantivy_fst::Automaton;
-use termdict::{TermDictionary, TermStreamer};
-use Result;
 /// A weight struct for Fuzzy Term and Regex Queries
 pub struct AutomatonWeight<A>
@@ -36,7 +38,7 @@ impl<A> Weight for AutomatonWeight<A>
 where
     A: Automaton + Send + Sync + 'static,
 {
-    fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
         let max_doc = reader.max_doc();
         let mut doc_bitset = BitSet::with_max_value(max_doc);
@@ -56,4 +58,15 @@ where
         let doc_bitset = BitSetDocSet::from(doc_bitset);
         Ok(Box::new(ConstScorer::new(doc_bitset)))
     }
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
+        let mut scorer = self.scorer(reader)?;
+        if scorer.skip_next(doc) == SkipResult::Reached {
+            Ok(Explanation::new("AutomatonScorer", 1.0f32))
+        } else {
+            Err(TantivyError::InvalidArgument(
+                "Document does not exist".to_string(),
+            ))
+        }
+    }
 }

Some files were not shown because too many files have changed in this diff.