Compare commits


2 Commits

Author SHA1 Message Date
Paul Masurel a854a60e2a criterion 2019-08-11 15:46:42 +09:00
Paul Masurel 92d73a6bfb Added criterion benchmark 2019-08-09 17:34:06 +09:00
135 changed files with 2047 additions and 4016 deletions

12
.github/FUNDING.yml vendored
View File

@@ -1,12 +0,0 @@
# These are supported funding model platforms
github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

View File

@@ -47,7 +47,6 @@ matrix:
before_install:
- set -e
- rustup self update
- rustup component add rustfmt
install:
- sh ci/install.sh
@@ -61,7 +60,6 @@ before_script:
script:
- bash ci/script.sh
- cargo fmt --all -- --check
before_deploy:
- sh ci/before_deploy.sh

View File

@@ -5,28 +5,7 @@ Tantivy 0.11.0
- Various bugfixes in the query parser.
- Better handling of hyphens in query parser. (#609)
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
- Added handling of pre-tokenized text fields (#642), which will enable users to
load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)
## How to update?
- The index format is changed. You are required to reindex your data to use tantivy 0.11.
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required.
- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
Tantivy 0.10.2
=====================
- Closes #656. Solving memory leak.
Tantivy 0.10.1
=====================

View File

@@ -13,11 +13,10 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
base64 = "0.11.0"
base64 = "0.10.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
once_cell = "0.2"
regex = "1.0"
tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
@@ -25,6 +24,7 @@ snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
combine = ">=3.6.0,<4.0.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
@@ -33,25 +33,26 @@ fs2={version="0.4", optional=true}
itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true}
uuid = { version = "0.8", features = ["v4", "serde"] }
bit-set = "0.5"
uuid = { version = "0.7.2", features = ["v4", "serde"] }
crossbeam = "0.7"
futures = {version = "0.3", features=["thread-pool"] }
futures = "0.1"
futures-cpupool = "0.1"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.2"
rust-stemmers = "1.1"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { version="0.11", path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
census = "0.2"
fnv = "1.0.6"
owned-read = "0.4"
failure = "0.1"
htmlescape = "0.3.1"
fail = "0.3"
scoped-pool = "1.0"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "1.0"
rayon = "1"
smallvec = "0.6"
[target.'cfg(windows)'.dependencies]
winapi = "0.3"
@@ -61,10 +62,7 @@ rand = "0.7"
maplit = "1"
matches = "0.1.8"
time = "0.1.42"
[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]
criterion = "0.2"
[profile.release]
opt-level = 3
@@ -77,18 +75,19 @@ overflow-checks = true
[features]
default = ["mmap"]
forbench = []
mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
[workspace]
members = ["query-grammar"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }
[dev-dependencies.fail]
features = ["failpoints"]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
# in a different binary.
@@ -100,3 +99,15 @@ travis-ci = { repository = "tantivy-search/tantivy" }
name = "failpoints"
path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"]
[profile.bench]
lto = true
[[bench]]
name = "vint"
harness = false
[[bench]]
name = "fastfield"
harness = false
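The `criterion = "0.2"` dev-dependency, the new `forbench = []` feature (presumably gating re-exports of internal modules such as `tantivy::forbench::bitset` that the benches below import), and the `[[bench]]` entries with `harness = false` are what wire the external benchmarks in. With `harness = false`, Cargo no longer generates a `main` for the bench binary; `criterion_main!` has to supply it. A minimal, purely illustrative skeleton of what such a bench file needs (the real bench files follow later in this compare):

```rust
// Minimal criterion bench skeleton matching a `[[bench]] harness = false` entry.
// Illustrative only; the actual benches added by this commit are shown below.
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn bench_noop(c: &mut Criterion) {
    c.bench_function("noop_add", |b| b.iter(|| black_box(1u64) + black_box(1u64)));
}

criterion_group!(benches, bench_noop);
criterion_main!(benches); // provides main(), since harness = false
```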

View File

@@ -1,3 +0,0 @@
test:
echo "Run test only... No examples."
cargo test --tests --lib

View File

@@ -21,9 +21,9 @@
[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
**Tantivy** is a **full text search engine library** written in Rust.
**Tantivy** is a **full text search engine library** written in rust.
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine.
@@ -31,7 +31,7 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
# Benchmark
Tantivy is typically faster than Lucene, but the results depend on
Tantivy is typically faster than Lucene, but the results will depend on
the nature of the queries in your workload.
The following [benchmark](https://tantivy-search.github.io/bench/) break downs
@@ -40,19 +40,19 @@ performance for different type of queries / collection.
# Features
- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter))
- Configurable tokenizer. (stemming available for 17 latin languages. Third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as Lucene)
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
- Phrase queries search (e.g. `"michael jackson"`)
- BM25 scoring (the same as lucene)
- Natural query language `(michael AND jackson) OR "king of pop"`
- Phrase queries search (`"michael jackson"`)
- Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- Mmap directory
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
- Single valued and multivalued u64, i64 and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, dates, and hierarchical facet fields
- Text, i64, u64, f64, dates and hierarchical facet fields
- LZ4 compressed document store
- Range queries
- Faceted search
@@ -61,42 +61,43 @@ performance for different type of queries / collection.
# Non-features
- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
- Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
are within the scope of Tantivy.
are within the scope of tantivy.
# Supported OS and compiler
Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
# Getting started
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
index documents, and search via the CLI or a small server with a REST API.
It walks you through getting a wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/)
- [tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
`tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
index documents and search via the CLI or a small server with a REST API.
It will walk you through getting a wikipedia search engine up and running in a few minutes.
- [reference doc for the last released version](https://docs.rs/tantivy/)
# How can I support this project?
There are many ways to support this project.
- Use Tantivy and tell us about your experience on [Gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
- Use tantivy and tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
- Report bugs
- Write a blog post
- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
- Talk about Tantivy around you
- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
- Talk about tantivy around you
- Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
# Contributing code
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
We use the GitHub Pull Request workflow - reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
## Clone and build locally
Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run:
Tantivy compiles on stable rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run :
```bash
git clone https://github.com/tantivy-search/tantivy.git
@@ -107,7 +108,7 @@ To check out and run tests, you can simply run:
## Run tests
Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`.
To run the tests exhaustively, run `./run-tests.sh`
## Debug
@@ -115,13 +116,13 @@ You might find it useful to step through the programme with a debugger.
### A failing test
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that `target/` dir exists. Use this bash script to find the most name of the most recent debug build of tantivy and run it under rust-gdb.
```bash
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
```
Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
Now that you are in rust-gdb, you can set breakpoints on lines and methods that match your source-code and run the debug executable with flags that you normally pass to `cargo test` to like this
```bash
$gdb run --test-threads 1 --test $NAME_OF_TEST
@@ -129,7 +130,7 @@ $gdb run --test-threads 1 --test $NAME_OF_TEST
### An example
By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
By default, rustc compiles everything in the `examples/` dir in debug mode. This makes it easy for you to make examples to reproduce bugs.
```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME
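# Illustrative only: once inside rust-gdb you can set breakpoints before
# running, either on a file:line or on a symbol, e.g.
#   (gdb) break src/lib.rs:42        # hypothetical location
#   (gdb) run
#   (gdb) backtrace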

73
benches/bitset.rs Normal file
View File

@@ -0,0 +1,73 @@
use criterion::{criterion_group, criterion_main, Criterion};
use rand::distributions::{Bernoulli, Uniform};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use tantivy::forbench::bitset::{BitSet, TinySet};
use tantivy::query::BitSetDocSet;
use tantivy::DocSet;
fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
StdRng::from_seed([seed_val; 32])
.sample_iter(&Bernoulli::new(ratio).unwrap())
.take(n as usize)
.enumerate()
.filter_map(|(val, keep)| if keep { Some(val as u32) } else { None })
.collect()
}
fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 32] = [1; 32];
StdRng::from_seed(seed)
.sample_iter(&Uniform::new(0u32, max_value))
.take(n_elems)
.collect::<Vec<u32>>()
}
fn bench_tinyset_pop(criterion: &mut Criterion) {
criterion.bench_function("pop_lowest", |b| {
b.iter(|| {
let mut tinyset = TinySet::singleton(criterion::black_box(31u32));
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
})
});
}
fn bench_bitset_insert(criterion: &mut Criterion) {
criterion.bench_function_over_inputs(
"bitset_insert",
|bench, (max_value, n_elems)| {
let els = generate_nonunique_unsorted(*max_value, *n_elems);
bench.iter(move || {
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els.iter().cloned() {
bitset.insert(el);
}
});
},
vec![(1_000_000u32, 10_000)],
);
}
fn bench_bitsetdocset_iterate(b: &mut test::Bencher) {
let mut bitset = BitSet::with_max_value(1_000_000);
for el in sample_with_seed(1_000_000u32, 0.01, 0u8) {
bitset.insert(el);
}
b.iter(|| {
let mut docset = BitSetDocSet::from(bitset.clone());
while docset.advance() {}
});
}
criterion_group!(
benches,
bench_tinyset_pop,
bench_bitset_insert,
bench_bitsetdocset_iterate
);
criterion_main!(benches);
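Note that, as added in this commit, `bench_bitsetdocset_iterate` still has the libtest signature (`b: &mut test::Bencher`) even though it is registered in `criterion_group!`, so it reads like a leftover from the pre-criterion version of the file. A sketch of the same benchmark expressed with the criterion API used by the rest of the file (reusing the helpers and `BitSetDocSet` API above):

```rust
// Sketch: criterion-style version of bench_bitsetdocset_iterate, reusing
// sample_with_seed, BitSet, BitSetDocSet and DocSet::advance from this file.
fn bench_bitsetdocset_iterate(criterion: &mut Criterion) {
    let mut bitset = BitSet::with_max_value(1_000_000);
    for el in sample_with_seed(1_000_000u32, 0.01, 0u8) {
        bitset.insert(el);
    }
    criterion.bench_function("bitsetdocset_iterate", move |b| {
        b.iter(|| {
            let mut docset = BitSetDocSet::from(bitset.clone());
            while docset.advance() {}
        })
    });
}
```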

107
benches/fastfield.rs Normal file
View File

@@ -0,0 +1,107 @@
use criterion::criterion_group;
use criterion::criterion_main;
use criterion::Criterion;
use criterion::ParameterizedBenchmark;
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::SeedableRng;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, DocId, Index};
const NUM_LOOKUPS: usize = 1_000;
fn generate_permutation(stride: usize, bit_width: u8) -> Vec<u64> {
let mut permutation: Vec<u64> = (0u64..(NUM_LOOKUPS * stride) as u64).collect();
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
permutation.push(1u64 << (bit_width as u64)); //< just to force the bit_width
permutation
}
fn bench_linear_lookup(c: &mut Criterion) {
c.bench(
"lookup_stride",
ParameterizedBenchmark::new(
"baseline_vec",
|bench, (stride, num_bits)| {
let arr = generate_permutation(*stride, *num_bits);
bench.iter(move || {
let mut a = 0u64;
for i in (0..NUM_LOOKUPS / stride).map(|v| v * 7) {
a ^= arr[i as usize];
}
a
})
},
vec![(7, 1), (7, 5), (7, 20)],
)
.with_function("fastfield", |bench, (stride, num_bits)| {
let mut schema_builder = Schema::builder();
let val_field = schema_builder.add_u64_field("val", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 80_000_000).unwrap();
for el in generate_permutation(*stride, *num_bits) {
index_writer.add_document(doc!(val_field=>el));
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let fast_field_reader = segment_reader.fast_fields().u64(val_field).unwrap();
bench.iter(move || {
let mut a = 0u64;
for i in (0..NUM_LOOKUPS / stride).map(|v| v * 7) {
a ^= fast_field_reader.get(i as DocId);
}
a
})
}),
);
}
fn bench_jumpy_lookup(c: &mut Criterion) {
c.bench(
"lookup_jumpy",
ParameterizedBenchmark::new(
"baseline_vec",
|bench, (stride, num_bits)| {
let arr = generate_permutation(*stride, *num_bits);
bench.iter(move || {
let mut a = 0u64;
for _ in 0..NUM_LOOKUPS {
a = arr[a as usize];
}
a
})
},
vec![(7, 1), (7, 5), (7, 20)],
)
.with_function("fastfield", |bench, (stride, num_bits)| {
let mut schema_builder = Schema::builder();
let val_field = schema_builder.add_u64_field("val", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 80_000_000).unwrap();
for el in generate_permutation(*stride, *num_bits) {
index_writer.add_document(doc!(val_field=>el));
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let fast_field_reader = segment_reader.fast_fields().u64(val_field).unwrap();
bench.iter(move || {
let mut a = 0u64;
for _ in 0..NUM_LOOKUPS {
a = fast_field_reader.get(a as DocId);
}
a
})
}),
);
}
criterion_group!(benches, bench_linear_lookup, bench_jumpy_lookup);
criterion_main!(benches);

50
benches/union.rs Normal file
View File

@@ -0,0 +1,50 @@
use criterion::{criterion_group, criterion_main, Criterion};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STRING};
use tantivy::{Document, Index};
fn bench_union(criterion: &mut Criterion) {
criterion.bench_function_over_inputs(
"union_docset_fulladvance",
|bench, (ratio_left, ratio_right)| {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_text_field("val", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 80_000_000).unwrap();
let mut stdrng = StdRng::from_seed([0u8; 32]);
for _ in 0u32..100_000u32 {
let mut doc = Document::default();
if stdrng.gen_bool(*ratio_left) {
doc.add_text(field, "left");
}
if stdrng.gen_bool(*ratio_right) {
doc.add_text(field, "right");
}
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query = QueryParser::for_index(&index, vec![field])
.parse_query("left right")
.unwrap();
bench.iter(move || {
let weight = query.weight(&searcher, false).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let mut sum_docs = 0u64;
scorer.for_each(&mut |doc_id, _score| {
sum_docs += doc_id as u64;
});
});
},
vec![(0.2, 0.1), (0.2, 0.02)],
);
}
criterion_group!(benches, bench_union);
criterion_main!(benches);

72
benches/vint.rs Normal file
View File

@@ -0,0 +1,72 @@
use criterion::{criterion_group, criterion_main, Criterion, ParameterizedBenchmark};
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use tantivy::forbench::compression::{compressed_block_size, BlockDecoder};
use tantivy::forbench::compression::{BlockEncoder, VIntEncoder};
use tantivy::forbench::compression::{VIntDecoder, COMPRESSION_BLOCK_SIZE};
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let seed: [u8; 32] = [seed_val; 32];
let mut rng = StdRng::from_seed(seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}
pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> {
generate_array_with_seed(n, ratio, 4)
}
fn bench_compress(criterion: &mut Criterion) {
criterion.bench(
"compress_sorted",
ParameterizedBenchmark::new(
"bitpack",
|bench, ratio| {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, *ratio);
bench.iter(|| {
encoder.compress_block_sorted(&data, 0u32);
});
},
vec![0.1],
)
.with_function("vint", |bench, ratio| {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, *ratio);
bench.iter(|| {
encoder.compress_vint_sorted(&data, 0u32);
});
}),
);
}
fn bench_uncompress(criterion: &mut Criterion) {
criterion.bench(
"uncompress_sorted",
ParameterizedBenchmark::new(
"bitpack",
|bench, ratio| {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, *ratio);
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
let mut decoder = BlockDecoder::new();
bench.iter(|| {
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
});
},
vec![0.1],
)
.with_function("vint", |bench, ratio| {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, *ratio);
let compressed = encoder.compress_vint_sorted(&data, 0u32);
let mut decoder = BlockDecoder::new();
bench.iter(move || {
decoder.uncompress_vint_sorted(compressed, 0u32, COMPRESSION_BLOCK_SIZE);
});
}),
);
}
criterion_group!(benches, bench_compress, bench_uncompress);
criterion_main!(benches);

View File

@@ -7,7 +7,7 @@ set -ex
main() {
if [ ! -z $CODECOV ]; then
echo "Codecov"
cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
else
echo "Build"
cross build --target $TARGET
@@ -15,8 +15,7 @@ main() {
return
fi
echo "Test"
cross test --target $TARGET --no-default-features --features mmap
cross test --target $TARGET --no-default-features --features mmap query-grammar
cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
fi
for example in $(ls examples/*.rs)
do

View File

@@ -5,17 +5,20 @@
//
// We will :
// - define our schema
// - create an index in a directory
// - index a few documents into our index
// - search for the best document matching a basic query
// - retrieve the best document's original content.
// = create an index in a directory
// - index few documents in our index
// - search for the best document matchings "sea whale"
// - retrieve the best document original content.
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tantivy::Index;
use tantivy::ReloadPolicy;
use tempfile::TempDir;
fn main() -> tantivy::Result<()> {
@@ -30,7 +33,7 @@ fn main() -> tantivy::Result<()> {
// and for each field, its type and "the way it should
// be indexed".
// First we need to define a schema ...
// first we need to define a schema ...
let mut schema_builder = Schema::builder();
// Our first field is title.
@@ -45,7 +48,7 @@ fn main() -> tantivy::Result<()> {
//
// `STORED` means that the field will also be saved
// in a compressed, row-oriented key-value store.
// This store is useful for reconstructing the
// This store is useful to reconstruct the
// documents that were selected during the search phase.
schema_builder.add_text_field("title", TEXT | STORED);
@@ -54,7 +57,8 @@ fn main() -> tantivy::Result<()> {
// need to be able to be able to retrieve it
// for our application.
//
// We can make our index lighter by omitting the `STORED` flag.
// We can make our index lighter and
// by omitting `STORED` flag.
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
@@ -67,7 +71,7 @@ fn main() -> tantivy::Result<()> {
// with our schema in the directory.
let index = Index::create_in_dir(&index_path, schema.clone())?;
// To insert a document we will need an index writer.
// To insert document we need an index writer.
// There must be only one writer at a time.
// This single `IndexWriter` is already
// multithreaded.
@@ -145,8 +149,8 @@ fn main() -> tantivy::Result<()> {
// At this point our documents are not searchable.
//
//
// We need to call `.commit()` explicitly to force the
// `index_writer` to finish processing the documents in the queue,
// We need to call .commit() explicitly to force the
// index_writer to finish processing the documents in the queue,
// flush the current index to the disk, and advertise
// the existence of new documents.
//
@@ -158,14 +162,14 @@ fn main() -> tantivy::Result<()> {
// persistently indexed.
//
// In the scenario of a crash or a power failure,
// tantivy behaves as if it has rolled back to its last
// tantivy behaves as if has rolled back to its last
// commit.
// # Searching
//
// ### Searcher
//
// A reader is required first in order to search an index.
// A reader is required to get search the index.
// It acts as a `Searcher` pool that reloads itself,
// depending on a `ReloadPolicy`.
//
@@ -181,7 +185,7 @@ fn main() -> tantivy::Result<()> {
// We now need to acquire a searcher.
//
// A searcher points to a snapshotted, immutable version of the index.
// A searcher points to snapshotted, immutable version of the index.
//
// Some search experience might require more than
// one query. Using the same searcher ensures that all of these queries will run on the
@@ -201,7 +205,7 @@ fn main() -> tantivy::Result<()> {
// in both title and body.
let query_parser = QueryParser::for_index(&index, vec![title, body]);
// `QueryParser` may fail if the query is not in the right
// QueryParser may fail if the query is not in the right
// format. For user facing applications, this can be a problem.
// A ticket has been opened regarding this problem.
let query = query_parser.parse_query("sea whale")?;
@@ -217,7 +221,7 @@ fn main() -> tantivy::Result<()> {
//
// We are not interested in all of the documents but
// only in the top 10. Keeping track of our top 10 best documents
// is the role of the `TopDocs` collector.
// is the role of the TopDocs.
// We can now perform our query.
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

View File

@@ -9,12 +9,15 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, SegmentReader, TantivyError};
use tantivy::SegmentReader;
use tantivy::{Index, TantivyError};
#[derive(Default)]
struct Stats {

View File

@@ -2,11 +2,14 @@
//
// In this example, we'll see how to define a tokenizer pipeline
// by aligning a bunch of `TokenFilter`.
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
use tantivy::{doc, Index};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// # Defining the schema

View File

@@ -8,10 +8,13 @@
//
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexReader};
use tantivy::Index;
use tantivy::IndexReader;
// A simple helper function to fetch a single document
// given its id from our index.

View File

@@ -12,10 +12,12 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::schema::*;
use tantivy::{doc, Index};
use tantivy::Index;
use tempfile::TempDir;
fn main() -> tantivy::Result<()> {

View File

@@ -2,10 +2,14 @@
//
// Below is an example of creating an indexed integer field in your schema
// You can use RangeQuery to get a Count of all occurrences in a given range.
#[macro_use]
extern crate tantivy;
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, Result};
use tantivy::Index;
use tantivy::Result;
fn run() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field

View File

@@ -9,8 +9,11 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::schema::*;
use tantivy::{doc, DocId, DocSet, Index, Postings};
use tantivy::Index;
use tantivy::{DocId, DocSet, Postings};
fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the

View File

@@ -25,11 +25,14 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp};
use tantivy::Opstamp;
use tantivy::{Index, IndexWriter};
fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -46,9 +49,10 @@ fn main() -> tantivy::Result<()> {
thread::spawn(move || {
// we index 100 times the document... for the sake of the example.
for i in 0..100 {
let opstamp = index_writer_clone_1
.read().unwrap() //< A read lock is sufficient here.
.add_document(
let opstamp = {
// A read lock is sufficient here.
let index_writer_rlock = index_writer_clone_1.read().unwrap();
index_writer_rlock.add_document(
doc!(
title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
@@ -59,7 +63,8 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
))
};
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(20));
}

View File

@@ -1,140 +0,0 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes yout might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;
fn pre_tokenize_text(text: &str) -> Vec<Token> {
let mut token_stream = SimpleTokenizer.token_stream(text);
let mut tokens = vec![];
while token_stream.advance() {
tokens.push(token_stream.token().clone());
}
tokens
}
fn main() -> tantivy::Result<()> {
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
// We can create a document manually, by setting the fields
// one by one in a Document object.
let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();
let title_text = "The Old Man and the Sea";
let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";
// Content of our first document
// We create `PreTokenizedString` which contains original text and vector of tokens
let title_tok = PreTokenizedString {
text: String::from(title_text),
tokens: pre_tokenize_text(title_text),
};
println!(
"Original text: \"{}\" and tokens: {:?}",
title_tok.text, title_tok.tokens
);
let body_tok = PreTokenizedString {
text: String::from(body_text),
tokens: pre_tokenize_text(body_text),
};
// Now lets create a document and add our `PreTokenizedString`
let old_man_doc = doc!(title => title_tok, body => body_tok);
// ... now let's just add it to the IndexWriter
index_writer.add_document(old_man_doc);
// Pretokenized text can also be fed as JSON
let short_man_json = r#"{
"title":[{
"text":"The Old Man",
"tokens":[
{"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
{"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
{"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
]
}]
}"#;
let short_man_doc = schema.parse_document(&short_man_json)?;
index_writer.add_document(short_man_doc);
// Let's commit changes
index_writer.commit()?;
// ... and now is the time to query our index
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;
let searcher = reader.searcher();
// We want to get documents with token "Man", we will use TermQuery to do it
// Using PreTokenizedString means the tokens are stored as is avoiding stemming
// and lowercasing, which preserves full words in their original form
let query = TermQuery::new(
Term::from_field_text(title, "Man"),
IndexRecordOption::Basic,
);
let (top_docs, count) = searcher
.search(&query, &(TopDocs::with_limit(2), Count))
.unwrap();
assert_eq!(count, 2);
// Now let's print out the results.
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc));
}
// In contrary to the previous query, when we search for the "man" term we
// should get no results, as it's not one of the indexed tokens. SimpleTokenizer
// only splits text on whitespace / punctuation.
let query = TermQuery::new(
Term::from_field_text(title, "man"),
IndexRecordOption::Basic,
);
let (_top_docs, count) = searcher
.search(&query, &(TopDocs::with_limit(2), Count))
.unwrap();
assert_eq!(count, 0);
Ok(())
}

View File

@@ -7,10 +7,13 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tantivy::Index;
use tantivy::{Snippet, SnippetGenerator};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> {

View File

@@ -11,11 +11,13 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::{doc, Index};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search`

View File

@@ -1,16 +0,0 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
combine = ">=3.6.0,<4.0.0"

View File

@@ -1,3 +0,0 @@
# Tantivy Query Grammar
This crate is used by tantivy to parse queries.

View File

@@ -1,17 +0,0 @@
#![recursion_limit = "100"]
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;
pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
pub struct Error;
pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
Ok(user_input_ast)
}

2
run-bench.rs Executable file
View File

@@ -0,0 +1,2 @@
#!/usr/bin/env bash
cargo bench --features forbench
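The `--features forbench` flag enables the bench-only re-exports that the new `benches/*.rs` files import. Because the bench targets use criterion with `harness = false`, individual targets and benchmark groups can also be selected; illustrative invocations:

```bash
# Run only the vint benches, or only the "lookup_stride" group in fastfield.
cargo bench --features forbench --bench vint
cargo bench --features forbench --bench fastfield -- lookup_stride
```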

View File

@@ -10,32 +10,49 @@ use crate::SegmentReader;
/// documents match the query.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::Count;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index};
///
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let mut index_writer = index.writer(3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
/// {
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let count = searcher.search(&query, &Count).unwrap();
///
/// // Here comes the important part
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
/// assert_eq!(count, 2);
/// }
///
/// assert_eq!(count, 2);
/// Ok(())
/// }
/// ```
pub struct Count;
@@ -108,4 +125,5 @@ mod tests {
assert_eq!(count_collector.harvest(), 2);
}
}
}

View File

@@ -81,11 +81,14 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
///
@@ -126,7 +129,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let searcher = reader.searcher();
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -142,7 +145,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// }
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
@@ -157,8 +160,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ]);
/// }
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
@@ -171,7 +174,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
/// Ok(())
/// }
/// # assert!(example().is_ok());
/// ```
pub struct FacetCollector {
field: Field,
@@ -515,7 +517,7 @@ mod tests {
#[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")]
fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe"));
}
@@ -546,7 +548,7 @@ mod tests {
#[test]
fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope"));
}
@@ -599,18 +601,19 @@ mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use crate::collector::FacetCollector;
use crate::query::AllQuery;
use crate::schema::{Facet, Schema};
use crate::Index;
use rand::seq::SliceRandom;
use rand::thread_rng;
use collector::FacetCollector;
use query::AllQuery;
use rand::{thread_rng, Rng};
use schema::Facet;
use schema::Schema;
use test::Bencher;
use Index;
#[bench]
fn bench_facet_collector(b: &mut Bencher) {
@@ -627,7 +630,7 @@ mod bench {
}
}
// 40425 docs
docs[..].shuffle(&mut thread_rng());
thread_rng().shuffle(&mut docs[..]);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
@@ -636,7 +639,7 @@ mod bench {
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let searcher = reader.searcher();
let searcher = index.searcher();
let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap();
});

View File

@@ -35,6 +35,7 @@ The resulting `Fruit` will then be a typed tuple with each collector's original
in their respective position.
```rust
# extern crate tantivy;
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;

View File

@@ -105,38 +105,54 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors).
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index};
///
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let mut index_writer = index.writer(3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
/// let mut collectors = MultiCollector::new();
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let mut multi_fruit = searcher.search(&query, &collectors)?;
///
/// let mut collectors = MultiCollector::new();
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
/// let count = count_handle.extract(&mut multi_fruit);
/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
///
/// let count = count_handle.extract(&mut multi_fruit);
/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
/// # assert_eq!(count, 2);
/// # assert_eq!(top_docs.len(), 2);
///
/// assert_eq!(count, 2);
/// assert_eq!(top_docs.len(), 2);
/// Ok(())
/// }
/// ```
#[allow(clippy::type_complexity)]
#[derive(Default)]

View File

@@ -12,9 +12,6 @@ use std::collections::BinaryHeap;
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed.
///
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
/// address is used.
///
/// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, and regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this
@@ -24,37 +21,29 @@ struct ComparableDoc<T, D> {
doc: D,
}
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
// Reversed to make BinaryHeap work as a min-heap
let by_feature = other
other
.feature
.partial_cmp(&self.feature)
.unwrap_or(Ordering::Equal);
let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
// In case of a tie on the feature, we sort by ascending
// `DocAddress` in order to ensure a stable sorting of the
// documents.
by_feature.then_with(lazy_by_doc_address)
.unwrap_or_else(|| Ordering::Equal)
}
}
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
pub(crate) struct TopCollector<T> {
limit: usize,
@@ -225,94 +214,4 @@ mod tests {
]
);
}
#[test]
fn test_top_segment_collector_stable_ordering_for_equal_feature() {
// given that the documents are collected in ascending doc id order,
// when harvesting we have to guarantee stable sorting in case of a tie
// on the score
let doc_ids_collection = [4, 5, 6];
let score = 3.14;
let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
for id in &doc_ids_collection {
top_collector_limit_2.collect(*id, score);
}
let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
for id in &doc_ids_collection {
top_collector_limit_3.collect(*id, score);
}
assert_eq!(
top_collector_limit_2.harvest(),
top_collector_limit_3.harvest()[..2].to_vec(),
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::TopSegmentCollector;
use test::Bencher;
#[bench]
fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 400);
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
let mut score = 1.0;
for i in 0..100 {
score += 1.0;
top_collector.collect(i, score);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
}
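The `ComparableDoc` ordering above reverses the comparison so that `BinaryHeap`, which is a max-heap, behaves as a min-heap holding the current top K (evicting the smallest element first). A generic, self-contained illustration of the same trick using `std::cmp::Reverse`, shown only to clarify the pattern, not taken from the diff:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Keep the k largest values with a min-heap: wrapping values in `Reverse`
/// flips the ordering, which is the same idea ComparableDoc implements by hand.
fn top_k(values: &[u64], k: usize) -> Vec<u64> {
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(k);
    for &v in values {
        if heap.len() < k {
            heap.push(Reverse(v));
        } else if heap.peek().map(|Reverse(m)| v > *m).unwrap_or(false) {
            heap.pop(); // drop the current smallest of the top k
            heap.push(Reverse(v));
        }
    }
    let mut out: Vec<u64> = heap.into_iter().map(|Reverse(v)| v).collect();
    out.sort_unstable_by(|a, b| b.cmp(a)); // descending
    out
}
```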

View File

@@ -15,43 +15,57 @@ use crate::SegmentLocalId;
use crate::SegmentReader;
use std::fmt;
/// The `TopDocs` collector keeps track of the top `K` documents
/// The Top Score Collector keeps track of the K documents
/// sorted by their score.
///
/// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// This collector guarantees a stable sorting in case of a tie on the
/// document score. As such, it is suitable to implement pagination.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::DocAddress;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index};
///
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
///
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// Ok(())
/// }
/// ```
pub struct TopDocs(TopCollector<Score>);
@@ -73,8 +87,10 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, Result, DocAddress};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
@@ -88,12 +104,15 @@ impl TopDocs {
/// #
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
/// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader().unwrap();
/// # index_writer.commit()?;
/// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs,
@@ -109,7 +128,7 @@ impl TopDocs {
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query,
/// query: &Query,
/// sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
///
@@ -178,40 +197,36 @@ impl TopDocs {
/// learning-to-rank model over various features
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId, Score};
/// # use tantivy::{Index, DocAddress, DocId, Score};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// fn create_schema() -> Schema {
/// let mut schema_builder = Schema::builder();
/// schema_builder.add_text_field("product_name", TEXT);
/// schema_builder.add_u64_field("popularity", FAST);
/// schema_builder.build()
/// }
///
/// fn create_index() -> tantivy::Result<Index> {
/// let schema = create_schema();
/// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// index_writer.commit()?;
/// Ok(index)
/// }
///
/// let index = create_index().unwrap();
/// let product_name = index.schema().get_field("product_name").unwrap();
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
/// # schema_builder.add_text_field("product_name", TEXT);
/// # schema_builder.add_u64_field("popularity", FAST);
/// # schema_builder.build()
/// # }
/// #
/// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap();
/// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
///
/// let user_query_str = "diary";
/// let query_parser = QueryParser::for_index(&index, vec![product_name]);
/// let query = query_parser.parse_query(user_query_str).unwrap();
/// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
///
/// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs
@@ -238,12 +253,15 @@ impl TopDocs {
/// popularity_boost_score * original_score
/// }
/// });
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
/// # let reader = index.reader()?;
/// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<(Score, DocAddress)> =
/// searcher.search(&query, &top_docs_by_custom_score).unwrap();
/// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ```
///
/// # See also
@@ -284,8 +302,10 @@ impl TopDocs {
/// # Example
///
/// ```rust
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId};
/// # use tantivy::{Index, DocAddress, DocId};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
@@ -417,13 +437,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
mod tests {
use super::TopDocs;
use crate::collector::Collector;
use crate::query::{AllQuery, Query, QueryParser};
use crate::query::{Query, QueryParser};
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::DocAddress;
use crate::Index;
use crate::IndexWriter;
use crate::Score;
use itertools::Itertools;
fn make_index() -> Index {
let mut schema_builder = Schema::builder();
@@ -484,29 +503,6 @@ mod tests {
);
}
#[test]
fn test_top_collector_stable_sorting() {
let index = make_index();
// using AllQuery to get a constant score
let searcher = index.reader().unwrap().searcher();
let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
// precondition for the test to be meaningful: we did get documents
// with the same score
assert!(page_1.iter().map(|result| result.0).all_equal());
assert!(page_2.iter().map(|result| result.0).all_equal());
// sanity check since we're relying on make_index()
assert_eq!(page_1.len(), 2);
assert_eq!(page_2.len(), 3);
assert_eq!(page_1, &page_2[..page_1.len()]);
}
#[test]
#[should_panic]
fn test_top_0() {
@@ -564,7 +560,7 @@ mod tests {
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field::from_field_id(2));
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
@@ -605,4 +601,5 @@ mod tests {
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
}

View File

@@ -86,6 +86,7 @@ where
}
}
#[inline(always)]
pub fn get(&self, idx: u64) -> u64 {
if self.num_bits == 0 {
return 0u64;

View File

@@ -2,7 +2,7 @@ use std::fmt;
use std::u64;
#[derive(Clone, Copy, Eq, PartialEq)]
pub(crate) struct TinySet(u64);
pub struct TinySet(u64);
impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -353,43 +353,3 @@ mod tests {
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::BitSet;
use super::TinySet;
use test;
#[bench]
fn bench_tinyset_pop(b: &mut test::Bencher) {
b.iter(|| {
let mut tinyset = TinySet::singleton(test::black_box(31u32));
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
});
}
#[bench]
fn bench_tinyset_sum(b: &mut test::Bencher) {
let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
b.iter(|| {
assert_eq!(test::black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
});
}
#[bench]
fn bench_tinyarr_sum(b: &mut test::Bencher) {
let v = [10u32, 14u32, 21u32];
b.iter(|| test::black_box(v).iter().cloned().sum::<u32>());
}
#[bench]
fn bench_bitset_initialize(b: &mut test::Bencher) {
b.iter(|| BitSet::with_max_value(1_000_000));
}
}

View File

@@ -2,7 +2,7 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
offsets: HashMap<FileAddr, u64>,
}
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
impl<W: Write> CompositeWrite<W> {
/// Creates a new API writer that writes a composite file
/// into a given writer.
pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,7 +91,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?;
self.write.terminate()
self.write.flush()?;
Ok(())
}
}
@@ -199,13 +200,13 @@ mod test {
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
{
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
let mut write_0 = composite_write.for_field(Field(0u32));
VInt(32431123u64).serialize(&mut write_0).unwrap();
write_0.flush().unwrap();
}
{
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
let mut write_4 = composite_write.for_field(Field(4u32));
VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap();
}
@@ -215,18 +216,14 @@ mod test {
let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r).unwrap();
{
let file0 = composite_file
.open_read(Field::from_field_id(0u32))
.unwrap();
let file0 = composite_file.open_read(Field(0u32)).unwrap();
let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file4 = composite_file
.open_read(Field::from_field_id(4u32))
.unwrap();
let file4 = composite_file.open_read(Field(4u32)).unwrap();
let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0);
@@ -234,4 +231,5 @@ mod test {
}
}
}
}

View File

@@ -1,5 +1,3 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;
@@ -44,13 +42,6 @@ impl<W: Write> Write for CountingWriter<W> {
}
}
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token)
}
}
#[cfg(test)]
mod test {

View File

@@ -6,7 +6,7 @@ mod serialize;
mod vint;
pub use self::bitset::BitSet;
pub(crate) use self::bitset::TinySet;
pub use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize};

View File

@@ -199,7 +199,10 @@ pub mod test {
fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
}
#[test]

View File

@@ -1,6 +1,6 @@
use crate::Result;
use crossbeam::channel;
use rayon::{ThreadPool, ThreadPoolBuilder};
use scoped_pool::{Pool, ThreadConfig};
/// Search executor that runs search requests either single-threaded or on a thread pool.
///
@@ -11,7 +11,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
/// used by the client. Second, we may stop using rayon in the future.
pub enum Executor {
SingleThread,
ThreadPool(ThreadPool),
ThreadPool(Pool),
}
impl Executor {
@@ -21,12 +21,10 @@ impl Executor {
}
// Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
let pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(move |num| format!("{}{}", prefix, num))
.build()?;
Ok(Executor::ThreadPool(pool))
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
let thread_config = ThreadConfig::new().prefix(prefix);
let pool = Pool::with_thread_config(num_threads, thread_config);
Executor::ThreadPool(pool)
}
// Perform a map in the thread pool.
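For reference, a minimal sketch of the scoped_pool pattern used above; the pool size, thread-name prefix and the doubling workload are illustrative assumptions, not values taken from tantivy:

use scoped_pool::{Pool, ThreadConfig};

let pool = Pool::with_thread_config(2, ThreadConfig::new().prefix("demo-"));
let mut results = vec![0usize; 4];
pool.scoped(|scope| {
    // Each task borrows exactly one &mut slot, so the scoped tasks may mutate `results`.
    for (idx, slot) in results.iter_mut().enumerate() {
        scope.execute(move || *slot = idx * 2);
    }
});
assert_eq!(results, vec![0, 2, 4, 6]);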
@@ -50,9 +48,9 @@ impl Executor {
let num_fruits = args_with_indices.len();
let fruit_receiver = {
let (fruit_sender, fruit_receiver) = channel::unbounded();
pool.scope(|scope| {
pool.scoped(|scope| {
for arg_with_idx in args_with_indices {
scope.spawn(|_| {
scope.execute(|| {
let (idx, arg) = arg_with_idx;
let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) {
@@ -105,7 +103,6 @@ mod tests {
#[should_panic] //< unfortunately the panic message is not propagated
fn test_panic_propagates_multi_thread() {
let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
.unwrap()
.map(
|_| {
panic!("panic should propagate");
@@ -129,7 +126,6 @@ mod tests {
#[test]
fn test_map_multithread() {
let result: Vec<usize> = Executor::multi_thread(3, "search-test")
.unwrap()
.map(|i| Ok(i * 2), 0..10)
.unwrap();
assert_eq!(result.len(), 10);

View File

@@ -26,10 +26,9 @@ use crate::IndexWriter;
use crate::Result;
use num_cpus;
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;
fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
@@ -73,16 +72,15 @@ impl Index {
/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
Ok(())
pub fn set_multithread_executor(&mut self, num_threads: usize) {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
}
/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_default_multithread_executor(&mut self) -> Result<()> {
pub fn set_default_multithread_executor(&mut self) {
let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads)
self.set_multithread_executor(default_num_threads);
}
/// Creates a new index using the `RAMDirectory`.
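A small usage sketch for the executor setters above (illustrative; `schema` and the thread count are placeholders). With the signature shown here the call returns `()`, whereas the 0.11 signature returns a `Result` that must be handled:

let mut index = Index::create_in_ram(schema);
// Dedicated pool with an explicit number of threads...
index.set_multithread_executor(4);
// ...or one sized to the number of logical cores.
index.set_default_multithread_executor();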
@@ -104,21 +102,23 @@ impl Index {
if Index::exists(&mmap_directory) {
return Err(TantivyError::IndexAlreadyExists);
}
Index::create(mmap_directory, schema)
}
/// Opens or creates a new index in the provided directory
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
if !Index::exists(&dir) {
return Index::create(dir, schema);
}
let index = Index::open(dir)?;
if index.schema() == schema {
Ok(index)
if Index::exists(&dir) {
let index = Index::open(dir)?;
if index.schema() == schema {
Ok(index)
} else {
Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
}
} else {
Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
Index::create(dir, schema)
}
}
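A hedged usage sketch of `open_or_create` (the directory path and `schema` are assumptions): the call opens the index when one already exists with a matching schema, creates it otherwise, and fails with a `SchemaError` on a mismatch.

let dir = MmapDirectory::open("/tmp/my-index")?;
let index = Index::open_or_create(dir, schema.clone())?;
// A second call with the same schema returns the existing index;
// a different schema yields TantivyError::SchemaError.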
@@ -173,11 +173,11 @@ impl Index {
}
/// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> {
pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<dyn BoxedTokenizer>> {
let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type {
let tokenizer_name_opt: Option<Box<dyn BoxedTokenizer>> = match field_type {
FieldType::Str(text_options) => text_options
.get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -216,22 +216,8 @@ impl Index {
Index::open(mmap_directory)
}
/// Returns the list of the segment metas tracked by the index.
///
/// Such segments can of course be part of the index,
/// but they may also be segments that are currently being built
/// or in the middle of a merge operation.
pub fn list_all_segment_metas(&self) -> Vec<SegmentMeta> {
self.inventory.all()
}
/// Creates a new segment_meta (Advanced user only).
///
/// As long as the `SegmentMeta` lives, the files associated with the
/// `SegmentMeta` are guaranteed to not be garbage collected, regardless of
/// whether the segment is recorded as part of the index or not.
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
self.inventory.new_segment_meta(segment_id, max_doc)
pub(crate) fn inventory(&self) -> &SegmentMetaInventory {
&self.inventory
}
/// Open the index using the provided directory
@@ -368,11 +354,6 @@ impl Index {
.map(SegmentMeta::id)
.collect())
}
/// Returns the set of corrupted files
pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into)
}
}
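For illustration, the checksum helper removed above could be used to report damaged files, e.g.:

let damaged = index.validate_checksum()?;
if !damaged.is_empty() {
    eprintln!("files failing their checksum: {:?}", damaged);
}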
impl fmt::Debug for Index {
@@ -386,9 +367,12 @@ mod tests {
use crate::directory::RAMDirectory;
use crate::schema::Field;
use crate::schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy;
use crate::{Directory, Index};
use std::thread;
use std::time::Duration;
#[test]
fn test_indexer_for_field() {
@@ -466,14 +450,14 @@ mod tests {
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &index, &reader);
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
}
#[cfg(feature = "mmap")]
mod mmap_specific {
use super::*;
use crate::Directory;
use std::path::PathBuf;
use tempfile::TempDir;
@@ -484,20 +468,22 @@ mod tests {
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &index, &reader);
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
}
#[test]
fn test_index_manual_policy_mmap() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let mut index = Index::create_from_tempdir(schema).unwrap();
let index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index
@@ -507,12 +493,8 @@ mod tests {
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = index.directory_mut().watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap();
assert!(receiver.recv().is_ok());
thread::sleep(Duration::from_millis(500));
assert_eq!(reader.searcher().num_docs(), 0);
reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 1);
@@ -532,26 +514,39 @@ mod tests {
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
}
}
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
let mut reader_index = reader.index();
let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
let _ = sender.send(());
}));
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
fn test_index_on_commit_reload_policy_aux(
field: Field,
writer: &mut IndexWriter,
reader: &IndexReader,
) {
assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64));
writer.commit().unwrap();
assert!(receiver.recv().is_ok());
assert_eq!(reader.searcher().num_docs(), 1);
let mut count = 0;
for _ in 0..100 {
count = reader.searcher().num_docs();
if count > 0 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 1);
writer.add_document(doc!(field=>2u64));
writer.commit().unwrap();
assert!(receiver.recv().is_ok());
assert_eq!(reader.searcher().num_docs(), 2);
let mut count = 0;
for _ in 0..10 {
count = reader.searcher().num_docs();
if count > 1 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 2);
}
// This test will not pass on windows, because windows
@@ -568,13 +563,9 @@ mod tests {
for i in 0u64..8_000u64 {
writer.add_document(doc!(field => i));
}
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = directory.watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap();
let mem_right_after_commit = directory.total_mem_usage();
assert!(receiver.recv().is_ok());
thread::sleep(Duration::from_millis(1_000));
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
@@ -588,11 +579,7 @@ mod tests {
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 8_000);
assert!(
mem_right_after_merge_finished < mem_right_after_commit,
"(mem after merge){} is expected < (mem before merge){}",
mem_right_after_merge_finished,
mem_right_after_commit
);
assert!(mem_right_after_merge_finished < mem_right_after_commit);
}
}

View File

@@ -30,6 +30,7 @@ impl SegmentMetaInventory {
.collect::<Vec<_>>()
}
#[doc(hidden)]
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
let inner = InnerSegmentMeta {
segment_id,
@@ -150,21 +151,6 @@ impl SegmentMeta {
self.num_deleted_docs() > 0
}
/// Updates the max_doc value of the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
assert_eq!(self.tracked.max_doc, 0);
assert!(self.tracked.deletes.is_none());
let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
segment_id: inner_meta.segment_id,
max_doc,
deletes: None,
});
SegmentMeta { tracked }
}
#[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
let delete_meta = DeleteMeta {
@@ -300,9 +286,6 @@ mod tests {
payload: None,
};
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(
json,
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
);
assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
}
}

View File

@@ -50,17 +50,6 @@ impl Segment {
&self.meta
}
/// Updates the max_doc value of the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_max_doc(max_doc),
}
}
#[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment {

View File

@@ -4,8 +4,6 @@ use uuid::Uuid;
#[cfg(test)]
use once_cell::sync::Lazy;
use std::error::Error;
use std::str::FromStr;
#[cfg(test)]
use std::sync::atomic;
@@ -54,51 +52,15 @@ impl SegmentId {
/// and the rest is random.
///
/// Picking the first 8 chars is ok to identify
/// segments in a display message (e.g. a5c4dfcb).
/// segments in a display message.
pub fn short_uuid_string(&self) -> String {
(&self.0.to_simple_ref().to_string()[..8]).to_string()
}
/// Returns a segment uuid string.
///
/// It consists of 32 lowercase hexadecimal chars
/// (e.g. a5c4dfcbdfe645089129e308e26d5523)
pub fn uuid_string(&self) -> String {
self.0.to_simple_ref().to_string()
}
/// Build a `SegmentId` string from the full uuid string.
///
/// E.g. "a5c4dfcbdfe645089129e308e26d5523"
pub fn from_uuid_string(uuid_string: &str) -> Result<SegmentId, SegmentIdParseError> {
FromStr::from_str(uuid_string)
}
}
/// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::Error);
impl Error for SegmentIdParseError {}
impl fmt::Debug for SegmentIdParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for SegmentIdParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl FromStr for SegmentId {
type Err = SegmentIdParseError;
fn from_str(uuid_string: &str) -> Result<Self, SegmentIdParseError> {
let uuid = Uuid::parse_str(uuid_string).map_err(SegmentIdParseError)?;
Ok(SegmentId(uuid))
}
}
impl fmt::Debug for SegmentId {
@@ -118,18 +80,3 @@ impl Ord for SegmentId {
self.0.as_bytes().cmp(other.0.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::SegmentId;
#[test]
fn test_to_uuid_string() {
let full_uuid = "a5c4dfcbdfe645089129e308e26d5523";
let segment_id = SegmentId::from_uuid_string(full_uuid).unwrap();
assert_eq!(segment_id.uuid_string(), full_uuid);
assert_eq!(segment_id.short_uuid_string(), "a5c4dfcb");
// one extra char
assert!(SegmentId::from_uuid_string("a5c4dfcbdfe645089129e308e26d5523b").is_err());
}
}

View File

@@ -118,8 +118,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files created with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file
@@ -159,8 +157,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files created with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data.

View File

@@ -1,4 +1,3 @@
use crate::Version;
use std::error::Error as StdError;
use std::fmt;
use std::io;
@@ -157,65 +156,6 @@ impl StdError for OpenWriteError {
}
}
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility {
/// This library cannot decompress the index found on disk
CompressionMismatch {
/// Compression algorithm used by the current version of tantivy
library_compression_format: String,
/// Compression algorithm that was used to serialise the index
index_compression_format: String,
},
/// The index format found on disk isn't supported by this version of the library
IndexMismatch {
/// Version used by the library
library_version: Version,
/// Version the index was built with
index_version: Version,
},
}
impl fmt::Debug for Incompatibility {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
Incompatibility::CompressionMismatch {
library_compression_format,
index_compression_format,
} => {
let err = format!(
"Library was compiled with {:?} compression, index was compressed with {:?}",
library_compression_format, index_compression_format
);
let advice = format!(
"Change the feature flag to {:?} and rebuild the library",
index_compression_format
);
write!(f, "{}. {}", err, advice)?;
}
Incompatibility::IndexMismatch {
library_version,
index_version,
} => {
let err = format!(
"Library version: {}, index version: {}",
library_version.index_format_version, index_version.index_format_version
);
// TODO make a more useful error message
// include the version range that supports this index_format_version
let advice = format!(
"Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
and rebuild your project.",
index_version.index_format_version, index_version.major, index_version.minor
);
write!(f, "{}. {}", err, advice)?;
}
}
Ok(())
}
}
/// Error that may occur when accessing a file read
#[derive(Debug)]
pub enum OpenReadError {
@@ -224,8 +164,6 @@ pub enum OpenReadError {
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
/// This library doesn't support the index version found on disk
IncompatibleIndex(Incompatibility),
}
impl From<IOError> for OpenReadError {
@@ -245,9 +183,19 @@ impl fmt::Display for OpenReadError {
"an io error occurred while opening a file for reading: '{}'",
err
),
OpenReadError::IncompatibleIndex(ref footer) => {
write!(f, "Incompatible index format: {:?}", footer)
}
}
}
}
impl StdError for OpenReadError {
fn description(&self) -> &str {
"error occurred while opening a file for reading"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err),
}
}
}
@@ -268,12 +216,6 @@ impl From<IOError> for DeleteError {
}
}
impl From<Incompatibility> for OpenReadError {
fn from(incompatibility: Incompatibility) -> Self {
OpenReadError::IncompatibleIndex(incompatibility)
}
}
impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {

View File

@@ -1,339 +0,0 @@
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility;
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
type CrcHashU32 = u32;
#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
pub version: Version,
pub meta: String,
pub versioned_footer: VersionedFooter,
}
/// Serialises the footer to a byte-array
/// - versioned_footer_len: 4 bytes
/// - versioned_footer: variable bytes
/// - meta_len: 4 bytes
/// - meta: variable bytes
/// - version_len: 4 bytes
/// - version json: variable bytes
impl BinarySerializable for Footer {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
BinarySerializable::serialize(&self.versioned_footer, writer)?;
BinarySerializable::serialize(&self.meta, writer)?;
let version_string =
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
BinarySerializable::serialize(&version_string, writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let versioned_footer = VersionedFooter::deserialize(reader)?;
let meta = String::deserialize(reader)?;
let version_json = String::deserialize(reader)?;
let version = serde_json::from_str(&version_json)?;
Ok(Footer {
version,
meta,
versioned_footer,
})
}
}
impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self {
let version = crate::VERSION.clone();
let meta = version.to_string();
Footer {
version,
meta,
versioned_footer,
}
}
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
let mut counting_write = CountingWriter::wrap(&mut write);
self.serialize(&mut counting_write)?;
let written_len = counting_write.written_bytes();
write.write_u32::<LittleEndian>(written_len as u32)?;
Ok(())
}
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
if source.len() < 4 {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The file is smaller than 4 bytes (len={}).",
source.len()
),
));
}
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
let body_len = body_footer.len() - footer_len;
let (body, footer_data) = body_footer.split(body_len);
let mut cursor = footer_data.as_slice();
let footer = Footer::deserialize(&mut cursor)?;
Ok((footer, body))
}
/// Confirms that the index will be read correctly by this version of tantivy
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
pub fn is_compatible(&self) -> Result<(), Incompatibility> {
let library_version = crate::version();
match &self.versioned_footer {
VersionedFooter::V1 {
crc32: _crc,
store_compression: compression,
} => {
if &library_version.store_compression != compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: compression.to_string(),
});
}
Ok(())
}
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
library_version: library_version.clone(),
index_version: self.version.clone(),
}),
}
}
}
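To make the footer layout concrete, a hypothetical round trip (the crc value, the body bytes, and the `ReadOnlySource::from(Vec<u8>)` conversion are assumptions of this sketch):

let footer = Footer::new(VersionedFooter::V1 {
    crc32: 0xDEAD_BEEF,
    store_compression: "lz4".to_string(),
});
let mut buf: Vec<u8> = b"segment body".to_vec();
footer.append_footer(&mut buf).unwrap();
let (recovered, body) = Footer::extract_footer(ReadOnlySource::from(buf)).unwrap();
assert_eq!(recovered, footer);
assert_eq!(body.as_slice(), b"segment body");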
/// Footer that includes a crc32 hash that enables us to checksum files in the index
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion,
V1 {
crc32: CrcHashU32,
store_compression: String,
},
}
impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new();
match self {
VersionedFooter::V1 {
crc32,
store_compression: compression,
} => {
// Serializes a valid `VersionedFooter`; serializing an unknown version is an error
// [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?;
}
VersionedFooter::UnknownVersion => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ",
));
}
}
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
writer.write_all(&buf[..])?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let len = VInt::deserialize(reader)?.0 as usize;
let mut buf = vec![0u8; len];
reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?;
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression: compression,
})
} else {
Ok(VersionedFooter::UnknownVersion)
}
}
}
impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> {
match self {
VersionedFooter::V1 { crc32, .. } => Some(*crc32),
VersionedFooter::UnknownVersion { .. } => None,
}
}
}
pub(crate) struct FooterProxy<W: TerminatingWrite> {
/// always Some except after terminate call
hasher: Option<Hasher>,
/// always Some except after terminate call
writer: Option<W>,
}
impl<W: TerminatingWrite> FooterProxy<W> {
pub fn new(writer: W) -> Self {
FooterProxy {
hasher: Some(Hasher::new()),
writer: Some(writer),
}
}
}
impl<W: TerminatingWrite> Write for FooterProxy<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let count = self.writer.as_mut().unwrap().write(buf)?;
self.hasher.as_mut().unwrap().update(&buf[..count]);
Ok(count)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.as_mut().unwrap().flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: crate::store::COMPRESSION.to_string(),
});
let mut writer = self.writer.take().unwrap();
footer.append_footer(&mut writer)?;
writer.terminate()
}
}
#[cfg(test)]
mod tests {
use super::CrcHashU32;
use super::FooterProxy;
use crate::common::BinarySerializable;
use crate::directory::footer::{Footer, VersionedFooter};
use crate::directory::TerminatingWrite;
use byteorder::{ByteOrder, LittleEndian};
use regex::Regex;
#[test]
fn test_versioned_footer() {
let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok());
assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
if let VersionedFooter::V1 {
crc32: _,
store_compression,
} = footer.versioned_footer
{
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version());
}
#[test]
fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new();
let crc32 = 123456u32;
let footer: Footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
});
footer.serialize(&mut buffer).unwrap();
let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
assert_eq!(footer_deser, footer);
}
#[test]
fn footer_length() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let mut buf = Vec::new();
versioned_footer.serialize(&mut buf).unwrap();
assert_eq!(buf.len(), 13);
let footer = Footer::new(versioned_footer);
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
assert!(regex_ptn.is_match(&footer.meta));
}
#[test]
fn versioned_footer_from_bytes() {
let v_footer_bytes = vec![
// versioned footer length
12 | 128,
// index format version
1,
0,
0,
0,
// crc 32
12,
35,
89,
18,
// compression format
3 | 128,
b'l',
b'z',
b'4',
];
let mut cursor = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc,
store_compression: "lz4".to_string(),
};
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buffer = Vec::new();
assert!(versioned_footer.serialize(&mut buffer).is_ok());
assert_eq!(&v_footer_bytes[..], &buffer[..]);
}
#[test]
fn versioned_footer_panic() {
let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
let mut b = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
assert!(b.is_empty());
let expected_versioned_footer = VersionedFooter::UnknownVersion;
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buf = Vec::new();
assert!(versioned_footer.serialize(&mut buf).is_err());
}
#[test]
#[cfg(not(feature = "lz4"))]
fn compression_mismatch() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let footer = Footer::new(versioned_footer);
let res = footer.is_compatible();
assert!(res.is_err());
}
}

View File

@@ -1,16 +1,13 @@
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock;
use crate::directory::META_LOCK;
use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crc32fast::Hasher;
use crate::Result;
use serde_json;
use std::collections::HashSet;
use std::io;
@@ -65,7 +62,7 @@ fn save_managed_paths(
impl ManagedDirectory {
/// Wraps a directory as managed directory.
pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> {
pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
match directory.atomic_read(&MANAGED_FILEPATH) {
Ok(data) => {
let managed_files_json = String::from_utf8_lossy(&data);
@@ -88,11 +85,6 @@ impl ManagedDirectory {
meta_informations: Arc::default(),
}),
Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json`
// has no footer, so incompatibility cannot be detected.
Err(crate::TantivyError::IncompatibleIndex(incompatibility))
}
}
}
@@ -110,10 +102,7 @@ impl ManagedDirectory {
/// If a file cannot be deleted (for permission reasons for instance)
/// an error is simply logged, and the file remains in the list of managed
/// files.
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(
&mut self,
get_living_files: L,
) -> crate::Result<GarbageCollectionResult> {
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
info!("Garbage collect");
let mut files_to_delete = vec![];
@@ -139,25 +128,19 @@ impl ManagedDirectory {
// 2) the writer changes meta.json (for instance after a merge or a commit)
// 3) gc kicks in.
// 4) gc removes a file that was useful for process B, before process B opened it.
match self.acquire_lock(&META_LOCK) {
Ok(_meta_lock) => {
let living_files = get_living_files();
for managed_path in &meta_informations_rlock.managed_paths {
if !living_files.contains(managed_path) {
files_to_delete.push(managed_path.clone());
}
if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
let living_files = get_living_files();
for managed_path in &meta_informations_rlock.managed_paths {
if !living_files.contains(managed_path) {
files_to_delete.push(managed_path.clone());
}
}
Err(err) => {
error!("Failed to acquire lock for GC");
return Err(crate::Error::from(err));
}
} else {
error!("Failed to acquire lock for GC");
}
}
let mut failed_to_delete_files = vec![];
let mut deleted_files = vec![];
for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) {
Ok(_) => {
@@ -167,10 +150,9 @@ impl ManagedDirectory {
Err(file_error) => {
match file_error {
DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete.clone());
deleted_files.push(file_to_delete);
}
DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file
// is mmapped.
@@ -193,13 +175,10 @@ impl ManagedDirectory {
for delete_file in &deleted_files {
managed_paths_write.remove(delete_file);
}
save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?;
if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
error!("Failed to save the list of managed files.");
}
}
Ok(GarbageCollectionResult {
deleted_files,
failed_to_delete_files,
})
}
/// Registers a file as managed
@@ -228,60 +207,17 @@ impl ManagedDirectory {
}
Ok(())
}
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
Ok(footer
.versioned_footer
.crc()
.map(|v| v == crc)
.unwrap_or(false))
}
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
hashset.insert(path);
}
}
Ok(hashset)
}
}
impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
self.directory.open_read(path)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory
.open_write(path)?
.into_inner()
.map_err(|_| ())
.expect("buffer should be empty"),
))))
self.directory.open_write(path)
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -323,9 +259,8 @@ impl Clone for ManagedDirectory {
#[cfg(test)]
mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
use crate::directory::{Directory, ManagedDirectory, MmapDirectory};
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
@@ -340,15 +275,16 @@ mod tests_mmap_specific {
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let write_file = managed_directory.open_write(test_path1).unwrap();
write_file.terminate().unwrap();
let mut write_file = managed_directory.open_write(test_path1).unwrap();
write_file.flush().unwrap();
managed_directory
.atomic_write(test_path2, &[0u8, 1u8])
.unwrap();
assert!(managed_directory.exists(test_path1));
assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
let living_files: HashSet<PathBuf> =
[test_path1.to_owned()].into_iter().cloned().collect();
managed_directory.garbage_collect(|| living_files);
assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2));
}
@@ -358,7 +294,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = HashSet::new();
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2));
}
@@ -374,15 +310,13 @@ mod tests_mmap_specific {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
managed_directory
.atomic_write(test_path1, &vec![0u8, 1u8])
.unwrap();
assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
assert!(managed_directory
.garbage_collect(|| living_files.clone())
.is_ok());
managed_directory.garbage_collect(|| living_files.clone());
if cfg!(target_os = "windows") {
// On Windows, gc should try to delete the file and fail, since it is mmapped.
assert!(managed_directory.exists(test_path1));
@@ -390,47 +324,11 @@ mod tests_mmap_specific {
drop(_mmap_read);
// The file should still be in the list of managed files and
// eventually be deleted once the mmap is released.
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1));
} else {
assert!(!managed_directory.exists(test_path1));
}
}
#[test]
fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap();
let read_source = managed_directory.open_read(test_path2).unwrap();
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2);
let mut file = OpenOptions::new()
.write(true)
.open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file);
let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2));
}
}

View File

@@ -11,7 +11,6 @@ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
@@ -19,7 +18,7 @@ use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use atomicwrites;
use memmap::Mmap;
use std::collections::HashMap;
@@ -142,28 +141,42 @@ impl MmapCache {
}
}
struct WatcherWrapper {
struct InnerWatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>,
watcher_router: WatchCallbackList,
}
impl InnerWatcherWrapper {
pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let mut watcher = notify::raw_watcher(tx)?;
watcher.watch(path, RecursiveMode::Recursive)?;
let inner = InnerWatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router: Default::default(),
};
Ok((inner, watcher_recv))
}
}
#[derive(Clone)]
struct WatcherWrapper {
inner: Arc<InnerWatcherWrapper>,
}
impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let watcher = notify::raw_watcher(tx)
.and_then(|mut watcher| {
watcher.watch(path, RecursiveMode::Recursive)?;
Ok(watcher)
})
.map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_wrapper = WatcherWrapper {
inner: Arc::new(inner),
};
let watcher_wrapper_clone = watcher_wrapper.clone();
thread::Builder::new()
.name("meta-file-watch-thread".to_string())
.spawn(move || {
@@ -174,7 +187,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH {
let _ = watcher_router_clone.broadcast();
watcher_wrapper_clone.inner.watcher_router.broadcast();
}
}
}
@@ -187,15 +200,13 @@ impl WatcherWrapper {
}
}
}
})?;
Ok(WatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router,
})
})
.expect("Failed to spawn thread to watch meta.json");
Ok(watcher_wrapper)
}
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.watcher_router.subscribe(watch_callback)
self.inner.watcher_router.subscribe(watch_callback)
}
}
@@ -254,7 +265,7 @@ impl MmapDirectoryInner {
}
}
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
Ok(watch_wrapper.watch(watch_callback))
return Ok(watch_wrapper.watch(watch_callback));
} else {
unreachable!("At this point, watch wrapper is supposed to be initialized");
}
@@ -401,12 +412,6 @@ impl Seek for SafeFileWriter {
}
}
impl TerminatingWrite for SafeFileWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
@@ -538,15 +543,16 @@ mod tests {
// The following tests are specific to the MmapDirectory
use super::*;
use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index;
use crate::ReloadPolicy;
use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
#[test]
fn test_open_non_existent_path() {
fn test_open_non_existant_path() {
assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
}
@@ -639,18 +645,13 @@ mod tests {
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join(*META_FILEPATH);
let tmp_file = tmp_dirpath.join("coucou");
let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
}));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle2 = watch_wrapper.watch(Box::new(move || {
let _ = sender.send(());
}));
assert_eq!(counter.load(Ordering::SeqCst), 0);
fs::write(&tmp_file, b"whateverwilldo").unwrap();
assert!(receiver.recv().is_ok());
assert!(counter.load(Ordering::SeqCst) >= 1);
thread::sleep(Duration::new(0, 1_000u32));
}
#[test]
@@ -659,42 +660,34 @@ mod tests {
let mut schema_builder: SchemaBuilder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
{
let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy));
for _num_commits in 0..10 {
for _num_commits in 0..16 {
for _ in 0..10 {
index_writer.add_document(doc!(text_field=>"abc"));
}
index_writer.commit().unwrap();
}
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
for _ in 0..4 {
for _ in 0..30 {
index_writer.add_document(doc!(text_field=>"abc"));
index_writer.commit().unwrap();
reader.reload().unwrap();
}
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
let num_segments = reader.searcher().segment_readers().len();
assert!(num_segments <= 4);
assert_eq!(num_segments, 4);
assert_eq!(
num_segments * 7,
mmap_directory.get_cache_info().mmapped.len()
);
}
assert!(mmap_directory.get_cache_info().mmapped.is_empty());
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
}
}

View File

@@ -9,7 +9,6 @@ mod mmap_directory;
mod directory;
mod directory_lock;
mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;
@@ -25,74 +24,18 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::path::PathBuf;
/// Outcome of the Garbage collection
pub struct GarbageCollectionResult {
/// List of files that were deleted in this cycle
pub deleted_files: Vec<PathBuf>,
/// List of files that were scheduled to be deleted in this cycle,
/// but whose deletion did not work. This typically happens on Windows,
/// as deleting a memory-mapped file is forbidden.
///
/// If a searcher is still held, a file cannot be deleted.
/// This is not considered a bug, the file will simply be deleted
/// in the next GC.
pub failed_to_delete_files: Vec<PathBuf>,
}
use std::io::{BufWriter, Write};
#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent users from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());
/// Trait used to indicate that no more writes need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicates that the writer will no longer be used. Internally calls `terminate_ref`.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
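As an illustration of the pattern described above (a hypothetical wrapper, not taken from tantivy): an implementor only provides `terminate_ref`, and because `AntiCallToken` cannot be constructed outside this module, external callers have to go through the consuming `terminate()`:

struct PassThroughWriter<W: TerminatingWrite> {
    inner: W,
}

impl<W: TerminatingWrite> Write for PassThroughWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

impl<W: TerminatingWrite> TerminatingWrite for PassThroughWriter<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        // Flush our own state, then let the wrapped writer terminate.
        self.flush()?;
        self.inner.terminate_ref(token)
    }
}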
/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write
/// and Seek.
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>;
pub type WritePtr = BufWriter<Box<dyn Write>>;
#[cfg(test)]
mod tests;

View File

@@ -1,9 +1,8 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
@@ -72,12 +71,6 @@ impl Write for VecWriter {
}
}
impl TerminatingWrite for VecWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[derive(Default)]
struct InnerDirectory {
fs: HashMap<PathBuf, ReadOnlySource>,
@@ -191,11 +184,11 @@ impl Directory for RAMDirectory {
// Reserve the path to prevent calls to .write() to succeed.
self.fs.write().unwrap().write(path_buf.clone(), &[]);
let mut vec_writer = VecWriter::new(path_buf, self.clone());
let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
vec_writer.write_all(data)?;
vec_writer.flush()?;
if path == Path::new(&*META_FILEPATH) {
let _ = self.fs.write().unwrap().watch_router.broadcast();
self.fs.write().unwrap().watch_router.broadcast();
}
Ok(())
}

View File

@@ -70,12 +70,6 @@ impl ReadOnlySource {
(left, right)
}
/// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Creates a ReadOnlySource that is just a
/// view over a slice of the data.
///

View File

@@ -1,117 +1,25 @@
use super::*;
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write;
use std::mem;
use std::path::{Path, PathBuf};
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::time;
use std::time::Duration;
#[cfg(feature = "mmap")]
mod mmap_directory_tests {
use crate::directory::MmapDirectory;
type DirectoryImpl = MmapDirectory;
fn make_directory() -> DirectoryImpl {
MmapDirectory::create_from_tempdir().unwrap()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
#[test]
fn test_ram_directory() {
let mut ram_directory = RAMDirectory::create();
test_directory(&mut ram_directory);
}
mod ram_directory_tests {
use crate::directory::RAMDirectory;
type DirectoryImpl = RAMDirectory;
fn make_directory() -> DirectoryImpl {
RAMDirectory::default()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
#[test]
#[cfg(feature = "mmap")]
fn test_mmap_directory() {
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
test_directory(&mut mmap_directory);
}
#[test]
@@ -191,39 +99,48 @@ fn test_directory_delete(directory: &mut dyn Directory) {
assert!(directory.delete(&test_path).is_err());
}
fn test_directory(directory: &mut dyn Directory) {
test_simple(directory);
test_rewrite_forbidden(directory);
test_write_create_the_file(directory);
test_directory_delete(directory);
test_lock_non_blocking(directory);
test_lock_blocking(directory);
test_watch(directory);
}
fn test_watch(directory: &mut dyn Directory) {
let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let (sender, receiver) = crossbeam::channel::unbounded();
let watch_callback = Box::new(move || {
counter_clone.fetch_add(1, SeqCst);
counter_clone.fetch_add(1, Ordering::SeqCst);
});
// This callback is used to synchronize watching in our unit test.
// We bind it to a variable because the callback is removed when that
// handle is dropped.
let watch_handle = directory.watch(watch_callback).unwrap();
let _progress_listener = directory
.watch(Box::new(move || {
let val = num_progress.fetch_add(1, SeqCst);
let _ = sender.send(val);
}))
.unwrap();
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok());
thread::sleep(Duration::new(0, 10_000));
assert_eq!(0, counter.load(Ordering::SeqCst));
let watch_handle = directory.watch(watch_callback).unwrap();
for i in 0..10 {
assert_eq!(i, counter.load(SeqCst));
assert_eq!(i, counter.load(Ordering::SeqCst));
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
assert_eq!(i + 1, counter.load(SeqCst));
for _ in 0..100 {
if counter.load(Ordering::SeqCst) > i {
break;
}
thread::sleep(Duration::from_millis(10));
}
assert_eq!(i + 1, counter.load(Ordering::SeqCst));
}
mem::drop(watch_handle);
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok());
assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
assert_eq!(10, counter.load(SeqCst));
thread::sleep(Duration::from_millis(200));
assert_eq!(10, counter.load(Ordering::SeqCst));
}
fn test_lock_non_blocking(directory: &mut dyn Directory) {
@@ -257,13 +174,9 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
is_blocking: true,
});
assert!(lock_a_res.is_ok());
let in_thread = Arc::new(AtomicBool::default());
let in_thread_clone = in_thread.clone();
let (sender, receiver) = oneshot::channel();
std::thread::spawn(move || {
//< lock_a_res is sent to the thread.
in_thread_clone.store(true, SeqCst);
let _just_sync = block_on(receiver);
std::thread::sleep(time::Duration::from_millis(10));
// explicitly dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way.
drop(lock_a_res);
@@ -276,18 +189,14 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
});
assert!(lock_a_res.is_err());
}
let directory_clone = directory.box_clone();
let (sender2, receiver2) = oneshot::channel();
let join_handle = std::thread::spawn(move || {
assert!(sender2.send(()).is_ok());
let lock_a_res = directory_clone.acquire_lock(&Lock {
{
// the blocking call should wait for at least 10ms.
let start = time::Instant::now();
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: true,
});
assert!(in_thread.load(SeqCst));
assert!(lock_a_res.is_ok());
});
assert!(block_on(receiver2).is_ok());
assert!(sender.send(()).is_ok());
assert!(join_handle.join().is_ok());
assert!(start.elapsed().subsec_millis() >= 10);
}
}

View File

@@ -1,5 +1,3 @@
use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;
@@ -49,21 +47,14 @@ impl WatchCallbackList {
}
/// Triggers all callbacks
pub fn broadcast(&self) -> impl Future<Output = ()> {
pub fn broadcast(&self) {
let callbacks = self.list_callback();
let (sender, receiver) = oneshot::channel();
let result = receiver.unwrap_or_else(|_| ());
if callbacks.is_empty() {
let _ = sender.send(());
return result;
}
let spawn_res = std::thread::Builder::new()
.name("watch-callbacks".to_string())
.spawn(move || {
for callback in callbacks {
callback();
}
let _ = sender.send(());
});
if let Err(err) = spawn_res {
error!(
@@ -71,17 +62,19 @@ impl WatchCallbackList {
err
);
}
result
}
}
#[cfg(test)]
mod tests {
use crate::directory::WatchCallbackList;
use futures::executor::block_on;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
const WAIT_TIME: u64 = 20;
#[test]
fn test_watch_event_router_simple() {
@@ -91,22 +84,22 @@ mod tests {
let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
block_on(watch_event_router.broadcast());
watch_event_router.broadcast();
assert_eq!(0, counter.load(Ordering::SeqCst));
let handle_a = watch_event_router.subscribe(inc_callback);
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(watch_event_router.broadcast());
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(1, counter.load(Ordering::SeqCst));
block_on(async {
(
watch_event_router.broadcast().await,
watch_event_router.broadcast().await,
watch_event_router.broadcast().await,
)
});
watch_event_router.broadcast();
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
block_on(watch_event_router.broadcast());
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst));
}
@@ -122,20 +115,20 @@ mod tests {
};
let handle_a = watch_event_router.subscribe(inc_callback(1));
let handle_a2 = watch_event_router.subscribe(inc_callback(10));
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async {
futures::join!(
watch_event_router.broadcast(),
watch_event_router.broadcast()
)
});
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(22, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
block_on(watch_event_router.broadcast());
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst));
mem::drop(handle_a2);
block_on(watch_event_router.broadcast());
block_on(watch_event_router.broadcast());
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst));
}
@@ -149,15 +142,15 @@ mod tests {
});
let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async {
let future1 = watch_event_router.broadcast();
let future2 = watch_event_router.broadcast();
futures::join!(future1, future2)
});
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
thread::sleep(Duration::from_millis(WAIT_TIME));
mem::drop(handle_a);
let _ = watch_event_router.broadcast();
block_on(watch_event_router.broadcast());
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
}
}
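The tests above exercise the subscribe/broadcast contract: a callback fires on each broadcast for as long as its handle is alive, and silently stops once the handle is dropped. A minimal synchronous sketch of that contract follows (the real list keeps `Weak` references and, as the diff shows, runs callbacks on a dedicated thread, which is omitted here):

```rust
use std::sync::{Arc, Mutex, Weak};

type WatchCallback = Box<dyn Fn() + Send + Sync>;

/// Handle returned by `subscribe`. The list only stores a `Weak` pointer,
/// so dropping the handle unsubscribes the callback.
pub struct WatchHandle(#[allow(dead_code)] Arc<WatchCallback>);

#[derive(Default)]
pub struct WatchCallbackList {
    callbacks: Mutex<Vec<Weak<WatchCallback>>>,
}

impl WatchCallbackList {
    pub fn subscribe(&self, callback: WatchCallback) -> WatchHandle {
        let callback = Arc::new(callback);
        self.callbacks.lock().unwrap().push(Arc::downgrade(&callback));
        WatchHandle(callback)
    }

    /// Runs every callback that is still alive and prunes dead entries.
    pub fn broadcast(&self) {
        self.callbacks.lock().unwrap().retain(|weak| match weak.upgrade() {
            Some(callback) => {
                (*callback)();
                true
            }
            None => false,
        });
    }
}

fn main() {
    let list = WatchCallbackList::default();
    let handle = list.subscribe(Box::new(|| println!("meta.json changed")));
    list.broadcast(); // fires once
    drop(handle);
    list.broadcast(); // handle dropped: nothing fires
}
```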

View File

@@ -2,8 +2,8 @@
use std::io;
use crate::directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
use crate::schema;
@@ -80,9 +80,6 @@ pub enum TantivyError {
/// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)]
SystemError(String),
/// Index incompatible with current version of tantivy
#[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility),
}
impl From<DataCorruption> for TantivyError {
@@ -132,9 +129,6 @@ impl From<OpenReadError> for TantivyError {
match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
}
}
}
@@ -176,9 +170,3 @@ impl From<serde_json::Error> for TantivyError {
TantivyError::IOError(io_err.into())
}
}
impl From<rayon::ThreadPoolBuildError> for TantivyError {
fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
TantivyError::SystemError(error.to_string())
}
}

View File

@@ -1,19 +1,17 @@
use crate::common::{BitSet, HasLen};
use crate::common::HasLen;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::space_usage::ByteCount;
use crate::DocId;
use bit_set::BitSet;
use std::io;
use std::io::Write;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(
delete_bitset: &BitSet,
max_doc: u32,
writer: &mut WritePtr,
) -> io::Result<()> {
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
let max_doc = delete_bitset.capacity();
let mut byte = 0u8;
let mut shift = 0u8;
for doc in 0..max_doc {
@@ -31,7 +29,7 @@ pub fn write_delete_bitset(
if max_doc % 8 > 0 {
writer.write_all(&[byte])?;
}
Ok(())
writer.flush()
}
/// Set of deleted `DocId`s.
@@ -85,40 +83,43 @@ impl HasLen for DeleteBitSet {
mod tests {
use super::*;
use crate::directory::*;
use bit_set::BitSet;
use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
fn test_delete_bitset_helper(bitset: &BitSet) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
writer.terminate().unwrap();
write_delete_bitset(bitset, &mut writer).unwrap();
}
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
for doc in 0..max_doc {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
{
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
}
assert_eq!(delete_bitset.len(), bitset.len());
}
#[test]
fn test_delete_bitset() {
{
let mut bitset = BitSet::with_max_value(10);
let mut bitset = BitSet::with_capacity(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset, 10);
test_delete_bitset_helper(&bitset);
}
{
let mut bitset = BitSet::with_max_value(8);
let mut bitset = BitSet::with_capacity(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset, 8);
test_delete_bitset_helper(&bitset);
}
}
}
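For readers following the `write_delete_bitset` change above: the on-disk layout is one bit per document, padded to a full byte. A small standalone sketch of that layout, assuming the same least-significant-bit-first packing as the `byte |= 1 << shift` loop in the serializer:

```rust
/// Packs the set of deleted doc ids into one bit per document.
fn pack_delete_bitset(deleted_docs: &[u32], max_doc: u32) -> Vec<u8> {
    let mut bytes = vec![0u8; (max_doc as usize + 7) / 8];
    for &doc in deleted_docs {
        assert!(doc < max_doc);
        bytes[(doc / 8) as usize] |= 1u8 << (doc % 8);
    }
    bytes
}

/// Mirror of `DeleteBitSet::is_deleted` on the packed representation.
fn is_deleted(bitset_bytes: &[u8], doc: u32) -> bool {
    bitset_bytes[(doc / 8) as usize] & (1u8 << (doc % 8)) != 0
}

fn main() {
    let bytes = pack_delete_bitset(&[1, 9], 10);
    assert_eq!(bytes.len(), 2); // 10 docs -> 2 bytes
    assert!(is_deleted(&bytes, 9));
    assert!(!is_deleted(&bytes, 3));
}
```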

View File

@@ -429,111 +429,5 @@ mod tests {
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA};
use super::*;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use std::collections::HashMap;
use std::path::Path;
use test::{self, Bencher};
#[bench]
fn bench_intfastfield_linear_veclookup(b: &mut Bencher) {
let permutation = generate_permutation();
b.iter(|| {
let n = test::black_box(7000u32);
let mut a = 0u64;
for i in (0u32..n / 7).map(|v| v * 7) {
a ^= permutation[i as usize];
}
a
});
}
#[bench]
fn bench_intfastfield_veclookup(b: &mut Bencher) {
let permutation = generate_permutation();
b.iter(|| {
let n = test::black_box(1000u32);
let mut a = 0u64;
for _ in 0u32..n {
a = permutation[a as usize];
}
a
});
}
#[bench]
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
let path = Path::new("test");
let permutation = generate_permutation();
let mut directory: RAMDirectory = RAMDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
}
fast_field_writers
.serialize(&mut serializer, &HashMap::new())
.unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data);
b.iter(|| {
let n = test::black_box(7000u32);
let mut a = 0u64;
for i in (0u32..n / 7).map(|val| val * 7) {
a ^= fast_field_reader.get(i);
}
a
});
}
}
#[bench]
fn bench_intfastfield_fflookup(b: &mut Bencher) {
let path = Path::new("test");
let permutation = generate_permutation();
let mut directory: RAMDirectory = RAMDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
}
fast_field_writers
.serialize(&mut serializer, &HashMap::new())
.unwrap();
serializer.close().unwrap();
}
let source = directory.open_read(&path).unwrap();
{
let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data);
b.iter(|| {
let n = test::black_box(1000u32);
let mut a = 0u32;
for _ in 0u32..n {
a = fast_field_reader.get(a) as u32;
}
a
});
}
}
}

View File

@@ -5,8 +5,8 @@ use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use fnv::FnvHashMap;
use itertools::Itertools;
use std::collections::HashMap;
use std::io;
/// Writer for multi-valued (as in, more than one value per document)
@@ -102,7 +102,7 @@ impl MultiValueIntFastFieldWriter {
pub fn serialize(
&self,
serializer: &mut FastFieldSerializer,
mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> {
{
// writing the offset index

View File

@@ -67,10 +67,12 @@ impl<Item: FastValue> FastFieldReader<Item> {
///
/// May panic if `doc` is greater than the segment
/// `maxdoc`.
#[inline(always)]
pub fn get(&self, doc: DocId) -> Item {
self.get_u64(u64::from(doc))
}
#[inline(always)]
pub(crate) fn get_u64(&self, doc: u64) -> Item {
Item::from_u64(self.min_value_u64 + self.bit_unpacker.get(doc))
}
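`get_u64` above reconstructs a value as `min_value_u64 + bit_unpacker.get(doc)`: the column stores its minimum once and a small delta per document. A toy sketch of that decoding step (the deltas are bit-packed on disk; they are kept as plain `u64`s here for brevity):

```rust
/// Simplified stand-in for a single-value fast field column.
struct FastFieldColumn {
    min_value: u64,
    deltas: Vec<u64>,
}

impl FastFieldColumn {
    fn get(&self, doc: usize) -> u64 {
        // Equivalent to `min_value_u64 + bit_unpacker.get(doc)` in the reader.
        self.min_value + self.deltas[doc]
    }
}

fn main() {
    let column = FastFieldColumn {
        min_value: 1_000,
        deltas: vec![0, 5, 7, 2],
    };
    assert_eq!(column.get(1), 1_005);
    assert_eq!(column.get(3), 1_002);
}
```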

View File

@@ -59,7 +59,8 @@ impl FastFieldReaders {
fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(),
};
for (field, field_entry) in schema.fields() {
for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type();
if field_type == &FieldType::Bytes {
let idx_reader = fast_fields_composite
@@ -156,6 +157,8 @@ impl FastFieldReaders {
/// If the field is an i64-fast field, return the associated u64 reader. Values are
/// mapped from i64 to u64 using the (unique) order-preserving monotonic mapping.
///
/// TODO: should it also be lenient with f64?
///
/// This method is useful when merging segment reader.
pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
if let Some(u64_ff_reader) = self.u64(field) {
@@ -164,9 +167,6 @@ impl FastFieldReaders {
if let Some(i64_ff_reader) = self.i64(field) {
return Some(i64_ff_reader.into_u64_reader());
}
if let Some(f64_ff_reader) = self.f64(field) {
return Some(f64_ff_reader.into_u64_reader());
}
None
}
@@ -203,9 +203,6 @@ impl FastFieldReaders {
if let Some(i64s_ff_reader) = self.i64s(field) {
return Some(i64s_ff_reader.into_u64s_reader());
}
if let Some(f64s_ff_reader) = self.f64s(field) {
return Some(f64s_ff_reader.into_u64s_reader());
}
None
}
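The `u64_lenient` helpers above rely on the monotonic i64-to-u64 mapping mentioned in the doc comment. As a hedged illustration, the usual order-preserving mapping flips the sign bit (see `common::i64_to_u64` in the crate for the actual implementation):

```rust
/// Maps i64::MIN..=i64::MAX onto 0..=u64::MAX while preserving order,
/// so a u64-based fast field reader can serve an i64 column.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    assert_eq!(i64_to_u64(i64::MIN), 0);
    assert_eq!(i64_to_u64(-1) + 1, i64_to_u64(0));
    assert!(i64_to_u64(-5) < i64_to_u64(3)); // order preserved across zero
    assert_eq!(u64_to_i64(i64_to_u64(-42)), -42); // round-trips
}
```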

View File

@@ -6,7 +6,6 @@ use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
@@ -24,7 +23,8 @@ impl FastFieldsWriter {
let mut multi_values_writers = Vec::new();
let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() {
for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let default_value = match *field_entry.field_type() {
FieldType::I64(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
@@ -116,7 +116,7 @@ impl FastFieldsWriter {
pub fn serialize(
&self,
serializer: &mut FastFieldSerializer,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> {
for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?;

View File

@@ -22,14 +22,11 @@ impl FieldNormsWriter {
pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> {
schema
.fields()
.filter_map(|(field, field_entry)| {
if field_entry.is_indexed() {
Some(field)
} else {
None
}
})
.collect::<Vec<_>>()
.iter()
.enumerate()
.filter(|&(_, field_entry)| field_entry.is_indexed())
.map(|(field, _)| Field(field as u32))
.collect::<Vec<Field>>()
}
/// Initialize with state for tracking the field norm fields
@@ -38,7 +35,7 @@ impl FieldNormsWriter {
let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
let max_field = fields
.iter()
.map(Field::field_id)
.map(|field| field.0)
.max()
.map(|max_field_id| max_field_id as usize + 1)
.unwrap_or(0);
@@ -53,8 +50,8 @@ impl FieldNormsWriter {
///
/// Will extend with 0-bytes for documents that have not been seen.
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
for field in self.fields.iter() {
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8);
for &field in self.fields.iter() {
self.fieldnorms_buffer[field.0 as usize].resize(max_doc as usize, 0u8);
}
}
@@ -67,7 +64,7 @@ impl FieldNormsWriter {
/// * field - the field being set
/// * fieldnorm - the number of terms present in document `doc` in field `field`
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize];
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.0 as usize];
assert!(
fieldnorm_buffer.len() <= doc as usize,
"Cannot register a given fieldnorm twice"
@@ -80,7 +77,7 @@ impl FieldNormsWriter {
/// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.0 as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
}
Ok(())

View File

@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem;
use std::ops::DerefMut;
use std::sync::{Arc, RwLock, Weak};
use std::sync::{Arc, RwLock};
// The DeleteQueue is conceptually similar to a multiple-consumer,
// single-producer broadcast channel.
@@ -14,15 +14,14 @@ use std::sync::{Arc, RwLock, Weak};
//
// New consumers can be created in two ways:
// - calling `delete_queue.cursor()` returns a cursor, that
// will include all future delete operations (and some or none
// of the past operations; the client is in charge of checking the opstamps).
// will include all future delete operations (and no past operations).
// - cloning an existing cursor returns a new cursor, that
// is at the exact same position, and can now advance independently
// from the original cursor.
#[derive(Default)]
struct InnerDeleteQueue {
writer: Vec<DeleteOperation>,
last_block: Weak<Block>,
last_block: Option<Arc<Block>>,
}
#[derive(Clone)]
@@ -33,31 +32,21 @@ pub struct DeleteQueue {
impl DeleteQueue {
// Creates a new delete queue.
pub fn new() -> DeleteQueue {
DeleteQueue {
let delete_queue = DeleteQueue {
inner: Arc::default(),
}
}
};
let next_block = NextBlock::from(delete_queue.clone());
fn get_last_block(&self) -> Arc<Block> {
{
// try get the last block with simply acquiring the read lock.
let rlock = self.inner.read().unwrap();
if let Some(block) = rlock.last_block.upgrade() {
return block;
}
let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
delete_queue_wlock.last_block = Some(Arc::new(Block {
operations: Arc::default(),
next: next_block,
}));
}
// It failed. Let's double check after acquiring the write, as someone could have called
// `get_last_block` right after we released the rlock.
let mut wlock = self.inner.write().unwrap();
if let Some(block) = wlock.last_block.upgrade() {
return block;
}
let block = Arc::new(Block {
operations: Arc::default(),
next: NextBlock::from(self.clone()),
});
wlock.last_block = Arc::downgrade(&block);
block
delete_queue
}
// Creates a new cursor that makes it possible to
@@ -65,7 +54,17 @@ impl DeleteQueue {
//
// Past delete operations are not accessible.
pub fn cursor(&self) -> DeleteCursor {
let last_block = self.get_last_block();
let last_block = self
.inner
.read()
.expect("Read lock poisoned when opening delete queue cursor")
.last_block
.clone()
.expect(
"Failed to unwrap last_block. This should never happen
as the Option<> is only here to make
initialization possible",
);
let operations_len = last_block.operations.len();
DeleteCursor {
block: last_block,
@@ -101,19 +100,23 @@ impl DeleteQueue {
.write()
.expect("Failed to acquire write lock on delete queue writer");
if self_wlock.writer.is_empty() {
return None;
let delete_operations;
{
let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
if writer.is_empty() {
return None;
}
delete_operations = mem::replace(writer, vec![]);
}
let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
let new_block = Arc::new(Block {
operations: Arc::new(delete_operations.into_boxed_slice()),
next: NextBlock::from(self.clone()),
});
self_wlock.last_block = Arc::downgrade(&new_block);
Some(new_block)
let next_block = NextBlock::from(self.clone());
{
self_wlock.last_block = Some(Arc::new(Block {
operations: Arc::new(delete_operations),
next: next_block,
}));
}
self_wlock.last_block.clone()
}
}
@@ -167,7 +170,7 @@ impl NextBlock {
}
struct Block {
operations: Arc<Box<[DeleteOperation]>>,
operations: Arc<Vec<DeleteOperation>>,
next: NextBlock,
}
@@ -255,7 +258,7 @@ mod tests {
let delete_queue = DeleteQueue::new();
let make_op = |i: usize| {
let field = Field::from_field_id(1u32);
let field = Field(1u32);
DeleteOperation {
opstamp: i as u64,
term: Term::from_field_u64(field, i as u64),
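The comment at the top of this file describes the `DeleteQueue` as a broadcast channel in which every cursor advances independently. Below is a deliberately simplified sketch of those cursor semantics only; the real queue chains blocks of operations so that blocks no cursor can still reach get dropped, which this single growable vector does not model:

```rust
use std::sync::{Arc, RwLock};

#[derive(Clone, Debug, PartialEq)]
struct DeleteOperation {
    opstamp: u64,
    term: String, // stand-in for the real `Term`
}

#[derive(Clone, Default)]
struct DeleteQueue {
    ops: Arc<RwLock<Vec<DeleteOperation>>>,
}

#[derive(Clone)]
struct DeleteCursor {
    queue: DeleteQueue,
    pos: usize,
}

impl DeleteQueue {
    fn push(&self, op: DeleteOperation) {
        self.ops.write().unwrap().push(op);
    }

    /// New cursors start at the current tail: past operations are not visible.
    fn cursor(&self) -> DeleteCursor {
        let pos = self.ops.read().unwrap().len();
        DeleteCursor { queue: self.clone(), pos }
    }
}

impl DeleteCursor {
    fn next(&mut self) -> Option<DeleteOperation> {
        let ops = self.queue.ops.read().unwrap();
        let op = ops.get(self.pos).cloned();
        if op.is_some() {
            self.pos += 1;
        }
        op
    }
}

fn main() {
    let queue = DeleteQueue::default();
    queue.push(DeleteOperation { opstamp: 1, term: "a".into() });
    let mut cursor = queue.cursor(); // does not see opstamp 1
    queue.push(DeleteOperation { opstamp: 2, term: "b".into() });
    let mut clone = cursor.clone(); // same position, advances independently
    assert_eq!(cursor.next().map(|op| op.opstamp), Some(2));
    assert_eq!(clone.next().map(|op| op.opstamp), Some(2));
    assert_eq!(cursor.next(), None);
}
```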

View File

@@ -1,15 +1,13 @@
use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater;
use super::PreparedCommit;
use crate::common::BitSet;
use crate::core::Index;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::directory::DirectoryLock;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
@@ -24,9 +22,10 @@ use crate::schema::Document;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Opstamp;
use crate::Result;
use bit_set::BitSet;
use crossbeam::channel;
use futures::executor::block_on;
use futures::future::Future;
use futures::{Canceled, Future};
use smallvec::smallvec;
use smallvec::SmallVec;
use std::mem;
@@ -72,7 +71,7 @@ pub struct IndexWriter {
heap_size_in_bytes_per_thread: usize,
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
workers_join_handle: Vec<JoinHandle<Result<()>>>,
operation_receiver: OperationReceiver,
operation_sender: OperationSender,
@@ -95,7 +94,7 @@ fn compute_deleted_bitset(
delete_cursor: &mut DeleteCursor,
doc_opstamps: &DocToOpstampMapping,
target_opstamp: Opstamp,
) -> crate::Result<bool> {
) -> Result<bool> {
let mut might_have_changed = false;
while let Some(delete_op) = delete_cursor.get() {
if delete_op.opstamp > target_opstamp {
@@ -115,7 +114,7 @@ fn compute_deleted_bitset(
while docset.advance() {
let deleted_doc = docset.doc();
if deleted_doc < limit_doc {
delete_bitset.insert(deleted_doc);
delete_bitset.insert(deleted_doc as usize);
might_have_changed = true;
}
}
@@ -126,74 +125,64 @@ fn compute_deleted_bitset(
Ok(might_have_changed)
}
/// Advance delete for the given segment up to the target opstamp.
///
/// Note that there is no guarantee that the resulting `segment_entry` delete_opstamp
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, the `segment_entry` is not updated.
/// Advance delete for the given segment up
/// to the target opstamp.
pub(crate) fn advance_deletes(
mut segment: Segment,
segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp,
) -> crate::Result<()> {
if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
// We are already up-to-date here.
return Ok(());
}
) -> Result<()> {
{
if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
// We are already up-to-date here.
return Ok(());
}
let mut delete_cursor = segment_entry.delete_cursor().clone();
if segment_entry.delete_bitset().is_none() && delete_cursor.get().is_none() {
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
return Ok(());
}
let segment_reader = SegmentReader::open(&segment)?;
let segment_reader = SegmentReader::open(&segment)?;
let max_doc = segment_reader.max_doc();
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_capacity(max_doc as usize),
};
let max_doc = segment_reader.max_doc();
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_max_value(max_doc),
};
let delete_cursor = segment_entry.delete_cursor();
compute_deleted_bitset(
&mut delete_bitset,
&segment_reader,
&mut delete_cursor,
&DocToOpstampMapping::None,
target_opstamp,
)?;
compute_deleted_bitset(
&mut delete_bitset,
&segment_reader,
delete_cursor,
&DocToOpstampMapping::None,
target_opstamp,
)?;
// TODO optimize
if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
// TODO optimize
for doc in 0u32..max_doc {
if seg_delete_bitset.is_deleted(doc) {
delete_bitset.insert(doc);
if segment_reader.is_deleted(doc) {
delete_bitset.insert(doc as usize);
}
}
}
let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
delete_file.terminate()?;
let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, &mut delete_file)?;
}
}
segment_entry.set_meta(segment.meta().clone());
Ok(())
}
fn index_documents(
memory_budget: usize,
segment: Segment,
segment: &Segment,
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor,
) -> crate::Result<bool> {
) -> Result<bool> {
let schema = segment.schema();
let segment_id = segment.id();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
for document_group in grouped_document_iterator {
for doc in document_group {
@@ -213,32 +202,25 @@ fn index_documents(
return Ok(false);
}
let max_doc = segment_writer.max_doc();
let num_docs = segment_writer.max_doc();
// this is ensured by the call to peek before starting
// the worker thread.
assert!(max_doc > 0);
assert!(num_docs > 0);
let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
let segment_with_max_doc = segment.with_max_doc(max_doc);
let segment_meta = segment
.index()
.inventory()
.new_segment_meta(segment_id, num_docs);
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
let delete_bitset_opt = apply_deletes(
&segment_with_max_doc,
&mut delete_cursor,
&doc_opstamps,
last_docstamp,
)?;
let delete_bitset_opt =
apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
let segment_entry = SegmentEntry::new(
segment_with_max_doc.meta().clone(),
delete_cursor,
delete_bitset_opt,
);
block_on(segment_updater.schedule_add_segment(segment_entry))?;
Ok(true)
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
Ok(segment_updater.add_segment(segment_entry))
}
fn apply_deletes(
@@ -246,7 +228,7 @@ fn apply_deletes(
mut delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp],
last_docstamp: Opstamp,
) -> crate::Result<Option<BitSet>> {
) -> Result<Option<BitSet<u32>>> {
if delete_cursor.get().is_none() {
// if there are no delete operation in the queue, no need
// to even open the segment.
@@ -254,9 +236,7 @@ fn apply_deletes(
}
let segment_reader = SegmentReader::open(segment)?;
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let max_doc = segment.meta().max_doc();
let mut deleted_bitset = BitSet::with_max_value(max_doc);
let mut deleted_bitset = BitSet::with_capacity(segment_reader.max_doc() as usize);
let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset,
&segment_reader,
@@ -291,7 +271,7 @@ impl IndexWriter {
num_threads: usize,
heap_size_in_bytes_per_thread: usize,
directory_lock: DirectoryLock,
) -> crate::Result<IndexWriter> {
) -> Result<IndexWriter> {
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
let err_msg = format!(
"The heap size per thread needs to be at least {}.",
@@ -340,17 +320,12 @@ impl IndexWriter {
Ok(index_writer)
}
fn drop_sender(&mut self) {
let (sender, _receiver) = channel::bounded(1);
mem::replace(&mut self.operation_sender, sender);
}
/// If there are some merging threads, blocks until they all finish their work and
/// then drops the `IndexWriter`.
pub fn wait_merging_threads(mut self) -> crate::Result<()> {
pub fn wait_merging_threads(mut self) -> Result<()> {
// this will stop the indexing thread,
// dropping the last reference to the segment_updater.
self.drop_sender();
drop(self.operation_sender);
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
for join_handle in former_workers_handles {
@@ -361,6 +336,7 @@ impl IndexWriter {
TantivyError::ErrorInThread("Error in indexing worker thread.".into())
})?;
}
drop(self.workers_join_handle);
let result = self
.segment_updater
@@ -375,10 +351,10 @@ impl IndexWriter {
}
#[doc(hidden)]
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
let delete_cursor = self.delete_queue.cursor();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
block_on(self.segment_updater.schedule_add_segment(segment_entry))
self.segment_updater.add_segment(segment_entry);
}
/// Creates a new segment.
@@ -395,7 +371,7 @@ impl IndexWriter {
/// Spawns a new worker thread for indexing.
/// The thread consumes documents from the pipeline.
fn add_indexing_worker(&mut self) -> crate::Result<()> {
fn add_indexing_worker(&mut self) -> Result<()> {
let document_receiver_clone = self.operation_receiver.clone();
let mut segment_updater = self.segment_updater.clone();
@@ -403,7 +379,7 @@ impl IndexWriter {
let mem_budget = self.heap_size_in_bytes_per_thread;
let index = self.index.clone();
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!("thrd-tantivy-index{}", self.worker_id))
.spawn(move || {
loop {
@@ -432,7 +408,7 @@ impl IndexWriter {
let segment = index.new_segment();
index_documents(
mem_budget,
segment,
&segment,
&mut document_iterator,
&mut segment_updater,
delete_cursor.clone(),
@@ -449,23 +425,22 @@ impl IndexWriter {
self.segment_updater.get_merge_policy()
}
/// Setter for the merge policy.
/// Set the merge policy.
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy);
}
fn start_workers(&mut self) -> crate::Result<()> {
fn start_workers(&mut self) -> Result<()> {
for _ in 0..self.num_threads {
self.add_indexing_worker()?;
}
Ok(())
}
/// Detects and removes the files that are not used by the index anymore.
pub fn garbage_collect_files(
&self,
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
self.segment_updater.schedule_garbage_collect()
/// Detects and removes the files that
/// are not used by the index anymore.
pub fn garbage_collect_files(&mut self) -> Result<()> {
self.segment_updater.garbage_collect_files().wait()
}
/// Deletes all documents from the index
@@ -475,10 +450,12 @@ impl IndexWriter {
/// by clearing and resubmitting necessary documents
///
/// ```rust
/// use tantivy::collector::TopDocs;
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::query::QueryParser;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::*;
/// use tantivy::{doc, Index};
/// use tantivy::Index;
///
/// fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
@@ -504,7 +481,7 @@ impl IndexWriter {
/// Ok(())
/// }
/// ```
pub fn delete_all_documents(&self) -> crate::Result<Opstamp> {
pub fn delete_all_documents(&mut self) -> Result<Opstamp> {
// Delete segments
self.segment_updater.remove_all_segments();
// Return new stamp - reverted stamp
@@ -515,9 +492,11 @@ impl IndexWriter {
/// Merges a given list of segments
///
/// `segment_ids` is required to be non-empty.
pub async fn merge(&mut self, segment_ids: &[SegmentId]) -> crate::Result<SegmentMeta> {
let merge_operation = self.segment_updater.make_merge_operation(segment_ids);
self.segment_updater.start_merge(merge_operation)?.await
pub fn merge(
&mut self,
segment_ids: &[SegmentId],
) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> {
self.segment_updater.start_merge(segment_ids)
}
/// Closes the current document channel send.
@@ -543,8 +522,13 @@ impl IndexWriter {
/// state as it was after the last commit.
///
/// The opstamp at the last commit is returned.
pub fn rollback(&mut self) -> crate::Result<Opstamp> {
pub fn rollback(&mut self) -> Result<Opstamp> {
info!("Rolling back to opstamp {}", self.committed_opstamp);
self.rollback_impl()
}
/// Private, implementation of rollback
fn rollback_impl(&mut self) -> Result<Opstamp> {
// marks the segment updater as killed. From now on, all
// segment updates will be ignored.
self.segment_updater.kill();
@@ -600,7 +584,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit`
/// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
// Here, because we join all of the worker threads,
// all of the segment update for this commit have been
// sent.
@@ -647,7 +631,7 @@ impl IndexWriter {
/// Commit returns the `opstamp` of the last document
/// that made it in the commit.
///
pub fn commit(&mut self) -> crate::Result<Opstamp> {
pub fn commit(&mut self) -> Result<Opstamp> {
self.prepare_commit()?.commit()
}
@@ -688,6 +672,9 @@ impl IndexWriter {
/// The opstamp is an increasing `u64` that can
/// be used by the client to align commits with its own
/// document queue.
///
/// Currently it represents the number of documents that
/// have been added since the creation of the index.
pub fn add_document(&self, document: Document) -> Opstamp {
let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document };
@@ -761,16 +748,6 @@ impl IndexWriter {
}
}
impl Drop for IndexWriter {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();
for work in self.workers_join_handle.drain(..) {
let _ = work.join();
}
}
}
#[cfg(test)]
mod tests {
@@ -780,7 +757,7 @@ mod tests {
use crate::error::*;
use crate::indexer::NoMergePolicy;
use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption, STRING};
use crate::schema::{self, IndexRecordOption};
use crate::Index;
use crate::ReloadPolicy;
use crate::Term;
@@ -1206,15 +1183,4 @@ mod tests {
assert!(commit_again.is_ok());
}
#[test]
fn test_index_doc_missing_field() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
let commit = index_writer.commit();
assert!(commit.is_ok());
}
}
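Since the doc comments in this file lean heavily on opstamps, here is a hedged usage sketch, following the import style of the updated doc-example above and the `add_document`/`commit` signatures shown in the diff: opstamps are monotonically increasing, and the opstamp returned by `commit()` tells a client how far its own queue has been made durable.

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

    let stamp_a = writer.add_document(doc!(title => "first"));
    let stamp_b = writer.add_document(doc!(title => "second"));
    assert!(stamp_b > stamp_a); // opstamps only ever increase

    let committed = writer.commit()?;
    assert!(committed >= stamp_b); // everything stamped up to `committed` is durable
    Ok(())
}
```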

View File

@@ -2,23 +2,14 @@ use crate::Opstamp;
use crate::SegmentId;
use census::{Inventory, TrackedObject};
use std::collections::HashSet;
use std::ops::Deref;
#[derive(Default)]
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl Deref for MergeOperationInventory {
type Target = Inventory<InnerMergeOperation>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl MergeOperationInventory {
pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default();
for merge_op in self.list() {
for merge_op in self.0.list() {
for &segment_id in &merge_op.segment_ids {
segment_in_merge.insert(segment_id);
}
@@ -44,13 +35,13 @@ pub struct MergeOperation {
inner: TrackedObject<InnerMergeOperation>,
}
pub(crate) struct InnerMergeOperation {
struct InnerMergeOperation {
target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>,
}
impl MergeOperation {
pub(crate) fn new(
pub fn new(
inventory: &MergeOperationInventory,
target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>,
@@ -60,7 +51,7 @@ impl MergeOperation {
segment_ids,
};
MergeOperation {
inner: inventory.track(inner_merge_operation),
inner: inventory.0.track(inner_merge_operation),
}
}

View File

@@ -190,7 +190,8 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
) -> Result<()> {
for (field, field_entry) in self.schema.fields() {
for (field_id, field_entry) in self.schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type();
match *field_type {
FieldType::HierarchicalFacet => {
@@ -648,12 +649,15 @@ impl IndexMerger {
serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() {
for (field_ord, field_entry) in self.schema.fields().iter().enumerate() {
if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) =
self.write_postings_for_field(field, field_entry.field_type(), serializer)?
{
term_ordinal_mappings.insert(field, term_ordinal_mapping);
let indexed_field = Field(field_ord as u32);
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
indexed_field,
field_entry.field_type(),
serializer,
)? {
term_ordinal_mappings.insert(indexed_field, term_ordinal_mapping);
}
}
}
@@ -709,7 +713,7 @@ mod tests {
use crate::IndexWriter;
use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on;
use futures::Future;
use std::io::Cursor;
#[test]
@@ -792,7 +796,11 @@ mod tests {
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
}
{
@@ -1036,7 +1044,11 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
@@ -1131,7 +1143,11 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap();
let searcher = reader.searcher();
@@ -1265,7 +1281,11 @@ mod tests {
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
test_searcher(
@@ -1320,7 +1340,11 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap();
// commit has not been called yet. The document should still be
// there.
@@ -1341,18 +1365,22 @@ mod tests {
let mut doc = Document::default();
doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone());
assert!(index_writer.commit().is_ok());
index_writer.commit().expect("commit failed");
index_writer.add_document(doc);
assert!(index_writer.commit().is_ok());
index_writer.commit().expect("commit failed");
index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
// assert delete has not been committed
assert!(reader.reload().is_ok());
reader.reload().expect("failed to load searcher 1");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
@@ -1391,12 +1419,12 @@ mod tests {
index_doc(&mut index_writer, &[1, 5]);
index_doc(&mut index_writer, &[3]);
index_doc(&mut index_writer, &[17]);
assert!(index_writer.commit().is_ok());
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &[20]);
assert!(index_writer.commit().is_ok());
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &[28, 27]);
index_doc(&mut index_writer, &[1_000]);
assert!(index_writer.commit().is_ok());
index_writer.commit().expect("committed");
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
@@ -1428,6 +1456,15 @@ mod tests {
assert_eq!(&vals, &[17]);
}
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
{
let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1451,13 +1488,27 @@ mod tests {
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
assert!(index_writer.wait_merging_threads().is_ok());
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer
.wait_merging_threads()
.expect("Wait for merging threads");
}
assert!(reader.reload().is_ok());
reader.reload().expect("Load searcher");
{
let searcher = reader.searcher();
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1492,46 +1543,4 @@ mod tests {
assert_eq!(&vals, &[20]);
}
}
#[test]
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);
let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
policy.set_min_merge_size(2);
writer.set_merge_policy(Box::new(policy));
for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);
writer.add_document(doc);
if i % 5 == 0 {
writer.commit()?;
}
}
writer.commit()?;
writer.wait_merging_threads()?;
// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
}

View File

@@ -18,7 +18,7 @@ mod stamper;
pub use self::index_writer::IndexWriter;
pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_operation::MergeOperation;
pub use self::merge_operation::{MergeOperation, MergeOperationInventory};
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
pub use self::prepared_commit::PreparedCommit;
pub use self::segment_entry::SegmentEntry;
@@ -28,25 +28,3 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy;
#[cfg(test)]
mod tests {
use crate::schema::{self, Schema};
use crate::{Index, Term};
#[test]
fn test_advance_delete_bug() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// there must be one deleted document in the segment
index_writer.add_document(doc!(text_field=>"b"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));
// we need enough data to trigger the bug (at least 32 documents)
for _ in 0..32 {
index_writer.add_document(doc!(text_field=>"c"));
}
index_writer.commit().unwrap();
index_writer.commit().unwrap();
}
}

View File

@@ -1,7 +1,6 @@
use super::IndexWriter;
use crate::Opstamp;
use crate::Result;
use futures::executor::block_on;
/// A prepared commit
pub struct PreparedCommit<'a> {
@@ -33,11 +32,9 @@ impl<'a> PreparedCommit<'a> {
pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp);
let _ = block_on(
self.index_writer
.segment_updater()
.schedule_commit(self.opstamp, self.payload),
);
self.index_writer
.segment_updater()
.commit(self.opstamp, self.payload)?;
Ok(self.opstamp)
}
}

View File

@@ -1,7 +1,7 @@
use crate::common::BitSet;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet;
use std::fmt;
/// A segment entry describes the state of

View File

@@ -16,28 +16,6 @@ struct SegmentRegisters {
committed: SegmentRegister,
}
#[derive(PartialEq, Eq)]
pub(crate) enum SegmentsStatus {
Committed,
Uncommitted,
}
impl SegmentRegisters {
/// Check if all the segments are committed or uncommitted.
///
/// If some segment is missing or segments are in a different state (this should not happen
/// if tantivy is used correctly), returns `None`.
fn segments_status(&self, segment_ids: &[SegmentId]) -> Option<SegmentsStatus> {
if self.uncommitted.contains_all(segment_ids) {
Some(SegmentsStatus::Uncommitted)
} else if self.committed.contains_all(segment_ids) {
Some(SegmentsStatus::Committed)
} else {
None
}
}
}
/// The segment manager stores the list of segments
/// as well as their state.
///
@@ -175,35 +153,33 @@ impl SegmentManager {
let mut registers_lock = self.write();
registers_lock.uncommitted.add_segment_entry(segment_entry);
}
// Replace a list of segments with their equivalent merged segment.
//
// Returns true if these segments are committed, false if the merged segments are uncommitted.
pub(crate) fn end_merge(
pub fn end_merge(
&self,
before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry,
) -> crate::Result<SegmentsStatus> {
) {
let mut registers_lock = self.write();
let segments_status = registers_lock
.segments_status(before_merge_segment_ids)
.ok_or_else(|| {
let target_register: &mut SegmentRegister = {
if registers_lock
.uncommitted
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.uncommitted
} else if registers_lock
.committed
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager");
crate::Error::InvalidArgument(
"The segments that were merged could not be found in the SegmentManager. \
This is not necessarily a bug, and can happen after a rollback for instance."
.to_string(),
)
})?;
let target_register: &mut SegmentRegister = match segments_status {
SegmentsStatus::Uncommitted => &mut registers_lock.uncommitted,
SegmentsStatus::Committed => &mut registers_lock.committed,
return;
}
};
for segment_id in before_merge_segment_ids {
target_register.remove_segment(segment_id);
}
target_register.add_segment_entry(after_merge_segment_entry);
Ok(segments_status)
}
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {

View File

@@ -134,4 +134,5 @@ mod tests {
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
}
}

View File

@@ -6,34 +6,39 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SerializableSegment;
use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::directory::{Directory, DirectoryClone};
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::indexer::{MergeCandidate, MergeOperation};
use crate::schema::Schema;
use crate::Opstamp;
use futures::channel::oneshot;
use futures::executor::{ThreadPool, ThreadPoolBuilder};
use futures::future::Future;
use futures::future::TryFutureExt;
use crate::Result;
use futures::oneshot;
use futures::sync::oneshot::Receiver;
use futures::Future;
use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool;
use serde_json;
use std::borrow::BorrowMut;
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Write;
use std::ops::Deref;
use std::mem;
use std::ops::DerefMut;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::sync::RwLock;
const NUM_MERGE_THREADS: usize = 4;
use std::thread;
use std::thread::JoinHandle;
/// Save the index meta file.
/// This operation is atomic :
@@ -44,7 +49,7 @@ const NUM_MERGE_THREADS: usize = 4;
/// and flushed.
///
/// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
save_metas(
&IndexMeta {
segments: Vec::new(),
@@ -65,7 +70,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
/// and flushed.
///
/// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer.
@@ -84,38 +89,21 @@ fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result
// We voluntarily pass a merge_operation ref to guarantee that
// the merge_operation is alive during the process
#[derive(Clone)]
pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>);
pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
impl Deref for SegmentUpdater {
type Target = InnerSegmentUpdater;
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
async fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
index
.directory_mut()
.garbage_collect(move || segment_updater.list_files())
}
/// Merges the list of segments given in `segment_entries`.
/// This function runs in the calling thread and is computationally expensive.
fn merge(
fn perform_merge(
merge_operation: &MergeOperation,
index: &Index,
mut segment_entries: Vec<SegmentEntry>,
target_opstamp: Opstamp,
) -> crate::Result<SegmentEntry> {
) -> Result<SegmentEntry> {
let target_opstamp = merge_operation.target_opstamp();
// first we need to apply deletes to our segment.
let mut merged_segment = index.new_segment();
// First we apply all of the deletes to the merged segment, up to the target opstamp.
// TODO add logging
let schema = index.schema();
for segment_entry in &mut segment_entries {
let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
@@ -129,19 +117,24 @@ fn merge(
.collect();
// An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?;
// ... we just serialize this index merger in our new segment
// to merge the two segments.
// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
let segment_meta = index
.inventory()
.new_segment_meta(merged_segment.id(), num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
Ok(after_merge_segment_entry)
}
pub(crate) struct InnerSegmentUpdater {
struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the
// `SegmentUpdater`.
@@ -149,12 +142,12 @@ pub(crate) struct InnerSegmentUpdater {
// This should be up to date as all updates happen through
// the unique active `SegmentUpdater`.
active_metas: RwLock<Arc<IndexMeta>>,
pool: ThreadPool,
merge_thread_pool: ThreadPool,
pool: CpuPool,
index: Index,
segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
killed: AtomicBool,
stamper: Stamper,
merge_operations: MergeOperationInventory,
@@ -165,31 +158,22 @@ impl SegmentUpdater {
index: Index,
stamper: Stamper,
delete_cursor: &DeleteCursor,
) -> crate::Result<SegmentUpdater> {
) -> Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new()
let pool = CpuPoolBuilder::new()
.name_prefix("segment_updater")
.pool_size(1)
.create()
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment updater thread".to_string())
})?;
let merge_thread_pool = ThreadPoolBuilder::new()
.name_prefix("merge_thread")
.pool_size(NUM_MERGE_THREADS)
.create()
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment merging thread".to_string())
})?;
.create();
let index_meta = index.load_metas()?;
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
active_metas: RwLock::new(Arc::new(index_meta)),
pool,
merge_thread_pool,
index,
segment_manager,
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
merging_thread_id: AtomicUsize::default(),
merging_threads: RwLock::new(HashMap::new()),
killed: AtomicBool::new(false),
stamper,
merge_operations: Default::default(),
@@ -197,82 +181,67 @@ impl SegmentUpdater {
}
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.merge_policy.read().unwrap().clone()
self.0.merge_policy.read().unwrap().clone()
}
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy);
*self.merge_policy.write().unwrap() = arc_merge_policy;
*self.0.merge_policy.write().unwrap() = arc_merge_policy;
}
fn schedule_future<T: 'static + Send, F: Future<Output = crate::Result<T>> + 'static + Send>(
fn get_merging_thread_id(&self) -> usize {
self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
}
fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>(
&self,
f: F,
) -> impl Future<Output = crate::Result<T>> {
let (sender, receiver) = oneshot::channel();
if self.is_alive() {
self.pool.spawn_ok(async move {
let _ = sender.send(f.await);
});
} else {
let _ = sender.send(Err(crate::TantivyError::SystemError(
"Segment updater killed".to_string(),
)));
}
receiver.unwrap_or_else(|_| {
let err_msg =
"A segment_updater future did not success. This should never happen.".to_string();
Err(crate::Error::SystemError(err_msg))
})
) -> CpuFuture<T, TantivyError> {
let me_clone = self.clone();
self.0.pool.spawn_fn(move || Ok(f(me_clone)))
}
pub fn schedule_add_segment(
&self,
segment_entry: SegmentEntry,
) -> impl Future<Output = crate::Result<()>> {
let segment_updater = self.clone();
self.schedule_future(async move {
segment_updater.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options().await;
Ok(())
pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
self.run_async(|segment_updater| {
segment_updater.0.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options();
true
})
.forget();
true
}
/// Orders `SegmentManager` to remove all segments
pub(crate) fn remove_all_segments(&self) {
self.segment_manager.remove_all_segments();
self.0.segment_manager.remove_all_segments();
}
pub fn kill(&mut self) {
self.killed.store(true, Ordering::Release);
self.0.killed.store(true, Ordering::Release);
}
pub fn is_alive(&self) -> bool {
!self.killed.load(Ordering::Acquire)
!self.0.killed.load(Ordering::Acquire)
}
/// Apply deletes up to the target opstamp to all segments.
///
/// The method returns copies of the segment entries,
/// updated with the delete information.
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
let mut segment_entries = self.segment_manager.segment_entries();
fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
let mut segment_entries = self.0.segment_manager.segment_entries();
for segment_entry in &mut segment_entries {
let segment = self.index.segment(segment_entry.meta().clone());
let segment = self.0.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
}
Ok(segment_entries)
}
pub fn save_metas(
&self,
opstamp: Opstamp,
commit_message: Option<String>,
) -> crate::Result<()> {
pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) {
if self.is_alive() {
let index = &self.index;
let index = &self.0.index;
let directory = index.directory();
let mut commited_segment_metas = self.segment_manager.committed_segment_metas();
let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();
// We sort segment_readers by number of documents.
// This is a heuristic to make multithreading more efficient.
@@ -294,18 +263,16 @@ impl SegmentUpdater {
opstamp,
payload: commit_message,
};
// TODO add context to the error.
save_metas(&index_meta, directory.box_clone().borrow_mut())?;
save_metas(&index_meta, directory.box_clone().borrow_mut())
.expect("Could not save metas.");
self.store_meta(&index_meta);
}
Ok(())
}
pub fn schedule_garbage_collect(
&self,
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
let garbage_collect_future = garbage_collect_files(self.clone());
self.schedule_future(garbage_collect_future)
pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> {
self.run_async(move |segment_updater| {
segment_updater.garbage_collect_files_exec();
})
}
/// List the files that are useful to the index.
@@ -313,130 +280,148 @@ impl SegmentUpdater {
/// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector.
fn list_files(&self) -> HashSet<PathBuf> {
let mut files: HashSet<PathBuf> = self
.index
.list_all_segment_metas()
.into_iter()
.flat_map(|segment_meta| segment_meta.list_files())
.collect();
let mut files = HashSet::new();
files.insert(META_FILEPATH.to_path_buf());
for segment_meta in self.0.index.inventory().all() {
files.extend(segment_meta.list_files());
}
files
}
pub fn schedule_commit(
&self,
opstamp: Opstamp,
payload: Option<String>,
) -> impl Future<Output = crate::Result<()>> {
let segment_updater: SegmentUpdater = self.clone();
self.schedule_future(async move {
let segment_entries = segment_updater.purge_deletes(opstamp)?;
segment_updater.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp, payload)?;
let _ = garbage_collect_files(segment_updater.clone()).await;
segment_updater.consider_merge_options().await;
Ok(())
fn garbage_collect_files_exec(&self) {
info!("Running garbage collection");
let mut index = self.0.index.clone();
index.directory_mut().garbage_collect(|| self.list_files());
}
pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> {
self.run_async(move |segment_updater| {
if segment_updater.is_alive() {
let segment_entries = segment_updater
.purge_deletes(opstamp)
.expect("Failed purge deletes");
segment_updater.0.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp, payload);
segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options();
}
})
.wait()
}
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
let commit_opstamp = self.load_metas().opstamp;
let merge_operation = MergeOperation::new(
&self.0.merge_operations,
commit_opstamp,
segment_ids.to_vec(),
);
self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
.wait()?
}
fn store_meta(&self, index_meta: &IndexMeta) {
*self.active_metas.write().unwrap() = Arc::new(index_meta.clone());
*self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone());
}
fn load_metas(&self) -> Arc<IndexMeta> {
self.active_metas.read().unwrap().clone()
self.0.active_metas.read().unwrap().clone()
}
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
let commit_opstamp = self.load_metas().opstamp;
MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
}
// Starts a merge operation. This function will block until the merge operation is effectively
// started. Note that it does not wait for the merge to terminate.
// The calling thread should not be blocked for a long time, as this only involves waiting for the
// `SegmentUpdater` queue, which in turn only contains lightweight operations.
//
// The merge itself happens on a different thread.
//
// When successful, this function returns a `Future` for a `Result<SegmentMeta>` that represents
// the actual outcome of the merge operation.
//
// It returns an error if for some reason the merge operation could not be started.
//
// At this point an error is not necessarily the sign of a malfunction.
// (e.g. A rollback could have happened between the instant when the merge operation was
// suggested and the moment when it ended up being executed.)
//
// `segment_ids` is required to be non-empty.
pub fn start_merge(
&self,
merge_operation: MergeOperation,
) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> {
assert!(
!merge_operation.segment_ids().is_empty(),
"Segment_ids cannot be empty."
);
let segment_updater = self.clone();
let segment_updater_clone = self.clone();
let segment_entries: Vec<SegmentEntry> = self
.0
.segment_manager
.start_merge(merge_operation.segment_ids())?;
info!("Starting merge - {:?}", merge_operation.segment_ids());
// let segment_ids_vec = merge_operation.segment_ids.to_vec();
let (merging_future_send, merging_future_recv) =
oneshot::channel::<crate::Result<SegmentMeta>>();
let merging_thread_id = self.get_merging_thread_id();
info!(
"Starting merge thread #{} - {:?}",
merging_thread_id,
merge_operation.segment_ids()
);
let (merging_future_send, merging_future_recv) = oneshot();
self.merge_thread_pool.spawn_ok(async move {
// The fact that `merge_operation` is moved here is important.
// Its lifetime is used to track how many merging threads are currently running,
// as well as which segment is currently in merge and therefore should not be
// candidate for another merge.
match merge(
&segment_updater.index,
segment_entries,
merge_operation.target_opstamp(),
) {
Ok(after_merge_segment_entry) => {
let segment_meta = segment_updater
.end_merge(merge_operation, after_merge_segment_entry)
.await;
let _send_result = merging_future_send.send(segment_meta);
}
Err(e) => {
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids().to_vec(),
e
);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
// first we need to apply deletes to our segment.
let merging_join_handle = thread::Builder::new()
.name(format!("mergingthread-{}", merging_thread_id))
.spawn(move || {
// first we need to apply deletes to our segment.
let merge_result = perform_merge(
&merge_operation,
&segment_updater_clone.0.index,
segment_entries,
);
match merge_result {
Ok(after_merge_segment_entry) => {
let merged_segment_meta = after_merge_segment_entry.meta().clone();
segment_updater_clone
.end_merge(merge_operation, after_merge_segment_entry)
.expect("Segment updater thread is corrupted.");
// the future may fail if the listener of the oneshot future
// has been destroyed.
//
// This is not a problem here, so we just ignore any
// possible error.
let _merging_future_res = merging_future_send.send(merged_segment_meta);
}
Err(e) => {
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids(),
e
);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
}
// As `merge_operation` will be dropped, the segment in merge state will
// be available for merge again.
// `merging_future_send` will be dropped, sending an error to the future.
}
}
}
});
Ok(merging_future_recv
.unwrap_or_else(|_| Err(crate::Error::SystemError("Merge failed".to_string()))))
segment_updater_clone
.0
.merging_threads
.write()
.unwrap()
.remove(&merging_thread_id);
Ok(())
})
.expect("Failed to spawn a thread.");
self.0
.merging_threads
.write()
.unwrap()
.insert(merging_thread_id, merging_join_handle);
Ok(merging_future_recv)
}
async fn consider_merge_options(&self) {
let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
fn consider_merge_options(&self) {
let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge();
let (committed_segments, uncommitted_segments) =
get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager);
// Committed segments cannot be merged with uncommitted_segments.
// We therefore consider merges using these two sets of segments independently.
let merge_policy = self.get_merge_policy();
let current_opstamp = self.stamper.stamp();
let current_opstamp = self.0.stamper.stamp();
let mut merge_candidates: Vec<MergeOperation> = merge_policy
.compute_merge_candidates(&uncommitted_segments)
.into_iter()
.map(|merge_candidate| {
MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0)
})
.collect();
@@ -444,18 +429,25 @@ impl SegmentUpdater {
let committed_merge_candidates = merge_policy
.compute_merge_candidates(&committed_segments)
.into_iter()
.map(|merge_candidate: MergeCandidate| {
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
.map(|merge_candidate| {
MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0)
})
.collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates {
if let Err(err) = self.start_merge(merge_operation) {
warn!(
"Starting the merge failed for the following reason. This is not fatal. {}",
err
);
match self.start_merge_impl(merge_operation) {
Ok(merge_future) => {
if let Err(e) = merge_future.fuse().poll() {
error!("The merge task failed quickly after starting: {:?}", e);
}
}
Err(err) => {
warn!(
"Starting the merge failed for the following reason. This is not fatal. {}",
err
);
}
}
}
}
@@ -464,17 +456,15 @@ impl SegmentUpdater {
&self,
merge_operation: MergeOperation,
mut after_merge_segment_entry: SegmentEntry,
) -> impl Future<Output = crate::Result<SegmentMeta>> {
let segment_updater = self.clone();
let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
let end_merge_future = self.schedule_future(async move {
) -> Result<()> {
self.run_async(move |segment_updater| {
info!("End merge {:?}", after_merge_segment_entry.meta());
{
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.load_metas().opstamp;
if delete_operation.opstamp < committed_opstamp {
let index = &segment_updater.index;
let index = &segment_updater.0.index;
let segment = index.segment(after_merge_segment_entry.meta().clone());
if let Err(e) = advance_deletes(
segment,
@@ -492,26 +482,21 @@ impl SegmentUpdater {
// ... cancel merge
// `merge_operations` are tracked. As it is dropped, the
// segment_ids will be available again for merge.
return Err(e);
return;
}
}
}
let previous_metas = segment_updater.load_metas();
let segments_status = segment_updater
segment_updater
.0
.segment_manager
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry)?;
if segments_status == SegmentsStatus::Committed {
segment_updater
.save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
}
segment_updater.consider_merge_options().await;
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
segment_updater.consider_merge_options();
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
} // we drop all possible handles to a now-useless `SegmentMeta`.
let _ = garbage_collect_files(segment_updater).await;
Ok(())
});
end_merge_future.map_ok(|_| after_merge_segment_meta)
segment_updater.garbage_collect_files_exec();
})
.wait()
}
/// Wait for current merging threads.
@@ -529,9 +514,26 @@ impl SegmentUpdater {
///
/// Obsolete files will eventually be cleaned up
/// by the directory garbage collector.
pub fn wait_merging_thread(&self) -> crate::Result<()> {
self.merge_operations.wait_until_empty();
Ok(())
pub fn wait_merging_thread(&self) -> Result<()> {
loop {
let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = {
let mut merging_threads = self.0.merging_threads.write().unwrap();
mem::replace(merging_threads.deref_mut(), HashMap::new())
};
if merging_threads.is_empty() {
return Ok(());
}
debug!("wait merging thread {}", merging_threads.len());
for (_, merging_thread_handle) in merging_threads {
merging_thread_handle
.join()
.map(|_| ())
.map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
}
// Our merging threads may have queued their completed merged segments.
// Let's wait for that too.
self.run_async(move |_| {}).wait()?;
}
}
}
@@ -687,6 +689,7 @@ mod tests {
index_writer.segment_updater().remove_all_segments();
let seg_vec = index_writer
.segment_updater()
.0
.segment_manager
.segment_entries();
assert!(seg_vec.is_empty());

View File

@@ -6,15 +6,14 @@ use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldEntry;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::PreTokenizedStream;
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
@@ -50,7 +49,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<BoxedTokenizer>>,
tokenizers: Vec<Option<Box<dyn BoxedTokenizer>>>,
}
impl SegmentWriter {
@@ -71,10 +70,12 @@ impl SegmentWriter {
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema
.fields()
.map(
|(_, field_entry): (Field, &FieldEntry)| match field_entry.field_type() {
let tokenizers =
schema
.fields()
.iter()
.map(FieldEntry::field_type)
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
.and_then(|text_index_option| {
@@ -82,9 +83,8 @@ impl SegmentWriter {
segment.index().tokenizers().get(tokenizer_name)
}),
_ => None,
},
)
.collect();
})
.collect();
Ok(SegmentWriter {
max_doc: 0,
multifield_postings,
@@ -159,44 +159,26 @@ impl SegmentWriter {
}
}
FieldType::Str(_) => {
let mut token_streams: Vec<Box<dyn TokenStream>> = vec![];
let mut offsets = vec![];
let mut total_offset = 0;
for field_value in field_values {
match field_value.value() {
Value::PreTokStr(tok_str) => {
offsets.push(total_offset);
if let Some(last_token) = tok_str.tokens.last() {
total_offset += last_token.offset_to;
}
token_streams
.push(Box::new(PreTokenizedStream::from(tok_str.clone())));
}
Value::Str(ref text) => {
if let Some(ref mut tokenizer) =
self.tokenizers[field.field_id() as usize]
{
offsets.push(total_offset);
total_offset += text.len();
token_streams.push(tokenizer.token_stream(text));
}
}
_ => (),
let num_tokens = if let Some(ref mut tokenizer) =
self.tokenizers[field.0 as usize]
{
let texts: Vec<&str> = field_values
.iter()
.flat_map(|field_value| match *field_value.value() {
Value::Str(ref text) => Some(text.as_str()),
_ => None,
})
.collect();
if texts.is_empty() {
0
} else {
let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
self.multifield_postings
.index_text(doc_id, field, &mut token_stream)
}
}
let num_tokens = if token_streams.is_empty() {
0
} else {
let mut token_stream: Box<dyn TokenStream> =
Box::new(TokenStreamChain::new(offsets, token_streams));
self.multifield_postings
.index_text(doc_id, field, &mut token_stream)
0
};
self.fieldnorms_writer.record(doc_id, field, num_tokens);
}
FieldType::U64(ref int_option) => {
@@ -249,7 +231,6 @@ impl SegmentWriter {
}
}
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
doc.prepare_for_store();
let doc_writer = self.segment_serializer.get_store_writer();
doc_writer.store(&doc)?;
self.max_doc += 1;
@@ -315,4 +296,5 @@ mod tests {
assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
}
}

109
src/lib.rs Normal file → Executable file
View File

@@ -1,4 +1,5 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![recursion_limit = "100"]
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
@@ -10,12 +11,21 @@
//! Think `Lucene`, but in Rust.
//!
//! ```rust
//! # extern crate tempfile;
//! #
//! #[macro_use]
//! extern crate tantivy;
//!
//! // ...
//!
//! # use std::path::Path;
//! # use tempfile::TempDir;
//! # use tantivy::Index;
//! # use tantivy::schema::*;
//! # use tantivy::{Score, DocAddress};
//! # use tantivy::collector::TopDocs;
//! # use tantivy::query::QueryParser;
//! # use tantivy::schema::*;
//! # use tantivy::{doc, DocAddress, Index, Score};
//! #
//! # fn main() {
//! # // Let's create a temporary directory for the
@@ -160,6 +170,7 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset;
pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -169,58 +180,11 @@ pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings;
pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term};
use std::fmt;
use once_cell::sync::Lazy;
/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 1;
/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
major: u32,
minor: u32,
patch: u32,
index_format_version: u32,
store_compression: String,
}
impl fmt::Debug for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
static VERSION: Lazy<Version> = Lazy::new(|| Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
index_format_version: INDEX_FORMAT_VERSION,
store_compression: crate::store::COMPRESSION.to_string(),
});
impl ToString for Version {
fn to_string(&self) -> String {
format!(
"tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
self.major, self.minor, self.patch, self.index_format_version, self.store_compression
)
}
}
static VERSION_STRING: Lazy<String> = Lazy::new(|| VERSION.to_string());
/// Expose the current version of tantivy as found in Cargo.toml during compilation.
/// eg. "0.11.0" as well as the compression scheme used in the docstore.
pub fn version() -> &'static Version {
&VERSION
}
/// Exposes the complete version of tantivy as found in Cargo.toml during compilation as a string.
/// eg. "tantivy v0.11.0, index_format v1, store_compression: lz4".
pub fn version_string() -> &'static str {
VERSION_STRING.as_str()
/// Expose the current version of tantivy, as well
/// as whether it was compiled with the simd compression.
pub fn version() -> &'static str {
env!("CARGO_PKG_VERSION")
}
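A minimal usage sketch of the richer version API above (assuming the variant that returns a structured `Version` together with the separate `version_string()` helper; the other side of this hunk only exposes a plain `&'static str`):

```rust
fn main() {
    // e.g. "tantivy v0.11.0, index_format v1, store_compression: lz4"
    println!("{}", tantivy::version_string());
    // `Version` implements ToString, so both calls agree.
    assert_eq!(tantivy::version().to_string(), tantivy::version_string());
}
```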
/// Defines tantivy's merging strategy
@@ -258,13 +222,15 @@ pub type Score = f32;
pub type SegmentLocalId = u32;
impl DocAddress {
/// Return the segment ordinal id that identifies the segment
/// hosting the document in the `Searcher` it is called from.
/// Return the segment ordinal.
/// The segment ordinal is an id identifying the segment
/// hosting the document. It is only meaningful, in the context
/// of a searcher.
pub fn segment_ord(self) -> SegmentLocalId {
self.0
}
/// Return the segment-local `DocId`
/// Return the segment local `DocId`
pub fn doc(self) -> DocId {
self.1
}
@@ -273,17 +239,16 @@ impl DocAddress {
/// `DocAddress` contains all the necessary information
/// to identify a document given a `Searcher` object.
///
/// It consists of an id identifying its segment, and
/// a segment-local `DocId`.
/// It consists in an id identifying its segment, and
/// its segment-local `DocId`.
///
/// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`.
/// in the list of segment hold by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId);
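A minimal sketch of the tuple struct described above; `segment_ord()` and `doc()` simply project its two fields:

```rust
use tantivy::DocAddress;

fn main() {
    // Segment-local doc id 2 inside the first segment of a `Searcher`.
    let addr = DocAddress(0u32, 2u32);
    assert_eq!(addr.segment_ord(), 0u32);
    assert_eq!(addr.doc(), 2u32);
}
```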
#[cfg(test)]
mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::core::SegmentReader;
use crate::docset::DocSet;
@@ -333,18 +298,6 @@ mod tests {
sample_with_seed(n, ratio, 4)
}
#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
let version = super::version().to_string();
assert!(regex_ptn.find(&version).is_some());
}
#[test]
#[cfg(feature = "mmap")]
fn test_indexing() {
@@ -941,3 +894,15 @@ mod tests {
}
}
}
#[cfg(feature = "forbench")]
pub mod forbench {
pub mod compression {
pub use crate::postings::compression::*;
}
pub mod bitset {
pub use crate::common::BitSet;
pub use crate::common::TinySet;
}
}

View File

@@ -22,9 +22,11 @@
///
/// # Example
///
/// ```rust
/// ```
/// #[macro_use]
/// extern crate tantivy;
///
/// use tantivy::schema::{Schema, TEXT, FAST};
/// use tantivy::doc;
///
/// //...
///
@@ -35,9 +37,9 @@
/// let likes = schema_builder.add_u64_field("num_u64", FAST);
/// let schema = schema_builder.build();
/// let doc = doc!(
/// title => "Life Aquatic",
/// author => "Wes Anderson",
/// likes => 4u64
/// title => "Life Aquatic",
/// author => "Wes Anderson",
/// likes => 4u64
/// );
/// # }
/// ```

View File

@@ -36,10 +36,11 @@ struct Positions {
impl Positions {
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
let skip_len = skip_source.len();
let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let (skip_source, long_skip_source) =
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
let body_split = body.len() - u64::SIZE_IN_BYTES * (num_long_skips as usize);
let (skip_source, long_skip_source) = body.split(body_split);
Positions {
bit_packer: BitPacker4x::new(),
skip_source,

View File

@@ -160,9 +160,9 @@ impl VIntEncoder for BlockEncoder {
}
impl VIntDecoder for BlockDecoder {
fn uncompress_vint_sorted<'a>(
fn uncompress_vint_sorted(
&mut self,
compressed_data: &'a [u8],
compressed_data: &[u8],
offset: u32,
num_els: usize,
) -> usize {
@@ -170,7 +170,7 @@ impl VIntDecoder for BlockDecoder {
vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
}
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
self.output_len = num_els;
vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
}
@@ -268,80 +268,17 @@ pub mod tests {
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::*;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let mut seed: [u8; 32] = [0; 32];
seed[31] = seed_val;
let mut rng = StdRng::from_seed(seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}
pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> {
generate_array_with_seed(n, ratio, 4)
}
#[bench]
fn bench_compress(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
b.iter(|| {
encoder.compress_block_sorted(&data, 0u32);
});
}
#[bench]
fn bench_uncompress(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
let mut decoder = BlockDecoder::new();
b.iter(|| {
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
});
}
#[test]
fn test_all_docs_compression_numbits() {
for expected_num_bits in 0u8.. {
for expected_num_bits in 0u8..33u8 {
let mut data = [0u32; 128];
if expected_num_bits > 0 {
data[0] = (1u64 << (expected_num_bits as usize) - 1) as u32;
data[0] = (1u64 << (expected_num_bits as u64) - 1u64) as u32;
}
let mut encoder = BlockEncoder::new();
let (num_bits, compressed) = encoder.compress_block_unsorted(&data);
assert_eq!(compressed.len(), compressed_block_size(num_bits));
}
}
const NUM_INTS_BENCH_VINT: usize = 10;
#[bench]
fn bench_compress_vint(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
b.iter(|| {
encoder.compress_vint_sorted(&data, 0u32);
});
}
#[bench]
fn bench_uncompress_vint(b: &mut Bencher) {
let mut encoder = BlockEncoder::new();
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
let compressed = encoder.compress_vint_sorted(&data, 0u32);
let mut decoder = BlockDecoder::new();
b.iter(|| {
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
});
}
}

View File

@@ -3,6 +3,7 @@ Postings module (also called inverted index)
*/
mod block_search;
pub(crate) mod compression;
/// Postings module
///
@@ -356,9 +357,9 @@ pub mod tests {
#[test]
fn test_skip_next() {
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
let term_0 = Term::from_field_u64(Field(0), 0);
let term_1 = Term::from_field_u64(Field(0), 1);
let term_2 = Term::from_field_u64(Field(0), 2);
let num_docs = 300u32;
@@ -511,19 +512,19 @@ pub mod tests {
}
pub static TERM_A: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0);
let field = Field(0);
Term::from_field_text(field, "a")
});
pub static TERM_B: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0);
let field = Field(0);
Term::from_field_text(field, "b")
});
pub static TERM_C: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0);
let field = Field(0);
Term::from_field_text(field, "c")
});
pub static TERM_D: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0);
let field = Field(0);
Term::from_field_text(field, "d")
});
@@ -622,23 +623,23 @@ pub mod tests {
assert!(!postings_unopt.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::*;
use crate::docset::SkipResult;
use crate::query::Intersection;
use crate::schema::IndexRecordOption;
use crate::tests;
use crate::DocSet;
use docset::SkipResult;
use query::Intersection;
use schema::IndexRecordOption;
use test::{self, Bencher};
use tests;
use DocSet;
#[bench]
fn bench_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
@@ -652,8 +653,7 @@ mod bench {
#[bench]
fn bench_segment_intersection(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let segment_postings_a = segment_reader
@@ -683,8 +683,7 @@ mod bench {
}
fn bench_skip_next(p: f64, b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p);
@@ -739,8 +738,7 @@ mod bench {
#[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let n: u32 = test::black_box(17);

View File

@@ -12,7 +12,6 @@ use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId;
use crate::Result;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use std::marker::PhantomData;
@@ -61,12 +60,12 @@ fn make_field_partition(
.iter()
.map(|(key, _, _)| Term::wrap(key).field())
.enumerate();
let mut prev_field_opt = None;
let mut prev_field = Field(u32::max_value());
let mut fields = vec![];
let mut offsets = vec![];
for (offset, field) in term_offsets_it {
if Some(field) != prev_field_opt {
prev_field_opt = Some(field);
if field != prev_field {
prev_field = field;
fields.push(field);
offsets.push(offset);
}
@@ -86,7 +85,8 @@ impl MultiFieldPostingsWriter {
let term_index = TermHashMap::new(table_bits);
let per_field_postings_writers: Vec<_> = schema
.fields()
.map(|(_, field_entry)| posting_from_field_entry(field_entry))
.iter()
.map(|field_entry| posting_from_field_entry(field_entry))
.collect();
MultiFieldPostingsWriter {
heap: MemoryArena::new(),
@@ -106,8 +106,7 @@ impl MultiFieldPostingsWriter {
field: Field,
token_stream: &mut dyn TokenStream,
) -> u32 {
let postings_writer =
self.per_field_postings_writers[field.field_id() as usize].deref_mut();
let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
postings_writer.index_text(
&mut self.term_index,
doc,
@@ -118,8 +117,7 @@ impl MultiFieldPostingsWriter {
}
pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId {
let postings_writer =
self.per_field_postings_writers[term.field().field_id() as usize].deref_mut();
let postings_writer = self.per_field_postings_writers[term.field().0 as usize].deref_mut();
postings_writer.subscribe(&mut self.term_index, doc, 0u32, term, &mut self.heap)
}
@@ -129,12 +127,12 @@ impl MultiFieldPostingsWriter {
pub fn serialize(
&self,
serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k);
let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new();
let field_offsets = make_field_partition(&term_offsets);
@@ -149,7 +147,7 @@ impl MultiFieldPostingsWriter {
let unordered_term_ids = term_offsets[start..stop]
.iter()
.map(|&(_, _, bucket)| bucket);
let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate()
.map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
@@ -161,7 +159,7 @@ impl MultiFieldPostingsWriter {
FieldType::Bytes => {}
}
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
let postings_writer = &self.per_field_postings_writers[field.0 as usize];
let mut field_serializer =
serializer.new_field(field, postings_writer.total_num_tokens())?;
postings_writer.serialize(

View File

@@ -141,7 +141,10 @@ impl<'a> FieldSerializer<'a> {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions())
(
index_option.is_termfreq_enabled(),
index_option.is_position_enabled(),
)
} else {
(false, false)
}

View File

@@ -310,7 +310,6 @@ mod bench {
use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use byteorder::{NativeEndian, WriteBytesExt};
use std::iter;
use test::Bencher;
const NUM_STACK: usize = 10_000;
@@ -336,10 +335,11 @@ mod bench {
fn bench_push_stack(bench: &mut Bencher) {
bench.iter(|| {
let mut heap = MemoryArena::new();
let mut stacks: Vec<ExpUnrolledLinkedList> =
iter::repeat_with(ExpUnrolledLinkedList::new)
.take(NUM_STACK)
.collect();
let mut stacks = Vec::with_capacity(100);
for _ in 0..NUM_STACK {
let mut stack = ExpUnrolledLinkedList::new();
stacks.push(stack);
}
for s in 0..NUM_STACK {
for i in 0u32..STACK_SIZE {
let t = s * 392017 % NUM_STACK;

View File

@@ -130,4 +130,5 @@ mod tests {
assert!(!scorer.advance());
}
}
}

View File

@@ -8,13 +8,15 @@ use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::TantivyError;
use crate::{Result, SkipResult};
use std::sync::Arc;
use tantivy_fst::Automaton;
/// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A> {
pub struct AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
{
field: Field,
automaton: Arc<A>,
automaton: A,
}
impl<A> AutomatonWeight<A>
@@ -22,16 +24,12 @@ where
A: Automaton + Send + Sync + 'static,
{
/// Create a new AutomationWeight
pub fn new<IntoArcA: Into<Arc<A>>>(field: Field, automaton: IntoArcA) -> AutomatonWeight<A> {
AutomatonWeight {
field,
automaton: automaton.into(),
}
pub fn new(field: Field, automaton: A) -> AutomatonWeight<A> {
AutomatonWeight { field, automaton }
}
fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton);
let term_stream_builder = term_dict.search(&self.automaton);
term_stream_builder.into_stream()
}
}

View File

@@ -216,48 +216,5 @@ mod tests {
assert!(!docset.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::BitSet;
use super::BitSetDocSet;
use crate::test;
use crate::tests;
use crate::DocSet;
#[bench]
fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
b.iter(|| {
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els.iter().cloned() {
bitset.insert(el);
}
});
}
#[bench]
fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els {
bitset.insert(el);
}
b.iter(|| bitset.clone());
}
#[bench]
fn bench_bitset_1pct_clone_iterate(b: &mut test::Bencher) {
let els = tests::sample(1_000_000u32, 0.01);
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els {
bitset.insert(el);
}
b.iter(|| {
let mut docset = BitSetDocSet::from(bitset.clone());
while docset.advance() {}
});
}
}

View File

@@ -137,4 +137,5 @@ mod tests {
fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472);
}
}
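For reference, the expected value in this test agrees with the Lucene-style BM25 idf that tantivy appears to use (the formula itself lives outside this hunk, so it is stated here as an assumption):

$$\mathrm{idf}(n, N) = \ln\!\Big(1 + \frac{N - n + 0.5}{n + 0.5}\Big), \qquad \mathrm{idf}(1, 2) = \ln\!\Big(1 + \frac{1.5}{1.5}\Big) = \ln 2 \approx 0.6931472.$$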

View File

@@ -9,8 +9,7 @@ use crate::Result;
use crate::Searcher;
use std::collections::BTreeSet;
/// The boolean query returns a set of documents
/// that matches the Boolean combination of constituent subqueries.
/// The boolean query combines a set of queries
///
/// The documents matched by the boolean query are
/// those which
@@ -20,113 +19,6 @@ use std::collections::BTreeSet;
/// `MustNot` occurrence.
/// * match at least one of the subqueries that is not
/// a `MustNot` occurrence.
///
///
/// You can combine other query types and their `Occur`ances into one `BooleanQuery`
///
/// ```rust
///use tantivy::collector::Count;
///use tantivy::doc;
///use tantivy::query::{BooleanQuery, Occur, PhraseQuery, Query, TermQuery};
///use tantivy::schema::{IndexRecordOption, Schema, TEXT};
///use tantivy::Term;
///use tantivy::{Index, Result};
///
///fn main() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let body = schema_builder.add_text_field("body", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// body => "hidden",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// body => "found",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// // Make TermQuery's for "girl" and "diary" in the title
/// let girl_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "girl"),
/// IndexRecordOption::Basic,
/// ));
/// let diary_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// ));
/// // A TermQuery with "found" in the body
/// let body_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(body, "found"),
/// IndexRecordOption::Basic,
/// ));
/// // TermQuery "diary" must and "girl" must not be present
/// let queries_with_occurs1 = vec![
/// (Occur::Must, diary_term_query.box_clone()),
/// (Occur::MustNot, girl_term_query),
/// ];
/// // Make a BooleanQuery equivalent to
/// // title:+diary title:-girl
/// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
/// assert_eq!(count1, 1);
///
/// // TermQuery for "cow" in the title
/// let cow_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "cow"),
/// IndexRecordOption::Basic,
/// ));
/// // "title:diary OR title:cow"
/// let title_diary_or_cow = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, cow_term_query),
/// ]);
/// let count2 = searcher.search(&title_diary_or_cow, &Count)?;
/// assert_eq!(count2, 4);
///
/// // Make a `PhraseQuery` from a vector of `Term`s
/// let phrase_query: Box<dyn Query> = Box::new(PhraseQuery::new(vec![
/// Term::from_field_text(title, "dairy"),
/// Term::from_field_text(title, "cow"),
/// ]));
/// // You can combine subqueries of different types into 1 BooleanQuery:
/// // `TermQuery` and `PhraseQuery`
/// // "title:diary OR "dairy cow"
/// let term_of_phrase_query = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, phrase_query.box_clone()),
/// ]);
/// let count3 = searcher.search(&term_of_phrase_query, &Count)?;
/// assert_eq!(count3, 4);
///
/// // You can nest one BooleanQuery inside another
/// // body:found AND ("title:diary OR "dairy cow")
/// let nested_query = BooleanQuery::from(vec![
/// (Occur::Must, body_term_query),
/// (Occur::Must, Box::new(term_of_phrase_query))
/// ]);
/// let count4 = searcher.search(&nested_query, &Count)?;
/// assert_eq!(count4, 1);
/// Ok(())
///}
/// ```
#[derive(Debug)]
pub struct BooleanQuery {
subqueries: Vec<(Occur, Box<dyn Query>)>,

View File

@@ -247,7 +247,9 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let query = query_parser
.parse_query("Оксана Лифенко")
.unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
scorer.advance();

View File

@@ -54,21 +54,21 @@ where
match self.excluding_state {
State::ExcludeOne(excluded_doc) => {
if doc == excluded_doc {
return false;
}
if excluded_doc > doc {
return true;
}
match self.excluding_docset.skip_next(doc) {
SkipResult::OverStep => {
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
true
false
} else if excluded_doc > doc {
true
} else {
match self.excluding_docset.skip_next(doc) {
SkipResult::OverStep => {
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
true
}
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
}
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
}
}
State::Finished => true,
@@ -175,4 +175,5 @@ mod tests {
sample_skip,
);
}
}

View File

@@ -28,11 +28,14 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// containing a specific term that is within
/// Levenshtein distance
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::FuzzyTermQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
@@ -58,6 +61,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// let searcher = reader.searcher();
///
/// {
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true);
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
@@ -67,7 +71,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
///
/// Ok(())
/// }
/// # assert!(example().is_ok());
/// ```
#[derive(Debug, Clone)]
pub struct FuzzyTermQuery {

View File

@@ -45,7 +45,7 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
})
}
/// Creates a `DocSet` that iterate through the intersection of two or more `DocSet`s.
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>> {
left: TDocSet,
right: TDocSet,

View File

@@ -5,7 +5,7 @@ use Score;
use SkipResult;
/// Creates a `DocSet` that iterate through the intersection of two `DocSet`s.
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct IntersectionTwoTerms<TDocSet> {
left: TDocSet,
right: TDocSet

View File

@@ -12,6 +12,7 @@ mod exclude;
mod explanation;
mod fuzzy_query;
mod intersection;
mod occur;
mod phrase_query;
mod query;
mod query_parser;
@@ -42,6 +43,7 @@ pub use self::exclude::Exclude;
pub use self::explanation::Explanation;
pub use self::fuzzy_query::FuzzyTermQuery;
pub use self::intersection::intersect_scorers;
pub use self::occur::Occur;
pub use self::phrase_query::PhraseQuery;
pub use self::query::Query;
pub use self::query_parser::QueryParser;
@@ -53,7 +55,6 @@ pub use self::scorer::ConstScorer;
pub use self::scorer::Scorer;
pub use self::term_query::TermQuery;
pub use self::weight::Weight;
pub use tantivy_query_grammar::Occur;
#[cfg(test)]
mod tests {

View File

@@ -1,8 +1,5 @@
use std::fmt;
use std::fmt::Write;
/// Defines whether a term in a query must be present,
/// should be present or must be not present.
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur {
/// For a given document to be considered for scoring,
@@ -21,38 +18,32 @@ impl Occur {
/// - `Should` => '?',
/// - `Must` => '+'
/// - `Not` => '-'
fn to_char(self) -> char {
pub fn to_char(self) -> char {
match self {
Occur::Should => '?',
Occur::Must => '+',
Occur::MustNot => '-',
}
}
}
/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
/// Compose two occur values.
pub fn compose_occur(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
}
}
impl fmt::Display for Occur {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char(self.to_char())
}
}
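A minimal sketch of the composition rule above, written against the `Occur::compose` associated function (the other side of this hunk exposes the same logic as a free `compose_occur` function; public reachability of `compose` is assumed here):

```rust
use tantivy::query::Occur;

fn main() {
    // `Should` is neutral on the left.
    assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
    // A `MustNot` under a `Must` stays negative...
    assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
    // ...while two nested `MustNot`s cancel out.
    assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
}
```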

View File

@@ -40,7 +40,7 @@ impl PhraseQuery {
PhraseQuery::new_with_offset(terms_with_offset)
}
/// Creates a new `PhraseQuery` given a list of terms and their offsets.
/// Creates a new `PhraseQuery` given a list of terms and there offsets.
///
/// Can be used to provide custom offset for each term.
pub fn new_with_offset(mut terms: Vec<(usize, Term)>) -> PhraseQuery {
@@ -73,7 +73,7 @@ impl PhraseQuery {
.collect::<Vec<Term>>()
}
/// Returns the `PhraseWeight` for the given phrase query given a specific `searcher`.
/// Returns the `PhraseWeight` for the given phrase query given a specific `searcher`.
///
/// This function is the same as `.weight(...)` except it returns
/// a specialized type `PhraseWeight` instead of a Boxed trait.

View File

@@ -4,7 +4,6 @@ use crate::postings::Postings;
use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer};
use crate::DocId;
use std::cmp::Ordering;
struct PostingsWithOffset<TPostings> {
offset: u32,
@@ -60,16 +59,12 @@ fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i];
let right_val = right[right_i];
match left_val.cmp(&right_val) {
Ordering::Less => {
left_i += 1;
}
Ordering::Equal => {
return true;
}
Ordering::Greater => {
right_i += 1;
}
if left_val < right_val {
left_i += 1;
} else if right_val < left_val {
right_i += 1;
} else {
return true;
}
}
false
@@ -82,18 +77,14 @@ fn intersection_count(left: &[u32], right: &[u32]) -> usize {
while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i];
let right_val = right[right_i];
match left_val.cmp(&right_val) {
Ordering::Less => {
left_i += 1;
}
Ordering::Equal => {
count += 1;
left_i += 1;
right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
if left_val < right_val {
left_i += 1;
} else if right_val < left_val {
right_i += 1;
} else {
count += 1;
left_i += 1;
right_i += 1;
}
}
count
@@ -112,19 +103,15 @@ fn intersection(left: &mut [u32], right: &[u32]) -> usize {
while left_i < left_len && right_i < right_len {
let left_val = left[left_i];
let right_val = right[right_i];
match left_val.cmp(&right_val) {
Ordering::Less => {
left_i += 1;
}
Ordering::Equal => {
left[count] = left_val;
count += 1;
left_i += 1;
right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
if left_val < right_val {
left_i += 1;
} else if right_val < left_val {
right_i += 1;
} else {
left[count] = left_val;
count += 1;
left_i += 1;
right_i += 1;
}
}
count

View File

@@ -1,4 +1,6 @@
mod query_grammar;
mod query_parser;
mod user_input_ast;
pub mod logical_ast;
pub use self::query_parser::QueryParser;

View File

@@ -1,5 +1,6 @@
use super::user_input_ast::*;
use crate::Occur;
use crate::query::occur::Occur;
use crate::query::query_parser::user_input_ast::UserInputBound;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;
@@ -82,67 +83,28 @@ parser! {
}
parser! {
/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>] {
let range_term_val = || {
word().or(negative_number()).or(char('*').with(value("*".to_string())))
};
// check for unbounded range in the form of <5, <=10, >5, >=5
let elastic_unbounded_range = (choice([attempt(string(">=")),
attempt(string("<=")),
attempt(string("<")),
attempt(string(">"))])
.skip(spaces()),
range_term_val()).
map(|(comparison_sign, bound): (&str, String)|
match comparison_sign {
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
"<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
"<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
// default case
_ => (UserInputBound::Unbounded, UserInputBound::Unbounded)
});
let lower_bound = (one_of("{[".chars()), range_term_val())
.map(|(boundary_char, lower_bound): (char, String)|
if lower_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '{' {
UserInputBound::Exclusive(lower_bound)
} else {
UserInputBound::Inclusive(lower_bound)
});
if boundary_char == '{' { UserInputBound::Exclusive(lower_bound) }
else { UserInputBound::Inclusive(lower_bound) });
let upper_bound = (range_term_val(), one_of("}]".chars()))
.map(|(higher_bound, boundary_char): (String, char)|
if higher_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '}' {
UserInputBound::Exclusive(higher_bound)
} else {
UserInputBound::Inclusive(higher_bound)
});
// return only lower and upper
let lower_to_upper = (lower_bound.
skip((spaces(),
string("TO"),
spaces())),
upper_bound);
(optional(field()).skip(spaces()),
// try elastic first, if it matches, the range is unbounded
attempt(elastic_unbounded_range).or(lower_to_upper))
.map(|(field, (lower, upper))|
// Construct the leaf from extracted field (optional)
// and bounds
UserInputLeaf::Range {
field,
lower,
upper
if boundary_char == '}' { UserInputBound::Exclusive(higher_bound) }
else { UserInputBound::Inclusive(higher_bound) });
(
optional(field()),
lower_bound
.skip((spaces(), string("TO"), spaces())),
upper_bound,
).map(|(field, lower, upper)| UserInputLeaf::Range {
field,
lower,
upper
})
}
}
@@ -296,49 +258,6 @@ mod test {
);
}
#[test]
fn test_parse_elastic_query_ranges() {
test_parse_query_to_ast_helper("title: >a", "title:{\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title:>=a", "title:[\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title: <a", "title:{\"*\" TO \"a\"}");
test_parse_query_to_ast_helper("title:<=a", "title:{\"*\" TO \"a\"]");
test_parse_query_to_ast_helper("title:<=bsd", "title:{\"*\" TO \"bsd\"]");
test_parse_query_to_ast_helper("weight: >70", "weight:{\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight:>=70", "weight:[\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <70", "weight:{\"*\" TO \"70\"}");
test_parse_query_to_ast_helper("weight:<=70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: >60.7", "weight:{\"60.7\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <= 70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
}
#[test]
fn test_range_parser() {
// testing the range() parser separately
let res = range().parse("title: <hello").unwrap().0;
let expected = UserInputLeaf::Range {
field: Some("title".to_string()),
lower: UserInputBound::Unbounded,
upper: UserInputBound::Exclusive("hello".to_string()),
};
let res2 = range().parse("title:{* TO hello}").unwrap().0;
assert_eq!(res, expected);
assert_eq!(res2, expected);
let expected_weight = UserInputLeaf::Range {
field: Some("weight".to_string()),
lower: UserInputBound::Inclusive("71.2".to_string()),
upper: UserInputBound::Unbounded,
};
let res3 = range().parse("weight: >=71.2").unwrap().0;
let res4 = range().parse("weight:[71.2 TO *}").unwrap().0;
assert_eq!(res3, expected_weight);
assert_eq!(res4, expected_weight);
}
#[test]
fn test_parse_query_to_triming_spaces() {
test_parse_query_to_ast_helper(" abc", "\"abc\"");
@@ -372,7 +291,7 @@ mod test {
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:{\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:[\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
test_is_parse_err("abc + ");
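Since this hunk changes the range grammar, here is a minimal, self-contained sketch of parsing range queries through the public `QueryParser` (the Elastic-style `>=`/`<` forms are assumed to exist only on the side of this diff that includes them):

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let query_parser = QueryParser::for_index(&index, vec![title]);

    // Classic bracketed range: inclusive lower bound, exclusive upper bound.
    let bracket_range = query_parser.parse_query("title:[a TO c}").unwrap();
    // Elastic-style unbounded range, equivalent to "title:[b TO *}".
    let elastic_range = query_parser.parse_query("title:>=b").unwrap();
    println!("{:?}\n{:?}", bracket_range, elastic_range);
}
```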

View File

@@ -1,5 +1,9 @@
use super::logical_ast::*;
use super::query_grammar::parse_to_ast;
use super::user_input_ast::*;
use crate::core::Index;
use crate::query::occur::compose_occur;
use crate::query::query_parser::logical_ast::LogicalAST;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::EmptyQuery;
@@ -12,11 +16,11 @@ use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use combine::Parser;
use std::borrow::Cow;
use std::num::{ParseFloatError, ParseIntError};
use std::ops::Bound;
use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
/// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Fail)]
@@ -218,8 +222,9 @@ impl QueryParser {
/// Parse the user query into an AST.
fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> {
let user_input_ast =
tantivy_query_grammar::parse_query(query).map_err(|_| QueryParserError::SyntaxError)?;
let (user_input_ast, _remaining) = parse_to_ast()
.parse(query)
.map_err(|_| QueryParserError::SyntaxError)?;
self.compute_logical_ast(user_input_ast)
}
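For context, `parse_query_to_logical_ast` sits below the public `QueryParser::parse_query` entry point, which lowers the user string to a `UserInputAST`, then to a logical AST, and finally to an executable `Query`. A minimal sketch of that public path, assuming the `QueryParser::for_index` API of this era; the schema and the query string are illustrative:

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let parser = QueryParser::for_index(&index, vec![title]);
    // Internally this goes through `parse_query_to_logical_ast`; a malformed
    // query string would surface here as `QueryParserError::SyntaxError`.
    let _query = parser
        .parse_query("+title:toto -titi")
        .expect("the query string is well-formed");
}
```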
@@ -364,7 +369,6 @@ impl QueryParser {
match *bound {
UserInputBound::Inclusive(_) => Ok(Bound::Included(term)),
UserInputBound::Exclusive(_) => Ok(Bound::Excluded(term)),
UserInputBound::Unbounded => Ok(Bound::Unbounded),
}
}
@@ -394,7 +398,7 @@ impl QueryParser {
let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
for sub_query in sub_queries {
let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
let new_occur = Occur::compose(default_occur, occur);
let new_occur = compose_occur(default_occur, occur);
logical_sub_queries.push((new_occur, sub_ast));
}
Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
@@ -402,7 +406,7 @@ impl QueryParser {
UserInputAST::Unary(left_occur, subquery) => {
let (right_occur, logical_sub_queries) =
self.compute_logical_ast_with_occur(*subquery)?;
Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
Ok((compose_occur(left_occur, right_occur), logical_sub_queries))
}
UserInputAST::Leaf(leaf) => {
let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
@@ -624,7 +628,7 @@ mod test {
pub fn test_parse_query_untokenized() {
test_parse_query_to_logical_ast_helper(
"nottokenized:\"wordone wordtwo\"",
"Term(field=7,bytes=[119, 111, 114, 100, 111, 110, \
"Term([0, 0, 0, 7, 119, 111, 114, 100, 111, 110, \
101, 32, 119, 111, 114, 100, 116, 119, 111])",
false,
);
@@ -668,25 +672,19 @@ mod test {
.is_ok());
test_parse_query_to_logical_ast_helper(
"unsigned:2324",
"Term(field=3,bytes=[0, 0, 0, 0, 0, 0, 9, 20])",
"Term([0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 9, 20])",
false,
);
test_parse_query_to_logical_ast_helper(
"signed:-2324",
&format!(
"{:?}",
Term::from_field_i64(Field::from_field_id(2u32), -2324)
),
&format!("{:?}", Term::from_field_i64(Field(2u32), -2324)),
false,
);
test_parse_query_to_logical_ast_helper(
"float:2.5",
&format!(
"{:?}",
Term::from_field_f64(Field::from_field_id(10u32), 2.5)
),
&format!("{:?}", Term::from_field_f64(Field(10u32), 2.5)),
false,
);
}
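The two debug formats compared in these tests carry the same information: `Term(field=3,bytes=[...])` prints the field id separately, while the older `Term([0, 0, 0, 3, ...])` form prints the raw term buffer, whose first four bytes are the field id in big-endian order. A tiny illustrative sketch of that byte layout (plain byte arithmetic, not tantivy API):

```rust
// Shows how a u32 field id (here 3) maps onto the leading four big-endian
// bytes seen in the `Term([0, 0, 0, 3, ...])` debug output.
fn field_prefix(field_id: u32) -> [u8; 4] {
    field_id.to_be_bytes()
}

fn main() {
    assert_eq!(field_prefix(3), [0, 0, 0, 3]);
}
```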
@@ -695,19 +693,19 @@ mod test {
pub fn test_parse_query_to_ast_single_term() {
test_parse_query_to_logical_ast_helper(
"title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
false,
);
test_parse_query_to_logical_ast_helper(
"+title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
false,
);
test_parse_query_to_logical_ast_helper(
"+title:toto -titi",
"(+Term(field=0,bytes=[116, 111, 116, 111]) \
-(Term(field=0,bytes=[116, 105, 116, 105]) \
Term(field=1,bytes=[116, 105, 116, 105])))",
"(+Term([0, 0, 0, 0, 116, 111, 116, 111]) \
-(Term([0, 0, 0, 0, 116, 105, 116, 105]) \
Term([0, 0, 0, 1, 116, 105, 116, 105])))",
false,
);
assert_eq!(
@@ -722,13 +720,14 @@ mod test {
pub fn test_parse_query_to_ast_two_terms() {
test_parse_query_to_logical_ast_helper(
"title:a b",
"(Term(field=0,bytes=[97]) (Term(field=0,bytes=[98]) Term(field=1,bytes=[98])))",
"(Term([0, 0, 0, 0, 97]) (Term([0, 0, 0, 0, 98]) \
Term([0, 0, 0, 1, 98])))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:\"a b\"",
"\"[(0, Term(field=0,bytes=[97])), \
(1, Term(field=0,bytes=[98]))]\"",
"\"[(0, Term([0, 0, 0, 0, 97])), \
(1, Term([0, 0, 0, 0, 98]))]\"",
false,
);
}
@@ -737,43 +736,45 @@ mod test {
pub fn test_parse_query_to_ast_ranges() {
test_parse_query_to_logical_ast_helper(
"title:[a TO b]",
"(Included(Term(field=0,bytes=[97])) TO Included(Term(field=0,bytes=[98])))",
"(Included(Term([0, 0, 0, 0, 97])) TO \
Included(Term([0, 0, 0, 0, 98])))",
false,
);
test_parse_query_to_logical_ast_helper(
"[a TO b]",
"((Included(Term(field=0,bytes=[97])) TO \
Included(Term(field=0,bytes=[98]))) \
(Included(Term(field=1,bytes=[97])) TO \
Included(Term(field=1,bytes=[98]))))",
"((Included(Term([0, 0, 0, 0, 97])) TO \
Included(Term([0, 0, 0, 0, 98]))) \
(Included(Term([0, 0, 0, 1, 97])) TO \
Included(Term([0, 0, 0, 1, 98]))))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:{titi TO toto}",
"(Excluded(Term(field=0,bytes=[116, 105, 116, 105])) TO \
Excluded(Term(field=0,bytes=[116, 111, 116, 111])))",
"(Excluded(Term([0, 0, 0, 0, 116, 105, 116, 105])) TO \
Excluded(Term([0, 0, 0, 0, 116, 111, 116, 111])))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:{* TO toto}",
"(Unbounded TO Excluded(Term(field=0,bytes=[116, 111, 116, 111])))",
"(Unbounded TO \
Excluded(Term([0, 0, 0, 0, 116, 111, 116, 111])))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:{titi TO *}",
"(Excluded(Term(field=0,bytes=[116, 105, 116, 105])) TO Unbounded)",
"(Excluded(Term([0, 0, 0, 0, 116, 105, 116, 105])) TO Unbounded)",
false,
);
test_parse_query_to_logical_ast_helper(
"signed:{-5 TO 3}",
"(Excluded(Term(field=2,bytes=[127, 255, 255, 255, 255, 255, 255, 251])) TO \
Excluded(Term(field=2,bytes=[128, 0, 0, 0, 0, 0, 0, 3])))",
"(Excluded(Term([0, 0, 0, 2, 127, 255, 255, 255, 255, 255, 255, 251])) TO \
Excluded(Term([0, 0, 0, 2, 128, 0, 0, 0, 0, 0, 0, 3])))",
false,
);
test_parse_query_to_logical_ast_helper(
"float:{-1.5 TO 1.5}",
"(Excluded(Term(field=10,bytes=[64, 7, 255, 255, 255, 255, 255, 255])) TO \
Excluded(Term(field=10,bytes=[191, 248, 0, 0, 0, 0, 0, 0])))",
"(Excluded(Term([0, 0, 0, 10, 64, 7, 255, 255, 255, 255, 255, 255])) TO \
Excluded(Term([0, 0, 0, 10, 191, 248, 0, 0, 0, 0, 0, 0])))",
false,
);
@@ -878,19 +879,19 @@ mod test {
pub fn test_parse_query_to_ast_conjunction() {
test_parse_query_to_logical_ast_helper(
"title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
true,
);
test_parse_query_to_logical_ast_helper(
"+title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
true,
);
test_parse_query_to_logical_ast_helper(
"+title:toto -titi",
"(+Term(field=0,bytes=[116, 111, 116, 111]) \
-(Term(field=0,bytes=[116, 105, 116, 105]) \
Term(field=1,bytes=[116, 105, 116, 105])))",
"(+Term([0, 0, 0, 0, 116, 111, 116, 111]) \
-(Term([0, 0, 0, 0, 116, 105, 116, 105]) \
Term([0, 0, 0, 1, 116, 105, 116, 105])))",
true,
);
assert_eq!(
@@ -901,15 +902,15 @@ mod test {
);
test_parse_query_to_logical_ast_helper(
"title:a b",
"(+Term(field=0,bytes=[97]) \
+(Term(field=0,bytes=[98]) \
Term(field=1,bytes=[98])))",
"(+Term([0, 0, 0, 0, 97]) \
+(Term([0, 0, 0, 0, 98]) \
Term([0, 0, 0, 1, 98])))",
true,
);
test_parse_query_to_logical_ast_helper(
"title:\"a b\"",
"\"[(0, Term(field=0,bytes=[97])), \
(1, Term(field=0,bytes=[98]))]\"",
"\"[(0, Term([0, 0, 0, 0, 97])), \
(1, Term([0, 0, 0, 0, 98]))]\"",
true,
);
}
@@ -918,8 +919,10 @@ mod test {
pub fn test_query_parser_hyphen() {
test_parse_query_to_logical_ast_helper(
"title:www-form-encoded",
"\"[(0, Term(field=0,bytes=[119, 119, 119])), (1, Term(field=0,bytes=[102, 111, 114, 109])), (2, Term(field=0,bytes=[101, 110, 99, 111, 100, 101, 100]))]\"",
false
"\"[(0, Term([0, 0, 0, 0, 119, 119, 119])), \
(1, Term([0, 0, 0, 0, 102, 111, 114, 109])), \
(2, Term([0, 0, 0, 0, 101, 110, 99, 111, 100, 101, 100]))]\"",
false,
);
}
}


@@ -1,9 +1,8 @@
use std::fmt;
use std::fmt::{Debug, Formatter};
use crate::Occur;
use crate::query::Occur;
#[derive(PartialEq)]
pub enum UserInputLeaf {
Literal(UserInputLiteral),
All,
@@ -36,7 +35,6 @@ impl Debug for UserInputLeaf {
}
}
#[derive(PartialEq)]
pub struct UserInputLiteral {
pub field_name: Option<String>,
pub phrase: String,
@@ -51,11 +49,9 @@ impl fmt::Debug for UserInputLiteral {
}
}
#[derive(PartialEq)]
pub enum UserInputBound {
Inclusive(String),
Exclusive(String),
Unbounded,
}
impl UserInputBound {
@@ -63,7 +59,6 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
UserInputBound::Unbounded => write!(formatter, "{{\"*\""),
}
}
@@ -71,7 +66,6 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
UserInputBound::Unbounded => write!(formatter, "\"*\"}}"),
}
}
@@ -79,7 +73,6 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref contents) => contents,
UserInputBound::Exclusive(ref contents) => contents,
UserInputBound::Unbounded => &"*",
}
}
}
@@ -151,7 +144,7 @@ impl fmt::Debug for UserInputAST {
Ok(())
}
UserInputAST::Unary(ref occur, ref subquery) => {
write!(formatter, "{}({:?})", occur, subquery)
write!(formatter, "{}({:?})", occur.to_char(), subquery)
}
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
}
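The bound-formatting code above is what produces the bracket notation used throughout the parser tests: an inclusive bound prints with a square bracket and an exclusive bound with a curly brace, so `["1" TO "toto"}` means 1 <= x < toto. A standalone illustration of that convention; the helper below is hypothetical, not tantivy API:

```rust
// Inclusive bounds print as square brackets, exclusive bounds as curly braces.
fn left_bracket(inclusive: bool, word: &str) -> String {
    if inclusive {
        format!("[\"{}\"", word)
    } else {
        format!("{{\"{}\"", word)
    }
}

fn main() {
    assert_eq!(left_bracket(true, "1"), "[\"1\"");        // `["1" TO ...`
    assert_eq!(left_bracket(false, "toto"), "{\"toto\""); // `{"toto" TO ...`
}
```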


@@ -38,33 +38,45 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example
///
/// ```rust
/// use tantivy::collector::Count;
/// use tantivy::query::RangeQuery;
/// use tantivy::schema::{Schema, INDEXED};
/// use tantivy::{doc, Index};
/// # fn test() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let year_field = schema_builder.add_u64_field("year", INDEXED);
/// let schema = schema_builder.build();
///
/// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
/// for year in 1950u64..2017u64 {
/// let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// for _ in 0..num_docs_within_year {
/// index_writer.add_document(doc!(year_field => year));
/// }
/// }
/// index_writer.commit()?;
///
/// let reader = index.reader()?;
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{Schema, INDEXED};
/// # use tantivy::collector::Count;
/// # use tantivy::Result;
/// # use tantivy::query::RangeQuery;
/// #
/// # fn run() -> Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let year_field = schema_builder.add_u64_field("year", INDEXED);
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # {
/// # let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
/// # for year in 1950u64..2017u64 {
/// # let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// # for _ in 0..num_docs_within_year {
/// # index_writer.add_document(doc!(year_field => year));
/// # }
/// # }
/// # index_writer.commit().unwrap();
/// # }
/// # let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
/// assert_eq!(num_60s_books, 2285);
/// Ok(())
///
/// # assert_eq!(num_60s_books, 2285);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # run().unwrap()
/// # }
/// # assert!(test().is_ok());
/// ```
#[derive(Clone, Debug)]
pub struct RangeQuery {
@@ -326,33 +338,39 @@ mod tests {
use crate::collector::Count;
use crate::schema::{Document, Field, Schema, INDEXED};
use crate::Index;
use crate::Result;
use std::collections::Bound;
#[test]
fn test_range_query_simple() {
let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build();
fn run() -> Result<()> {
let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
for year in 1950u64..2017u64 {
let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
for _ in 0..num_docs_within_year {
index_writer.add_document(doc!(year_field => year));
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
for year in 1950u64..2017u64 {
let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
for _ in 0..num_docs_within_year {
index_writer.add_document(doc!(year_field => year));
}
}
index_writer.commit().unwrap();
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled.
let count = searcher.search(&docs_in_the_sixties, &Count)?;
assert_eq!(count, 2285);
Ok(())
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled.
let count = searcher.search(&docs_in_the_sixties, &Count).unwrap();
assert_eq!(count, 2285);
run().unwrap();
}
#[test]
@@ -471,4 +489,5 @@ mod tests {
91
);
}
}
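The `map_bound` helper referenced in the hunk header at the top of this file converts a `Bound` over one type into a `Bound` over another while preserving the Included/Excluded/Unbounded variant; it is presumably how typed range endpoints are lowered into the byte-level bounds the query works with. A simplified standalone sketch of the idea, assuming that shape rather than tantivy's exact signature:

```rust
use std::ops::Bound;

// Apply a transform to the payload of a Bound while keeping its variant intact.
fn map_bound<T, U>(bound: &Bound<T>, f: impl Fn(&T) -> U) -> Bound<U> {
    match bound {
        Bound::Included(v) => Bound::Included(f(v)),
        Bound::Excluded(v) => Bound::Excluded(f(v)),
        Bound::Unbounded => Bound::Unbounded,
    }
}

fn main() {
    let lower = map_bound(&Bound::Included(1960u64), |year| year.to_be_bytes().to_vec());
    assert_eq!(lower, Bound::Included(1960u64.to_be_bytes().to_vec()));
}
```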


@@ -4,82 +4,82 @@ use crate::schema::Field;
use crate::Result;
use crate::Searcher;
use std::clone::Clone;
use std::sync::Arc;
use tantivy_fst::Regex;
/// A Regex Query matches all of the documents
/// containing a specific term that matches
/// a regex pattern.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Term};
///
/// # fn test() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ```
#[derive(Debug, Clone)]
pub struct RegexQuery {
regex: Arc<Regex>,
regex_pattern: String,
field: Field,
}
impl RegexQuery {
/// Creates a new RegexQuery from a given pattern
pub fn from_pattern(regex_pattern: &str, field: Field) -> Result<Self> {
let regex = Regex::new(&regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(regex_pattern.to_string()))?;
Ok(RegexQuery::from_regex(regex, field))
}
/// Creates a new RegexQuery from a fully built Regex
pub fn from_regex<T: Into<Arc<Regex>>>(regex: T, field: Field) -> Self {
/// Creates a new RegexQuery from a regex pattern
pub fn new(regex_pattern: String, field: Field) -> RegexQuery {
RegexQuery {
regex: regex.into(),
regex_pattern,
field,
}
}
fn specialized_weight(&self) -> AutomatonWeight<Regex> {
AutomatonWeight::new(self.field, self.regex.clone())
fn specialized_weight(&self) -> Result<AutomatonWeight<Regex>> {
let automaton = Regex::new(&self.regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(self.regex_pattern.clone()))?;
Ok(AutomatonWeight::new(self.field, automaton))
}
}
impl Query for RegexQuery {
fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight()))
Ok(Box::new(self.specialized_weight()?))
}
}
@@ -87,14 +87,13 @@ impl Query for RegexQuery {
mod test {
use super::RegexQuery;
use crate::collector::TopDocs;
use crate::schema::Schema;
use crate::schema::TEXT;
use crate::schema::{Field, Schema};
use crate::tests::assert_nearly_equals;
use crate::{Index, IndexReader};
use std::sync::Arc;
use tantivy_fst::Regex;
use crate::Index;
fn build_test_index() -> (IndexReader, Field) {
#[test]
pub fn test_regex_query() {
let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build();
@@ -110,65 +109,20 @@ mod test {
index_writer.commit().unwrap();
}
let reader = index.reader().unwrap();
(reader, country_field)
}
fn verify_regex_query(
query_matching_one: RegexQuery,
query_matching_zero: RegexQuery,
reader: IndexReader,
) {
let searcher = reader.searcher();
{
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
let scored_docs = searcher
.search(&query_matching_one, &TopDocs::with_limit(2))
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
assert_nearly_equals(1f32, score);
}
let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
let top_docs = searcher
.search(&query_matching_zero, &TopDocs::with_limit(2))
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert!(top_docs.is_empty(), "Expected ZERO document");
}
#[test]
pub fn test_regex_query() {
let (reader, field) = build_test_index();
let matching_one = RegexQuery::from_pattern("jap[ao]n", field).unwrap();
let matching_zero = RegexQuery::from_pattern("jap[A-Z]n", field).unwrap();
verify_regex_query(matching_one, matching_zero, reader);
}
#[test]
pub fn test_construct_from_regex() {
let (reader, field) = build_test_index();
let matching_one = RegexQuery::from_regex(Regex::new("jap[ao]n").unwrap(), field);
let matching_zero = RegexQuery::from_regex(Regex::new("jap[A-Z]n").unwrap(), field);
verify_regex_query(matching_one, matching_zero, reader);
}
#[test]
pub fn test_construct_from_reused_regex() {
let r1 = Arc::new(Regex::new("jap[ao]n").unwrap());
let r2 = Arc::new(Regex::new("jap[A-Z]n").unwrap());
let (reader, field) = build_test_index();
let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);
verify_regex_query(matching_one, matching_zero, reader.clone());
let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);
verify_regex_query(matching_one, matching_zero, reader.clone());
}
}
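The two constructors juxtaposed in this hunk differ in when the regex automaton is compiled: `from_pattern`/`from_regex` compile (or accept) the `tantivy_fst::Regex` up front and share it behind an `Arc`, while `new` stores the pattern string and recompiles it in `specialized_weight` every time a weight is built. A short sketch of the reuse pattern enabled by the `from_regex` side; the field id and pattern are illustrative:

```rust
use std::sync::Arc;
use tantivy::query::RegexQuery;
use tantivy::schema::Field;
use tantivy_fst::Regex;

fn main() {
    // Illustrative field id; in a real program it comes from the schema.
    let country_field = Field::from_field_id(0);
    // Compile the automaton once...
    let regex = Arc::new(Regex::new("jap[ao]n").unwrap());
    // ...and reuse it for several queries without recompiling the pattern.
    let _q1 = RegexQuery::from_regex(regex.clone(), country_field);
    let _q2 = RegexQuery::from_regex(regex, country_field);
}
```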
