Mirror of https://github.com/quickwit-oss/tantivy.git
Synced 2026-01-03 07:42:54 +00:00

Compare commits
10 commits: debugging-… ... nrt

| Author | SHA1 | Date |
|---|---|---|
|  | 9bc6a43917 |  |
|  | e5bf41c1f6 |  |
|  | 58d40ebf95 |  |
|  | f07634517f |  |
|  | 2edea107ef |  |
|  | a6b5f4f5b5 |  |
|  | 7859ee4a39 |  |
|  | f06e116aae |  |
|  | ec32e0546a |  |
|  | 0cd10e1197 |  |
2 .gitignore (vendored)
@@ -1,5 +1,4 @@
 tantivy.iml
-proptest-regressions
 *.swp
 target
 target/debug
@@ -12,4 +11,3 @@ cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
-cargo-timing*
55 CHANGELOG.md
@@ -1,58 +1,3 @@
-Tantivy 0.14.0
-=========================
-- Remove dependency to atomicwrites #833. (Implemented by @pmasurel upon suggestion and research from @asafigan.)
-- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
-- API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
-- Large API Change in the Directory API. Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span over the entire file.
-- Added support for Brotli compression in the DocStore. (@ppodolsky)
-- Added helper for building intersections and unions in BooleanQuery (@guilload)
-- Bugfix in `Query::explain`
-- Removed dependency on `notify` #924. Replaced with `FileWatcher` struct that polls the meta file every 500ms in a background thread. (@halvorboe @guilload)
-- Added `FilterCollector`, which wraps another collector and filters docs using a predicate over a fast field (@barrotsteindev)
-- Simplified the encoding of the skip reader struct. BlockWAND max tf is now encoded over a single byte. (@pmasurel)
-- `FilterCollector` now supports all Fast Field value types (@barrotsteindev)
-
-This version breaks compatibility and requires users to reindex everything.
-
-Tantivy 0.13.2
-===================
-Bugfix. Acquiring a facet reader on a segment that does not contain any
-doc with this facet returns `None`. (#896)
-
-Tantivy 0.13.1
-===================
-Made `Query` and `Collector` `Send + Sync`.
-Updated misc dependency versions.
-
-Tantivy 0.13.0
-======================
-Tantivy 0.13 introduces a change in the index format that will require
-you to reindex your index (BlockWAND information is added in the skiplist).
-The index size increase is minor as this information is only added for
-full blocks.
-If you have a massive index for which reindexing is not an option, please contact me
-so that we can discuss possible solutions.
-
-- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
-- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
-- `MMapDirectory::open` does not return a `Result` anymore.
-- Change in the DocSet and Scorer API. (@fulmicoton)
-  A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
-  `.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
-  As a result, iterating through a DocSet now looks as follows:
-  ```rust
-  let mut doc = docset.doc();
-  while doc != TERMINATED {
-      // ...
-      doc = docset.advance();
-  }
-  ```
-  The change made it possible to greatly simplify a lot of the docset's code.
-- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
-- Added an offset option to the Top(.*)Collectors. (@robyoung)
-- Added Block WAND. Performance of TOP-K on term unions should be greatly increased. (@fulmicoton, and special thanks to the PISA team for answering all my questions!)
-
 Tantivy 0.12.0
 ======================
 - Removing static dispatch in tokenizers for simplicity. (#762)
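The 0.13.0 entry removed above describes the new cursor contract: a freshly created DocSet already points at its first document, TERMINATED marks exhaustion, `.advance()` returns the next DocId, and `Scorer::seek(target)` replaces `Scorer::skip(target)`. Below is a minimal, self-contained sketch of that iteration pattern; the `DocSet` trait and `VecDocSet` type here are local stand-ins written for illustration, not the actual tantivy API.

```rust
// Toy DocSet mirroring the cursor contract described in the changelog entry.
type DocId = u32;
const TERMINATED: DocId = DocId::MAX;

trait DocSet {
    /// Doc the cursor currently points at (a fresh DocSet already points at its first doc).
    fn doc(&self) -> DocId;
    /// Move to the next doc and return it, or TERMINATED once the set is exhausted.
    fn advance(&mut self) -> DocId;
    /// Move to the first doc >= target and return it (naive default for the sketch).
    fn seek(&mut self, target: DocId) -> DocId {
        let mut doc = self.doc();
        while doc < target {
            doc = self.advance();
        }
        doc
    }
}

struct VecDocSet {
    docs: Vec<DocId>,
    cursor: usize,
}

impl DocSet for VecDocSet {
    fn doc(&self) -> DocId {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }
    fn advance(&mut self) -> DocId {
        self.cursor += 1;
        self.doc()
    }
}

// The iteration loop quoted in the changelog, written against the toy trait.
fn collect_all<D: DocSet>(docset: &mut D) -> Vec<DocId> {
    let mut hits = Vec::new();
    let mut doc = docset.doc();
    while doc != TERMINATED {
        hits.push(doc);
        doc = docset.advance();
    }
    hits
}

fn main() {
    let mut docset = VecDocSet { docs: vec![1, 5, 9], cursor: 0 };
    assert_eq!(docset.seek(4), 5);
    assert_eq!(collect_all(&mut docset), vec![5, 9]);
}
```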
68 Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.14.0-dev"
+version = "0.12.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,55 +13,54 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"

 [dependencies]
-base64 = "0.13"
+base64 = "0.12.0"
-byteorder = "1"
+byteorder = "1.0"
-crc32fast = "1"
+crc32fast = "1.2.0"
-once_cell = "1"
+once_cell = "1.0"
-regex ={version = "1", default-features = false, features = ["std"]}
+regex ={version = "1.3.0", default-features = false, features = ["std"]}
-tantivy-fst = "0.3"
+tantivy-fst = "0.2.1"
 memmap = {version = "0.7", optional=true}
-lz4 = {version="1", optional=true}
+lz4 = {version="1.20", optional=true}
-brotli = {version="3.3.0", optional=true}
 snap = "1"
-tempfile = {version="3", optional=true}
+atomicwrites = {version="0.2.2", optional=true}
+tempfile = "3.0"
 log = "0.4"
-serde = {version="1", features=["derive"]}
+serde = {version="1.0", features=["derive"]}
-serde_json = "1"
+serde_json = "1.0"
-num_cpus = "1"
+num_cpus = "1.2"
 fs2={version="0.4", optional=true}
-levenshtein_automata = "0.2"
+levenshtein_automata = "0.1"
+notify = {version="4", optional=true}
 uuid = { version = "0.8", features = ["v4", "serde"] }
-crossbeam = "0.8"
+crossbeam = "0.7"
 futures = {version = "0.3", features=["thread-pool"] }
-tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
+owning_ref = "0.4"
-stable_deref_trait = "1"
+stable_deref_trait = "1.0.0"
-rust-stemmers = "1"
+rust-stemmers = "1.2"
-downcast-rs = "1"
+downcast-rs = { version="1.0" }
+tantivy-query-grammar = { version="0.12", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.4"
+census = {path="../census"}
-fnv = "1"
+fnv = "1.0.6"
-thiserror = "1.0"
+owned-read = "0.4"
-htmlescape = "0.3"
+failure = "0.1"
-fail = "0.4"
+htmlescape = "0.3.1"
+fail = "0.3"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1"
+smallvec = "1.0"
 rayon = "1"
-env_logger = "0.8"
-lru = "0.6"

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"

 [dev-dependencies]
-rand = "0.8"
+rand = "0.7"
 maplit = "1"
 matches = "0.1.8"
-proptest = "0.10"
-criterion = "0.3"

 [dev-dependencies.fail]
-version = "0.4"
+version = "0.3"
 features = ["failpoints"]

 [profile.release]
@@ -75,8 +74,7 @@ overflow-checks = true

 [features]
 default = ["mmap"]
-mmap = ["fs2", "tempfile", "memmap"]
+mmap = ["atomicwrites", "fs2", "memmap", "notify"]
-brotli-compression = ["brotli"]
 lz4-compression = ["lz4"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
@@ -99,7 +97,3 @@ travis-ci = { repository = "tantivy-search/tantivy" }
 name = "failpoints"
 path = "tests/failpoints/mod.rs"
 required-features = ["fail/failpoints"]
-
-[[bench]]
-name = "analyzer"
-harness = false
@@ -5,6 +5,7 @@
 [](https://opensource.org/licenses/MIT)
 [](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
 [](https://crates.io/crates/tantivy)
+[](https://saythanks.io/to/fulmicoton)



@@ -30,11 +31,12 @@ Tantivy is, in fact, strongly inspired by Lucene's design.

 # Benchmark

+Tantivy is typically faster than Lucene, but the results depend on
+the nature of the queries in your workload.

 The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
 performance for different types of queries / collections.

-Your mileage WILL vary depending on the nature of queries and their load.

 # Features

 - Full-text search
@@ -84,7 +86,7 @@ There are many ways to support this project.
 - Help with documentation by asking questions or submitting PRs
 - Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
 - Talk about Tantivy around you
-- [](https://www.patreon.com/fulmicoton)
+- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)

 # Contributing code

9 TODO.md (new file)
@@ -0,0 +1,9 @@
+- segment writer serializes if already flush
+- flush if exceed max doc.
+- unit tests
+- reader from `IndexWriter`
+- configurable limit to flush
+- bundle directory
+- add index worker when exceeds some doc limit
+- flush before prepare commit.
+- segment_writer should not receive a segment
@@ -18,5 +18,5 @@ install:
 build: false

 test_script:
-  - REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features mmap
+  - REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap
   - REM SET RUST_BACKTRACE=1 & cargo build --examples
3774 benches/alice.txt (file diff suppressed because it is too large)
@@ -1,22 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-use tantivy::tokenizer::TokenizerManager;
-
-const ALICE_TXT: &'static str = include_str!("alice.txt");
-
-pub fn criterion_benchmark(c: &mut Criterion) {
-    let tokenizer_manager = TokenizerManager::default();
-    let tokenizer = tokenizer_manager.get("default").unwrap();
-    c.bench_function("default-tokenize-alice", |b| {
-        b.iter(|| {
-            let mut word_count = 0;
-            let mut token_stream = tokenizer.token_stream(ALICE_TXT);
-            while token_stream.advance() {
-                word_count += 1;
-            }
-            assert_eq!(word_count, 30_731);
-        })
-    });
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
@@ -112,6 +112,18 @@ fn main() -> tantivy::Result<()> {
         limbs and branches that arch over the pool"
     ));

+    index_writer.add_document(doc!(
+        title => "Of Mice and Men",
+        body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
+        bank and runs deep and green. The water is warm too, for it has slipped twinkling \
+        over the yellow sands in the sunlight before reaching the narrow pool. On one \
+        side of the river the golden foothill slopes curve up to the strong and rocky \
+        Gabilan Mountains, but on the valley side the water is lined with trees—willows \
+        fresh and green with every spring, carrying in their lower leaf junctures the \
+        debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
+        limbs and branches that arch over the pool"
+    ));
+
     // Multivalued field just need to be repeated.
     index_writer.add_document(doc!(
         title => "Frankenstein",
@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::Field;
 use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::{doc, Index, Score, SegmentReader, TantivyError};
+use tantivy::{doc, Index, SegmentReader, TantivyError};

 #[derive(Default)]
 struct Stats {
@@ -114,7 +114,7 @@ struct StatsSegmentCollector {
 impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;

-    fn collect(&mut self, doc: u32, _score: Score) {
+    fn collect(&mut self, doc: u32, _score: f32) {
         let value = self.fast_field_reader.get(doc) as f64;
         self.stats.count += 1;
         self.stats.sum += value;
@@ -1,98 +0,0 @@
-use std::collections::HashSet;
-use tantivy::collector::TopDocs;
-use tantivy::doc;
-use tantivy::query::BooleanQuery;
-use tantivy::schema::*;
-use tantivy::{DocId, Index, Score, SegmentReader};
-
-fn main() -> tantivy::Result<()> {
-    let mut schema_builder = Schema::builder();
-
-    let title = schema_builder.add_text_field("title", STORED);
-    let ingredient = schema_builder.add_facet_field("ingredient");
-
-    let schema = schema_builder.build();
-    let index = Index::create_in_ram(schema.clone());
-
-    let mut index_writer = index.writer(30_000_000)?;
-
-    index_writer.add_document(doc!(
-        title => "Fried egg",
-        ingredient => Facet::from("/ingredient/egg"),
-        ingredient => Facet::from("/ingredient/oil"),
-    ));
-    index_writer.add_document(doc!(
-        title => "Scrambled egg",
-        ingredient => Facet::from("/ingredient/egg"),
-        ingredient => Facet::from("/ingredient/butter"),
-        ingredient => Facet::from("/ingredient/milk"),
-        ingredient => Facet::from("/ingredient/salt"),
-    ));
-    index_writer.add_document(doc!(
-        title => "Egg rolls",
-        ingredient => Facet::from("/ingredient/egg"),
-        ingredient => Facet::from("/ingredient/garlic"),
-        ingredient => Facet::from("/ingredient/salt"),
-        ingredient => Facet::from("/ingredient/oil"),
-        ingredient => Facet::from("/ingredient/tortilla-wrap"),
-        ingredient => Facet::from("/ingredient/mushroom"),
-    ));
-    index_writer.commit()?;
-
-    let reader = index.reader()?;
-    let searcher = reader.searcher();
-    {
-        let facets = vec![
-            Facet::from("/ingredient/egg"),
-            Facet::from("/ingredient/oil"),
-            Facet::from("/ingredient/garlic"),
-            Facet::from("/ingredient/mushroom"),
-        ];
-        let query = BooleanQuery::new_multiterms_query(
-            facets
-                .iter()
-                .map(|key| Term::from_facet(ingredient, &key))
-                .collect(),
-        );
-        let top_docs_by_custom_score =
-            TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-                let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
-                let facet_dict = ingredient_reader.facet_dict();
-
-                let query_ords: HashSet<u64> = facets
-                    .iter()
-                    .filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
-                    .collect();
-
-                let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);
-
-                move |doc: DocId, original_score: Score| {
-                    ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
-                    let missing_ingredients = facet_ords_buffer
-                        .iter()
-                        .filter(|ord| !query_ords.contains(ord))
-                        .count();
-                    let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);
-
-                    original_score * tweak
-                }
-            });
-        let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;
-
-        let titles: Vec<String> = top_docs
-            .iter()
-            .map(|(_, doc_id)| {
-                searcher
-                    .doc(*doc_id)
-                    .unwrap()
-                    .get_first(title)
-                    .unwrap()
-                    .text()
-                    .unwrap()
-                    .to_owned()
-            })
-            .collect();
-        assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
-    }
-    Ok(())
-}
@@ -10,7 +10,7 @@
 // ---
 // Importing tantivy...
 use tantivy::schema::*;
-use tantivy::{doc, DocSet, Index, Postings, TERMINATED};
+use tantivy::{doc, DocId, DocSet, Index, Postings};

 fn main() -> tantivy::Result<()> {
     // We first create a schema for the sake of the
@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
     // Inverted index stands for the combination of
     // - the term dictionary
     // - the inverted lists associated to each terms and their positions
-    let inverted_index = segment_reader.inverted_index(title)?;
+    let inverted_index = segment_reader.inverted_index(title);

     // A `Term` is a text token associated with a field.
     // Let's go through all docs containing the term `title:the` and access their position
@@ -58,15 +58,16 @@ fn main() -> tantivy::Result<()> {
     // If you don't need all this information, you may get better performance by decompressing less
     // information.
     if let Some(mut segment_postings) =
-        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
+        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
     {
         // this buffer will be used to request for positions
         let mut positions: Vec<u32> = Vec::with_capacity(100);
-        let mut doc_id = segment_postings.doc();
-        while doc_id != TERMINATED {
+        while segment_postings.advance() {
+            // the number of time the term appears in the document.
+            let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.

             // This MAY contains deleted documents as well.
             if segment_reader.is_deleted(doc_id) {
-                doc_id = segment_postings.advance();
                 continue;
             }

@@ -85,7 +86,6 @@ fn main() -> tantivy::Result<()> {
             // Doc 2: TermFreq 1: [0]
             // ```
             println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
-            doc_id = segment_postings.advance();
         }
     }
 }
@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
     // Inverted index stands for the combination of
     // - the term dictionary
     // - the inverted lists associated to each terms and their positions
-    let inverted_index = segment_reader.inverted_index(title)?;
+    let inverted_index = segment_reader.inverted_index(title);

     // This segment posting object is like a cursor over the documents matching the term.
     // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
@@ -115,18 +115,13 @@ fn main() -> tantivy::Result<()> {
     // If you don't need all this information, you may get better performance by decompressing less
     // information.
     if let Some(mut block_segment_postings) =
-        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
+        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
     {
-        loop {
-            let docs = block_segment_postings.docs();
-            if docs.is_empty() {
-                break;
-            }
+        while block_segment_postings.advance() {
             // Once again these docs MAY contains deleted documents as well.
             let docs = block_segment_postings.docs();
             // Prints `Docs [0, 2].`
             println!("Docs {:?}", docs);
-            block_segment_postings.advance();
         }
     }
 }
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.14.0-dev"
+version = "0.12.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -31,12 +31,22 @@ impl Occur {

     /// Compose two occur values.
     pub fn compose(left: Occur, right: Occur) -> Occur {
-        match (left, right) {
-            (Occur::Should, _) => right,
-            (Occur::Must, Occur::MustNot) => Occur::MustNot,
-            (Occur::Must, _) => Occur::Must,
-            (Occur::MustNot, Occur::MustNot) => Occur::Must,
-            (Occur::MustNot, _) => Occur::MustNot,
+        match left {
+            Occur::Should => right,
+            Occur::Must => {
+                if right == Occur::MustNot {
+                    Occur::MustNot
+                } else {
+                    Occur::Must
+                }
+            }
+            Occur::MustNot => {
+                if right == Occur::MustNot {
+                    Occur::Must
+                } else {
+                    Occur::MustNot
+                }
+            }
         }
     }
 }
@@ -46,27 +56,3 @@ impl fmt::Display for Occur {
         f.write_char(self.to_char())
     }
 }
-
-#[cfg(test)]
-mod test {
-    use crate::Occur;
-
-    #[test]
-    fn test_occur_compose() {
-        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
-        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
-        assert_eq!(
-            Occur::compose(Occur::Should, Occur::MustNot),
-            Occur::MustNot
-        );
-        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
-        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
-        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
-        assert_eq!(
-            Occur::compose(Occur::MustNot, Occur::Should),
-            Occur::MustNot
-        );
-        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
-        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
-    }
-}
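The rewrite of `compose` above swaps a single match on the `(left, right)` pair for a nested match on `left`. The sketch below (with a local stand-in `Occur` enum, not the crate's own type) implements both formulations from the hunk and checks that they agree on every combination, mirroring the truth table in the removed `test_occur_compose` test.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Occur {
    Should,
    Must,
    MustNot,
}

// One side of the diff: a single match on the pair.
fn compose_pair(left: Occur, right: Occur) -> Occur {
    match (left, right) {
        (Occur::Should, _) => right,
        (Occur::Must, Occur::MustNot) => Occur::MustNot,
        (Occur::Must, _) => Occur::Must,
        (Occur::MustNot, Occur::MustNot) => Occur::Must,
        (Occur::MustNot, _) => Occur::MustNot,
    }
}

// The other side of the diff: nested match on `left`, then a check on `right`.
fn compose_nested(left: Occur, right: Occur) -> Occur {
    match left {
        Occur::Should => right,
        Occur::Must => {
            if right == Occur::MustNot {
                Occur::MustNot
            } else {
                Occur::Must
            }
        }
        Occur::MustNot => {
            if right == Occur::MustNot {
                Occur::Must
            } else {
                Occur::MustNot
            }
        }
    }
}

fn main() {
    let all = [Occur::Should, Occur::Must, Occur::MustNot];
    for &l in &all {
        for &r in &all {
            // Both formulations agree on all nine combinations.
            assert_eq!(compose_pair(l, r), compose_nested(l, r));
        }
    }
}
```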
@@ -9,10 +9,8 @@ use combine::{

 fn field<'a>() -> impl Parser<&'a str, Output = String> {
     (
-        (letter().or(char('_'))),
-        many(satisfy(|c: char| {
-            c.is_alphanumeric() || c == '_' || c == '-'
-        })),
+        letter(),
+        many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
     )
     .skip(char(':'))
     .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
@@ -156,11 +154,17 @@ fn negate(expr: UserInputAST) -> UserInputAST {
     expr.unary(Occur::MustNot)
 }

+fn must(expr: UserInputAST) -> UserInputAST {
+    expr.unary(Occur::Must)
+}
+
 fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     parser(|input| {
-        char('(')
-            .with(ast())
-            .skip(char(')'))
+        char('-')
+            .with(leaf())
+            .map(negate)
+            .or(char('+').with(leaf()).map(must))
+            .or(char('(').with(ast()).skip(char(')')))
             .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
             .or(attempt(
                 string("NOT").skip(spaces1()).with(leaf()).map(negate),
@@ -172,17 +176,7 @@ fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     })
 }

-fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
-    char('-')
-        .map(|_| Occur::MustNot)
-        .or(char('+').map(|_| Occur::Must))
-}
-
-fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
-    (optional(occur_symbol()), boosted_leaf())
-}
-
-fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
+fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
     (many1(digit()), optional((char('.'), many1(digit())))).map(
         |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
             let mut float_str = int_part;
@@ -190,18 +184,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
             float_str.push(chr);
             float_str.push_str(&decimal_str);
         }
-        float_str.parse::<f64>().unwrap()
+        float_str.parse::<f32>().unwrap()
         },
     )
 }

-fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
+fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
     (char('^'), positive_float_number()).map(|(_, boost)| boost)
 }

 fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
-        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
+        Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
             UserInputAST::Boost(Box::new(leaf), boost)
         }
         _ => leaf,
@@ -245,29 +239,21 @@ fn aggregate_binary_expressions(
     }
 }

-fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAST)> {
-    (
+pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
+    let operand_leaf = (
         binary_operand().skip(spaces()),
         boosted_leaf().skip(spaces()),
-    )
-}
-
-pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
-    let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
+    );
+    let boolean_expr = (boosted_leaf().skip(spaces().silent()), many1(operand_leaf))
         .map(|(left, right)| aggregate_binary_expressions(left, right));
-    let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
-        |subqueries: Vec<(Option<Occur>, UserInputAST)>| {
+    let whitespace_separated_leaves =
+        many1(boosted_leaf().skip(spaces().silent())).map(|subqueries: Vec<UserInputAST>| {
             if subqueries.len() == 1 {
-                let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
-                match occur_opt.unwrap_or(Occur::Should) {
-                    Occur::Must | Occur::Should => ast,
-                    Occur::MustNot => UserInputAST::Clause(vec![(Some(Occur::MustNot), ast)]),
-                }
+                subqueries.into_iter().next().unwrap()
             } else {
                 UserInputAST::Clause(subqueries.into_iter().collect())
             }
-        },
-    );
+        });
     let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
     spaces().with(expr).skip(spaces())
 }
@@ -281,16 +267,14 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
 #[cfg(test)]
 mod test {

-    type TestParseResult = Result<(), StringStreamError>;
-
     use super::*;
     use combine::parser::Parser;

-    pub fn nearly_equals(a: f64, b: f64) -> bool {
+    pub fn nearly_equals(a: f32, b: f32) -> bool {
         (a - b).abs() < 0.0005 * (a + b).abs()
     }

-    fn assert_nearly_equals(expected: f64, val: f64) {
+    fn assert_nearly_equals(expected: f32, val: f32) {
         assert!(
             nearly_equals(val, expected),
             "Got {}, expected {}.",
@@ -299,16 +283,9 @@ mod test {
         );
     }

-    #[test]
-    fn test_occur_symbol() -> TestParseResult {
-        assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
-        assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
-        Ok(())
-    }
-
     #[test]
     fn test_positive_float_number() {
-        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
+        fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
             let (val, remaining) = positive_float_number().parse(float_str).unwrap();
             assert_eq!(remaining, expected_remaining);
             assert_nearly_equals(val, expected_val);
@@ -316,9 +293,9 @@ mod test {
         fn error_parse(float_str: &str) {
             assert!(positive_float_number().parse(float_str).is_err());
         }
-        valid_parse("1.0", 1.0, "");
-        valid_parse("1", 1.0, "");
-        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
+        valid_parse("1.0", 1.0f32, "");
+        valid_parse("1", 1.0f32, "");
+        valid_parse("0.234234 aaa", 0.234234f32, " aaa");
         error_parse(".3332");
         error_parse("1.");
         error_parse("-1.");
@@ -353,7 +330,7 @@ mod test {
         "Err(UnexpectedParse)"
         );
         test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
-        test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
+        test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
     }

     #[test]
@@ -361,16 +338,16 @@ mod test {
         assert!(parse_to_ast().parse("a^2^3").is_err());
         assert!(parse_to_ast().parse("a^2^").is_err());
         test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
-        test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
+        test_parse_query_to_ast_helper("a^3 b^2", "((\"a\")^3 (\"b\")^2)");
         test_parse_query_to_ast_helper("a^1", "\"a\"");
     }

     #[test]
     fn test_parse_query_to_ast_binary_op() {
-        test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
-        test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
-        test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
-        test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
+        test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
+        test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
+        test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
+        test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
         assert_eq!(
             format!("{:?}", parse_to_ast().parse("a OR b aaa")),
             "Err(UnexpectedParse)"
@@ -408,32 +385,6 @@ mod test {
         test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
     }

-    #[test]
-    fn test_occur_leaf() {
-        let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
-        assert_eq!(occur, Some(Occur::Must));
-        assert_eq!(format!("{:?}", ast), "\"abc\"");
-    }
-
-    #[test]
-    fn test_field_name() -> TestParseResult {
-        assert_eq!(
-            super::field().parse("my-field-name:a")?,
-            ("my-field-name".to_string(), "a")
-        );
-        assert_eq!(
-            super::field().parse("my_field_name:a")?,
-            ("my_field_name".to_string(), "a")
-        );
-        assert!(super::field().parse(":a").is_err());
-        assert!(super::field().parse("-my_field:a").is_err());
-        assert_eq!(
-            super::field().parse("_my_field:a")?,
-            ("_my_field".to_string(), "a")
-        );
-        Ok(())
-    }
-
     #[test]
     fn test_range_parser() {
         // testing the range() parser separately
@@ -462,67 +413,32 @@ mod test {
     fn test_parse_query_to_triming_spaces() {
         test_parse_query_to_ast_helper(" abc", "\"abc\"");
         test_parse_query_to_ast_helper("abc ", "\"abc\"");
-        test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
-        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
-        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
-        test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
-        test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
-        test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
+        test_parse_query_to_ast_helper("( a OR abc)", "(?(\"a\") ?(\"abc\"))");
+        test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
+        test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
+        test_parse_query_to_ast_helper("a OR abc ", "(?(\"a\") ?(\"abc\"))");
+        test_parse_query_to_ast_helper("(a OR abc )", "(?(\"a\") ?(\"abc\"))");
+        test_parse_query_to_ast_helper("(a OR abc) ", "(?(\"a\") ?(\"abc\"))");
     }

     #[test]
-    fn test_parse_query_single_term() {
+    fn test_parse_query_to_ast() {
         test_parse_query_to_ast_helper("abc", "\"abc\"");
-    }
-
-    #[test]
-    fn test_parse_query_default_clause() {
-        test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
-    }
-
-    #[test]
-    fn test_parse_query_must_default_clause() {
-        test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
-    }
-
-    #[test]
-    fn test_parse_query_must_single_term() {
-        test_parse_query_to_ast_helper("+d", "\"d\"");
-    }
-
-    #[test]
-    fn test_single_term_with_field() {
+        test_parse_query_to_ast_helper("a b", "(\"a\" \"b\")");
+        test_parse_query_to_ast_helper("+(a b)", "+((\"a\" \"b\"))");
+        test_parse_query_to_ast_helper("+d", "+(\"d\")");
+        test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
+        test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
+        test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
+        test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
         test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
-    }
-
-    #[test]
-    fn test_single_term_with_float() {
         test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
-    }
-
-    #[test]
-    fn test_must_clause() {
-        test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
-    }
-
-    #[test]
-    fn test_parse_test_query_plus_a_b_plus_d() {
-        test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
-    }
-
-    #[test]
-    fn test_parse_test_query_other() {
-        test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
-        test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
-        test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
-        test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
-        test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
+        test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
+        test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
+        test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
+        test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
         test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
         test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
-    }
-
-    #[test]
-    fn test_parse_query_with_range() {
         test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
         test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
         test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
@@ -85,14 +85,15 @@ impl UserInputBound {
 }

 pub enum UserInputAST {
-    Clause(Vec<(Option<Occur>, UserInputAST)>),
+    Clause(Vec<UserInputAST>),
+    Unary(Occur, Box<UserInputAST>),
     Leaf(Box<UserInputLeaf>),
-    Boost(Box<UserInputAST>, f64),
+    Boost(Box<UserInputAST>, f32),
 }

 impl UserInputAST {
     pub fn unary(self, occur: Occur) -> UserInputAST {
-        UserInputAST::Clause(vec![(Some(occur), self)])
+        UserInputAST::Unary(occur, Box::new(self))
     }

     fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
@@ -103,7 +104,7 @@ impl UserInputAST {
         } else {
             UserInputAST::Clause(
                 asts.into_iter()
-                    .map(|ast: UserInputAST| (Some(occur), ast))
+                    .map(|ast: UserInputAST| ast.unary(occur))
                     .collect::<Vec<_>>(),
             )
         }
@@ -134,36 +135,25 @@ impl From<UserInputLeaf> for UserInputAST {
     }
 }

-fn print_occur_ast(
-    occur_opt: Option<Occur>,
-    ast: &UserInputAST,
-    formatter: &mut fmt::Formatter,
-) -> fmt::Result {
-    if let Some(occur) = occur_opt {
-        write!(formatter, "{}{:?}", occur, ast)?;
-    } else {
-        write!(formatter, "*{:?}", ast)?;
-    }
-    Ok(())
-}
-
 impl fmt::Debug for UserInputAST {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         match *self {
             UserInputAST::Clause(ref subqueries) => {
                 if subqueries.is_empty() {
                     write!(formatter, "<emptyclause>")?;
                 } else {
                     write!(formatter, "(")?;
-                    print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
+                    write!(formatter, "{:?}", &subqueries[0])?;
                     for subquery in &subqueries[1..] {
-                        write!(formatter, " ")?;
-                        print_occur_ast(subquery.0, &subquery.1, formatter)?;
+                        write!(formatter, " {:?}", subquery)?;
                     }
                     write!(formatter, ")")?;
                 }
                 Ok(())
             }
+            UserInputAST::Unary(ref occur, ref subquery) => {
+                write!(formatter, "{}({:?})", occur, subquery)
+            }
             UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
             UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
         }
@@ -96,18 +96,18 @@ mod tests {
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1.0);
+        count_collector.collect(0u32, 1f32);
         assert_eq!(count_collector.harvest(), 1);
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1.0);
+        count_collector.collect(0u32, 1f32);
         assert_eq!(count_collector.harvest(), 1);
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1.0);
-        count_collector.collect(1u32, 1.0);
+        count_collector.collect(0u32, 1f32);
+        count_collector.collect(1u32, 1f32);
         assert_eq!(count_collector.harvest(), 2);
     }
 }
@@ -11,13 +11,13 @@ impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
 where
     TScore: Clone + PartialOrd,
 {
-    pub(crate) fn new(
+    pub fn new(
         custom_scorer: TCustomScorer,
-        collector: TopCollector<TScore>,
+        limit: usize,
     ) -> CustomScoreTopCollector<TCustomScorer, TScore> {
         CustomScoreTopCollector {
             custom_scorer,
-            collector,
+            collector: TopCollector::with_limit(limit),
         }
     }
 }
@@ -28,7 +28,7 @@ where
 /// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
 pub trait CustomSegmentScorer<TScore>: 'static {
     /// Computes the score of a specific `doc`.
-    fn score(&mut self, doc: DocId) -> TScore;
+    fn score(&self, doc: DocId) -> TScore;
 }

 /// `CustomScorer` makes it possible to define any kind of score.
@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {

 impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
 where
-    TCustomScorer: CustomScorer<TScore> + Send + Sync,
+    TCustomScorer: CustomScorer<TScore>,
     TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
     type Fruit = Vec<(TScore, DocAddress)>;
@@ -58,10 +58,10 @@ where
         segment_local_id: u32,
         segment_reader: &SegmentReader,
     ) -> crate::Result<Self::Child> {
+        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
         let segment_collector = self
             .collector
             .for_segment(segment_local_id, segment_reader)?;
-        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
         Ok(CustomScoreTopSegmentCollector {
             segment_collector,
             segment_scorer,
@@ -117,9 +117,9 @@ where

 impl<F, TScore> CustomSegmentScorer<TScore> for F
 where
-    F: 'static + FnMut(DocId) -> TScore,
+    F: 'static + Sync + Send + Fn(DocId) -> TScore,
 {
-    fn score(&mut self, doc: DocId) -> TScore {
+    fn score(&self, doc: DocId) -> TScore {
         (self)(doc)
     }
 }
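The last hunk above changes the blanket impl for the segment scorer: one side of the diff bounds it by `Fn + Sync + Send` with `score(&self, ...)`, the other by `FnMut` with `score(&mut self, ...)`. The sketch below uses a local stand-in trait (not tantivy's own) to show what the `FnMut` form permits: a per-segment closure that reuses a captured scratch buffer between calls, as the removed faceted-search example does with `facet_ords_buffer`.

```rust
// Local stand-in for the segment-scorer trait; written for illustration only.
type DocId = u32;

trait CustomSegmentScorer {
    fn score(&mut self, doc: DocId) -> f32;
}

// Blanket impl over FnMut closures, mirroring the `&mut self` side of the hunk.
impl<F> CustomSegmentScorer for F
where
    F: 'static + FnMut(DocId) -> f32,
{
    fn score(&mut self, doc: DocId) -> f32 {
        (self)(doc)
    }
}

fn main() {
    // A closure that mutates captured state (a reusable scratch buffer).
    // Under an `Fn + Sync + Send` bound this closure would be rejected.
    let mut buffer: Vec<DocId> = Vec::with_capacity(4);
    let mut scorer = move |doc: DocId| {
        buffer.clear();
        buffer.push(doc);
        buffer.len() as f32 * doc as f32
    };
    assert_eq!(scorer.score(3), 3.0);
}
```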
@@ -1,61 +0,0 @@
-use std::collections::HashSet;
-
-use crate::{DocAddress, DocId, Score};
-
-use super::{Collector, SegmentCollector};
-
-/// Collectors that returns the set of DocAddress that matches the query.
-///
-/// This collector is mostly useful for tests.
-pub struct DocSetCollector;
-
-impl Collector for DocSetCollector {
-    type Fruit = HashSet<DocAddress>;
-    type Child = DocSetChildCollector;
-
-    fn for_segment(
-        &self,
-        segment_local_id: crate::SegmentLocalId,
-        _segment: &crate::SegmentReader,
-    ) -> crate::Result<Self::Child> {
-        Ok(DocSetChildCollector {
-            segment_local_id,
-            docs: HashSet::new(),
-        })
-    }
-
-    fn requires_scoring(&self) -> bool {
-        false
-    }
-
-    fn merge_fruits(
-        &self,
-        segment_fruits: Vec<(u32, HashSet<DocId>)>,
-    ) -> crate::Result<Self::Fruit> {
-        let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
-        let mut result = HashSet::with_capacity(len);
-        for (segment_local_id, docs) in segment_fruits {
-            for doc in docs {
-                result.insert(DocAddress(segment_local_id, doc));
-            }
-        }
-        Ok(result)
-    }
-}
-
-pub struct DocSetChildCollector {
-    segment_local_id: u32,
-    docs: HashSet<DocId>,
-}
-
-impl SegmentCollector for DocSetChildCollector {
-    type Fruit = (u32, HashSet<DocId>);
-
-    fn collect(&mut self, doc: crate::DocId, _score: Score) {
-        self.docs.insert(doc);
-    }
-
-    fn harvest(self) -> (u32, HashSet<DocId>) {
-        (self.segment_local_id, self.docs)
-    }
-}
@@ -1,5 +1,6 @@
|
|||||||
use crate::collector::Collector;
|
use crate::collector::Collector;
|
||||||
use crate::collector::SegmentCollector;
|
use crate::collector::SegmentCollector;
|
||||||
|
use crate::docset::SkipResult;
|
||||||
use crate::fastfield::FacetReader;
|
use crate::fastfield::FacetReader;
|
||||||
use crate::schema::Facet;
|
use crate::schema::Facet;
|
||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
@@ -7,6 +8,7 @@ use crate::DocId;
|
|||||||
use crate::Score;
|
use crate::Score;
|
||||||
use crate::SegmentLocalId;
|
use crate::SegmentLocalId;
|
||||||
use crate::SegmentReader;
|
use crate::SegmentReader;
|
||||||
|
use crate::TantivyError;
|
||||||
use std::cmp::Ordering;
|
use std::cmp::Ordering;
|
||||||
use std::collections::btree_map;
|
use std::collections::btree_map;
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
@@ -186,11 +188,6 @@ pub struct FacetSegmentCollector {
|
|||||||
collapse_facet_ords: Vec<u64>,
|
collapse_facet_ords: Vec<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
enum SkipResult {
|
|
||||||
Found,
|
|
||||||
NotFound,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn skip<'a, I: Iterator<Item = &'a Facet>>(
|
fn skip<'a, I: Iterator<Item = &'a Facet>>(
|
||||||
target: &[u8],
|
target: &[u8],
|
||||||
collapse_it: &mut Peekable<I>,
|
collapse_it: &mut Peekable<I>,
|
||||||
@@ -200,14 +197,14 @@ fn skip<'a, I: Iterator<Item = &'a Facet>>(
|
|||||||
Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
|
Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
|
||||||
Ordering::Less => {}
|
Ordering::Less => {}
|
||||||
Ordering::Greater => {
|
Ordering::Greater => {
|
||||||
return SkipResult::NotFound;
|
return SkipResult::OverStep;
|
||||||
}
|
}
|
||||||
Ordering::Equal => {
|
Ordering::Equal => {
|
||||||
return SkipResult::Found;
|
return SkipResult::Reached;
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
None => {
|
None => {
|
||||||
return SkipResult::NotFound;
|
return SkipResult::End;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
collapse_it.next();
|
collapse_it.next();
|
||||||
@@ -265,7 +262,10 @@ impl Collector for FacetCollector {
|
|||||||
_: SegmentLocalId,
|
_: SegmentLocalId,
|
||||||
reader: &SegmentReader,
|
reader: &SegmentReader,
|
||||||
) -> crate::Result<FacetSegmentCollector> {
|
) -> crate::Result<FacetSegmentCollector> {
|
||||||
let facet_reader = reader.facet_reader(self.field)?;
|
let field_name = reader.schema().get_field_name(self.field);
|
||||||
|
let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
|
||||||
|
TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
|
||||||
|
})?;
|
||||||
|
|
||||||
let mut collapse_mapping = Vec::new();
|
let mut collapse_mapping = Vec::new();
|
||||||
let mut counts = Vec::new();
|
let mut counts = Vec::new();
|
||||||
@@ -274,14 +274,14 @@ impl Collector for FacetCollector {
         let mut collapse_facet_it = self.facets.iter().peekable();
         collapse_facet_ords.push(0);
         {
-            let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
+            let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
             if facet_streamer.advance() {
                 'outer: loop {
                     // at the begining of this loop, facet_streamer
                     // is positionned on a term that has not been processed yet.
                     let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
                     match skip_result {
-                        SkipResult::Found => {
+                        SkipResult::Reached => {
                             // we reach a facet we decided to collapse.
                             let collapse_depth = facet_depth(facet_streamer.key());
                             let mut collapsed_id = 0;
@@ -301,7 +301,7 @@ impl Collector for FacetCollector {
                             }
                             break;
                         }
-                        SkipResult::NotFound => {
+                        SkipResult::End | SkipResult::OverStep => {
                             collapse_mapping.push(0);
                             if !facet_streamer.advance() {
                                 break;
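Aside (illustrative, not part of the diff): the two hunks above replace a local two-variant skip enum with a three-way outcome (`Reached`, `OverStep`, `End`) while walking a sorted "collapse" list against a sorted term stream. The self-contained sketch below mirrors that control flow against plain std types; the names `Skip` and `skip_to` are made up for the example and are not tantivy API.

```rust
use std::cmp::Ordering;
use std::iter::Peekable;

enum Skip {
    Reached,  // the collapse list contains exactly `target`
    OverStep, // the next collapse entry is already past `target`
    End,      // the collapse list is exhausted
}

// Advance the peekable collapse iterator until it is positioned at or past `target`.
fn skip_to<'a, I: Iterator<Item = &'a [u8]>>(target: &[u8], it: &mut Peekable<I>) -> Skip {
    while let Some(&prefix) = it.peek() {
        match prefix.cmp(target) {
            Ordering::Less => {
                it.next(); // keep advancing, entry is still before the target
            }
            Ordering::Greater => return Skip::OverStep,
            Ordering::Equal => return Skip::Reached,
        }
    }
    Skip::End
}

fn main() {
    let collapse: Vec<&[u8]> = vec![&b"/cat/a"[..], &b"/cat/c"[..]];
    let mut it = collapse.iter().copied().peekable();
    assert!(matches!(skip_to(b"/cat/a", &mut it), Skip::Reached));
    assert!(matches!(skip_to(b"/cat/b", &mut it), Skip::OverStep));
    assert!(matches!(skip_to(b"/cat/z", &mut it), Skip::End));
}
```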
@@ -368,12 +368,9 @@ impl SegmentCollector for FacetSegmentCollector {
             }
             let mut facet = vec![];
             let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
-            // TODO handle errors.
-            if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
-                if let Ok(facet) = Facet::from_encoded(facet) {
-                    facet_counts.insert(facet, count);
-                }
-            }
+            facet_dict.ord_to_term(facet_ord as u64, &mut facet);
+            // TODO
+            facet_counts.insert(Facet::from_encoded(facet).unwrap(), count);
         }
         FacetCounts { facet_counts }
     }
@@ -471,7 +468,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
 
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let num_facets: usize = 3 * 4 * 5;
         let facets: Vec<Facet> = (0..num_facets)
             .map(|mut n| {
@@ -530,7 +527,7 @@ mod tests {
         let facet_field = schema_builder.add_facet_field("facets");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/subjects/A/a"),
             facet_field => Facet::from_text(&"/subjects/B/a"),
@@ -549,12 +546,12 @@ mod tests {
     }
 
     #[test]
-    fn test_doc_search_by_facet() -> crate::Result<()> {
+    fn test_doc_search_by_facet() {
         let mut schema_builder = Schema::builder();
         let facet_field = schema_builder.add_facet_field("facet");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests()?;
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/A/A"),
         ));
@@ -567,8 +564,8 @@ mod tests {
         index_writer.add_document(doc!(
             facet_field => Facet::from_text(&"/D/C/A"),
         ));
-        index_writer.commit()?;
-        let reader = index.reader()?;
+        index_writer.commit().unwrap();
+        let reader = index.reader().unwrap();
         let searcher = reader.searcher();
         assert_eq!(searcher.num_docs(), 4);
 
@@ -585,17 +582,17 @@ mod tests {
         assert_eq!(count_facet("/A/C"), 1);
         assert_eq!(count_facet("/A/C/A"), 1);
         assert_eq!(count_facet("/C/A"), 0);
 
-        let query_parser = QueryParser::for_index(&index, vec![]);
-        {
-            let query = query_parser.parse_query("facet:/A/B")?;
-            assert_eq!(1, searcher.search(&query, &Count).unwrap());
-        }
-        {
-            let query = query_parser.parse_query("facet:/A")?;
-            assert_eq!(3, searcher.search(&query, &Count)?);
-        }
-        Ok(())
+        {
+            let query_parser = QueryParser::for_index(&index, vec![]);
+            {
+                let query = query_parser.parse_query("facet:/A/B").unwrap();
+                assert_eq!(1, searcher.search(&query, &Count).unwrap());
+            }
+            {
+                let query = query_parser.parse_query("facet:/A").unwrap();
+                assert_eq!(3, searcher.search(&query, &Count).unwrap());
+            }
+        }
     }
 
     #[test]
@@ -630,7 +627,7 @@ mod tests {
             .collect();
         docs[..].shuffle(&mut thread_rng());
 
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         for doc in docs {
             index_writer.add_document(doc);
         }
@@ -683,7 +680,7 @@ mod bench {
         // 40425 docs
         docs[..].shuffle(&mut thread_rng());
 
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         for doc in docs {
             index_writer.add_document(doc);
         }
@@ -1,189 +0,0 @@
-// # Custom collector example
-//
-// This example shows how you can implement your own
-// collector. As an example, we will compute a collector
-// that computes the standard deviation of a given fast field.
-//
-// Of course, you can have a look at the tantivy's built-in collectors
-// such as the `CountCollector` for more examples.
-
-// ---
-// Importing tantivy...
-use std::marker::PhantomData;
-
-use crate::collector::{Collector, SegmentCollector};
-use crate::fastfield::{FastFieldReader, FastValue};
-use crate::schema::Field;
-use crate::{Score, SegmentReader, TantivyError};
-
-/// The `FilterCollector` collector filters docs using a u64 fast field value and a predicate.
-/// Only the documents for which the predicate returned "true" will be passed on to the next collector.
-///
-/// ```rust
-/// use tantivy::collector::{TopDocs, FilterCollector};
-/// use tantivy::query::QueryParser;
-/// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
-/// use tantivy::{doc, DocAddress, Index};
-///
-/// let mut schema_builder = Schema::builder();
-/// let title = schema_builder.add_text_field("title", TEXT);
-/// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
-/// let schema = schema_builder.build();
-/// let index = Index::create_in_ram(schema);
-///
-/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
-/// index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64));
-/// index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64));
-/// index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64));
-/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64));
-/// assert!(index_writer.commit().is_ok());
-///
-/// let reader = index.reader().unwrap();
-/// let searcher = reader.searcher();
-///
-/// let query_parser = QueryParser::for_index(&index, vec![title]);
-/// let query = query_parser.parse_query("diary").unwrap();
-/// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
-/// let top_docs = searcher.search(&query, &no_filter_collector).unwrap();
-///
-/// assert_eq!(top_docs.len(), 1);
-/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
-///
-/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
-/// let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
-///
-/// assert_eq!(filtered_top_docs.len(), 0);
-/// ```
-pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue>
-where
-    TPredicate: 'static,
-{
-    field: Field,
-    collector: TCollector,
-    predicate: &'static TPredicate,
-    t_predicate_value: PhantomData<TPredicateValue>,
-}
-
-impl<TCollector, TPredicate, TPredicateValue: FastValue>
-    FilterCollector<TCollector, TPredicate, TPredicateValue>
-where
-    TCollector: Collector + Send + Sync,
-    TPredicate: Fn(TPredicateValue) -> bool + Send + Sync,
-{
-    /// Create a new FilterCollector.
-    pub fn new(
-        field: Field,
-        predicate: &'static TPredicate,
-        collector: TCollector,
-    ) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
-        FilterCollector {
-            field,
-            predicate,
-            collector,
-            t_predicate_value: PhantomData,
-        }
-    }
-}
-
-impl<TCollector, TPredicate, TPredicateValue: FastValue> Collector
-    for FilterCollector<TCollector, TPredicate, TPredicateValue>
-where
-    TCollector: Collector + Send + Sync,
-    TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
-    TPredicateValue: 'static + FastValue,
-{
-    // That's the type of our result.
-    // Our standard deviation will be a float.
-    type Fruit = TCollector::Fruit;
-
-    type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
-
-    fn for_segment(
-        &self,
-        segment_local_id: u32,
-        segment_reader: &SegmentReader,
-    ) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
-        let schema = segment_reader.schema();
-        let field_entry = schema.get_field_entry(self.field);
-        if !field_entry.is_fast() {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is not a fast field.",
-                field_entry.name()
-            )));
-        }
-        let requested_type = TPredicateValue::to_type();
-        let field_schema_type = field_entry.field_type().value_type();
-        if requested_type != field_schema_type {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is of type {:?}!={:?}",
-                field_entry.name(),
-                requested_type,
-                field_schema_type
-            )));
-        }
-
-        let fast_field_reader = segment_reader
-            .fast_fields()
-            .typed_fast_field_reader(self.field)
-            .ok_or_else(|| {
-                TantivyError::SchemaError(format!(
-                    "{:?} is not declared as a fast field in the schema.",
-                    self.field
-                ))
-            })?;
-
-        let segment_collector = self
-            .collector
-            .for_segment(segment_local_id, segment_reader)?;
-
-        Ok(FilterSegmentCollector {
-            fast_field_reader,
-            segment_collector,
-            predicate: self.predicate,
-            t_predicate_value: PhantomData,
-        })
-    }
-
-    fn requires_scoring(&self) -> bool {
-        self.collector.requires_scoring()
-    }
-
-    fn merge_fruits(
-        &self,
-        segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<TCollector::Fruit> {
-        self.collector.merge_fruits(segment_fruits)
-    }
-}
-
-pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
-where
-    TPredicate: 'static,
-    TPredicateValue: 'static + FastValue,
-{
-    fast_field_reader: FastFieldReader<TPredicateValue>,
-    segment_collector: TSegmentCollector,
-    predicate: &'static TPredicate,
-    t_predicate_value: PhantomData<TPredicateValue>,
-}
-
-impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
-    for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
-where
-    TSegmentCollector: SegmentCollector,
-    TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
-    TPredicateValue: 'static + FastValue,
-{
-    type Fruit = TSegmentCollector::Fruit;
-
-    fn collect(&mut self, doc: u32, score: Score) {
-        let value = self.fast_field_reader.get(doc);
-        if (self.predicate)(value) {
-            self.segment_collector.collect(doc, score)
-        }
-    }
-
-    fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
-        self.segment_collector.harvest()
-    }
-}
src/collector/int_facet_collector.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
+use std::cmp::Eq;
+use std::collections::HashMap;
+use std::hash::Hash;
+
+use collector::Collector;
+use fastfield::FastFieldReader;
+use schema::Field;
+
+use DocId;
+use Result;
+use Score;
+use SegmentReader;
+use SegmentLocalId;
+
+/// Facet collector for i64/u64 fast field
+pub struct IntFacetCollector<T>
+where
+    T: FastFieldReader,
+    T::ValueType: Eq + Hash,
+{
+    counters: HashMap<T::ValueType, u64>,
+    field: Field,
+    ff_reader: Option<T>,
+}
+
+impl<T> IntFacetCollector<T>
+where
+    T: FastFieldReader,
+    T::ValueType: Eq + Hash,
+{
+    /// Creates a new facet collector for aggregating a given field.
+    pub fn new(field: Field) -> IntFacetCollector<T> {
+        IntFacetCollector {
+            counters: HashMap::new(),
+            field: field,
+            ff_reader: None,
+        }
+    }
+}
+
+impl<T> Collector for IntFacetCollector<T>
+where
+    T: FastFieldReader,
+    T::ValueType: Eq + Hash,
+{
+    fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
+        self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
+        Ok(())
+    }
+
+    fn collect(&mut self, doc: DocId, _: Score) {
+        let val = self.ff_reader
+            .as_ref()
+            .expect(
+                "collect() was called before set_segment. \
+                 This should never happen.",
+            )
+            .get(doc);
+        *(self.counters.entry(val).or_insert(0)) += 1;
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use collector::{chain, IntFacetCollector};
+    use query::QueryParser;
+    use fastfield::{I64FastFieldReader, U64FastFieldReader};
+    use schema::{self, FAST, STRING};
+    use Index;
+
+    #[test]
+    // create 10 documents, set num field value to 0 or 1 for even/odd ones
+    // make sure we have facet counters correctly filled
+    fn test_facet_collector_results() {
+
+        let mut schema_builder = schema::Schema::builder();
+        let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
+        let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
+        let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
+        let text_field = schema_builder.add_text_field("text", STRING);
+        let schema = schema_builder.build();
+
+        let index = Index::create_in_ram(schema.clone());
+
+        {
+            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            {
+                for i in 0u64..10u64 {
+                    index_writer.add_document(doc!(
+                        num_field_i64 => ((i as i64) % 3i64) as i64,
+                        num_field_u64 => (i % 2u64) as u64,
+                        num_field_f64 => (i % 4u64) as f64,
+                        text_field => "text"
+                    ));
+                }
+            }
+            assert_eq!(index_writer.commit().unwrap(), 10u64);
+        }
+
+        let searcher = index.reader().searcher();
+        let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
+        let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
+        let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
+
+        {
+            // perform the query
+            let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
+            let mut query_parser = QueryParser::for_index(index, vec![text_field]);
+            let query = query_parser.parse_query("text:text").unwrap();
+            query.search(&searcher, &mut facet_collectors).unwrap();
+        }
+
+        assert_eq!(ffvf_u64.counters[&0], 5);
+        assert_eq!(ffvf_u64.counters[&1], 5);
+        assert_eq!(ffvf_i64.counters[&0], 4);
+        assert_eq!(ffvf_i64.counters[&1], 3);
+        assert_eq!(ffvf_f64.counters[&0.0], 3);
+        assert_eq!(ffvf_f64.counters[&2.0], 2);
+
+    }
+}
@@ -109,13 +109,6 @@ pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
 
 mod facet_collector;
 pub use self::facet_collector::FacetCollector;
-use crate::query::Weight;
-
-mod docset_collector;
-pub use self::docset_collector::DocSetCollector;
-
-mod filter_collector_wrapper;
-pub use self::filter_collector_wrapper::FilterCollector;
 
 /// `Fruit` is the type for the result of our collection.
 /// e.g. `usize` for the `Count` collector.
@@ -139,13 +132,13 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
 /// The collection logic itself is in the `SegmentCollector`.
 ///
 /// Segments are not guaranteed to be visited in any specific order.
-pub trait Collector: Sync + Send {
+pub trait Collector: Sync {
     /// `Fruit` is the type for the result of our collection.
     /// e.g. `usize` for the `Count` collector.
     type Fruit: Fruit;
 
     /// Type of the `SegmentCollector` associated to this collector.
-    type Child: SegmentCollector;
+    type Child: SegmentCollector<Fruit = Self::Fruit>;
 
     /// `set_segment` is called before beginning to enumerate
     /// on this segment.
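Aside (illustrative, not part of the diff): the hunk above constrains the per-segment fruit to equal the collector's fruit (`type Child: SegmentCollector<Fruit = Self::Fruit>`), which is what lets `merge_fruits` take a plain `Vec<Self::Fruit>` in the following hunk. The sketch below uses local stand-in traits, not tantivy's real definitions, to show the shape of that relationship with a trivial counting collector.

```rust
// Local stand-ins mirroring the trait shape discussed above.
trait SegmentCollector {
    type Fruit;
    fn collect(&mut self, doc: u32);
    fn harvest(self) -> Self::Fruit;
}

trait Collector {
    type Fruit;
    // The per-segment fruit is forced to match the collector's fruit.
    type Child: SegmentCollector<Fruit = Self::Fruit>;
    fn for_segment(&self) -> Self::Child;
    fn merge_fruits(&self, fruits: Vec<Self::Fruit>) -> Self::Fruit;
}

struct Count;
struct SegmentCount(usize);

impl SegmentCollector for SegmentCount {
    type Fruit = usize;
    fn collect(&mut self, _doc: u32) {
        self.0 += 1;
    }
    fn harvest(self) -> usize {
        self.0
    }
}

impl Collector for Count {
    type Fruit = usize;
    type Child = SegmentCount;
    fn for_segment(&self) -> SegmentCount {
        SegmentCount(0)
    }
    fn merge_fruits(&self, fruits: Vec<usize>) -> usize {
        fruits.into_iter().sum()
    }
}

fn main() {
    let collector = Count;
    // Pretend we visited two segments with 3 and 2 matching docs.
    let mut a = collector.for_segment();
    let mut b = collector.for_segment();
    for doc in 0..3 { a.collect(doc); }
    for doc in 0..2 { b.collect(doc); }
    assert_eq!(collector.merge_fruits(vec![a.harvest(), b.harvest()]), 5);
}
```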
@@ -160,33 +153,7 @@ pub trait Collector: Sync + Send {
 
     /// Combines the fruit associated to the collection of each segments
     /// into one fruit.
-    fn merge_fruits(
-        &self,
-        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit>;
-
-    /// Created a segment collector and
-    fn collect_segment(
-        &self,
-        weight: &dyn Weight,
-        segment_ord: u32,
-        reader: &SegmentReader,
-    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
-        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
-
-        if let Some(delete_bitset) = reader.delete_bitset() {
-            weight.for_each(reader, &mut |doc, score| {
-                if delete_bitset.is_alive(doc) {
-                    segment_collector.collect(doc, score);
-                }
-            })?;
-        } else {
-            weight.for_each(reader, &mut |doc, score| {
-                segment_collector.collect(doc, score);
-            })?;
-        }
-        Ok(segment_collector.harvest())
-    }
+    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
 }
 
 /// The `SegmentCollector` is the trait in charge of defining the
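Aside (illustrative, not part of the diff): the removed `collect_segment` default method drives a per-segment collector from a weight, skipping documents marked in the delete bitset. A self-contained sketch of the same idea, with a `bool` slice standing in for tantivy's delete bitset and a closure standing in for `Weight::for_each`:

```rust
// Feed every live doc id to `collect`; when an `alive` bitmap is present,
// deleted documents (alive == false) are skipped.
fn collect_segment<F: FnMut(u32)>(docs: &[u32], alive: Option<&[bool]>, mut collect: F) {
    match alive {
        Some(bitset) => {
            for &doc in docs {
                if bitset[doc as usize] {
                    collect(doc);
                }
            }
        }
        None => {
            for &doc in docs {
                collect(doc);
            }
        }
    }
}

fn main() {
    let mut collected = Vec::new();
    let alive = [true, false, true, true];
    collect_segment(&[0, 1, 2, 3], Some(&alive[..]), |doc| collected.push(doc));
    assert_eq!(collected, vec![0, 2, 3]); // doc 1 was deleted
}
```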
@@ -233,11 +200,11 @@ where
 
     fn merge_fruits(
         &self,
-        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+        children: Vec<(Left::Fruit, Right::Fruit)>,
     ) -> crate::Result<(Left::Fruit, Right::Fruit)> {
         let mut left_fruits = vec![];
         let mut right_fruits = vec![];
-        for (left_fruit, right_fruit) in segment_fruits {
+        for (left_fruit, right_fruit) in children {
             left_fruits.push(left_fruit);
             right_fruits.push(right_fruit);
         }
@@ -291,10 +258,7 @@ where
         self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
     }
 
-    fn merge_fruits(
-        &self,
-        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit> {
+    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
         let mut one_fruits = vec![];
         let mut two_fruits = vec![];
         let mut three_fruits = vec![];
@@ -361,10 +325,7 @@ where
             || self.3.requires_scoring()
     }
 
-    fn merge_fruits(
-        &self,
-        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit> {
+    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
         let mut one_fruits = vec![];
         let mut two_fruits = vec![];
         let mut three_fruits = vec![];
@@ -34,13 +34,13 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 
     fn merge_fruits(
         &self,
-        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+        children: Vec<<Self as Collector>::Fruit>,
     ) -> crate::Result<Box<dyn Fruit>> {
-        let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
+        let typed_fruit: Vec<TCollector::Fruit> = children
             .into_iter()
             .map(|untyped_fruit| {
                 untyped_fruit
-                    .downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
+                    .downcast::<TCollector::Fruit>()
                     .map(|boxed_but_typed| *boxed_but_typed)
                     .map_err(|_| {
                         TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
     type Fruit = Box<dyn Fruit>;
 
-    fn collect(&mut self, doc: u32, score: Score) {
+    fn collect(&mut self, doc: u32, score: f32) {
         self.as_mut().collect(doc, score);
     }
 
@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
 }
 
 pub trait BoxableSegmentCollector {
-    fn collect(&mut self, doc: u32, score: Score);
+    fn collect(&mut self, doc: u32, score: f32);
     fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
 }
 
@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
 impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
     for SegmentCollectorWrapper<TSegmentCollector>
 {
-    fn collect(&mut self, doc: u32, score: Score) {
+    fn collect(&mut self, doc: u32, score: f32) {
         self.0.collect(doc, score);
     }
 
@@ -259,7 +259,7 @@ mod tests {
 
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_for_tests().unwrap();
+            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
             index_writer.add_document(doc!(text=>"abc"));
             index_writer.add_document(doc!(text=>"abc abc abc"));
             index_writer.add_document(doc!(text=>"abc abc"));
@@ -8,13 +8,6 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 
-use crate::collector::{FilterCollector, TopDocs};
-use crate::query::QueryParser;
-use crate::schema::{Schema, FAST, TEXT};
-use crate::DateTime;
-use crate::{doc, Index};
-use std::str::FromStr;
-
 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
     compute_score: true,
 };
@@ -23,54 +16,6 @@ pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
     compute_score: true,
 };
 
-#[test]
-pub fn test_filter_collector() {
-    let mut schema_builder = Schema::builder();
-    let title = schema_builder.add_text_field("title", TEXT);
-    let price = schema_builder.add_u64_field("price", FAST);
-    let date = schema_builder.add_date_field("date", FAST);
-    let schema = schema_builder.build();
-    let index = Index::create_in_ram(schema);
-
-    let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
-    index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_str("1898-04-09T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_str("2020-04-09T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_str("2019-04-20T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_str("2018-04-09T00:00:00+00:00").unwrap()));
-    assert!(index_writer.commit().is_ok());
-
-    let reader = index.reader().unwrap();
-    let searcher = reader.searcher();
-
-    let query_parser = QueryParser::for_index(&index, vec![title]);
-    let query = query_parser.parse_query("diary").unwrap();
-    let filter_some_collector = FilterCollector::new(
-        price,
-        &|value: u64| value > 20_120u64,
-        TopDocs::with_limit(2),
-    );
-    let top_docs = searcher.search(&query, &filter_some_collector).unwrap();
-
-    assert_eq!(top_docs.len(), 1);
-    assert_eq!(top_docs[0].1, DocAddress(0, 1));
-
-    let filter_all_collector: FilterCollector<_, _, u64> =
-        FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
-    let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
-
-    assert_eq!(filtered_top_docs.len(), 0);
-
-    fn date_filter(value: DateTime) -> bool {
-        (value - DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()).num_weeks() > 0
-    }
-
-    let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
-    let filtered_date_docs = searcher.search(&query, &filter_dates_collector).unwrap();
-
-    assert_eq!(filtered_date_docs.len(), 2);
-}
-
 /// Stores all of the doc ids.
 /// This collector is only used for tests.
 /// It is unusable in pr
@@ -240,15 +185,12 @@ impl Collector for BytesFastFieldTestCollector {
         _segment_local_id: u32,
         segment_reader: &SegmentReader,
     ) -> crate::Result<BytesFastFieldSegmentCollector> {
-        let reader = segment_reader
-            .fast_fields()
-            .bytes(self.field)
-            .ok_or_else(|| {
-                crate::TantivyError::InvalidArgument("Field is not a bytes fast field.".to_string())
-            })?;
         Ok(BytesFastFieldSegmentCollector {
             vals: Vec::new(),
-            reader,
+            reader: segment_reader
+                .fast_fields()
+                .bytes(self.field)
+                .expect("Field is not a bytes fast field."),
         })
     }
 
@@ -264,7 +206,7 @@ impl Collector for BytesFastFieldTestCollector {
 impl SegmentCollector for BytesFastFieldSegmentCollector {
     type Fruit = Vec<u8>;
 
-    fn collect(&mut self, doc: u32, _score: Score) {
+    fn collect(&mut self, doc: u32, _score: f32) {
         let data = self.reader.get_bytes(doc);
         self.vals.extend(data);
     }
@@ -18,9 +18,9 @@ use std::collections::BinaryHeap;
 /// Two elements are equal if their feature is equal, and regardless of whether `doc`
 /// is equal. This should be perfectly fine for this usage, but let's make sure this
 /// struct is never public.
-pub(crate) struct ComparableDoc<T, D> {
-    pub feature: T,
-    pub doc: D,
+struct ComparableDoc<T, D> {
+    feature: T,
+    doc: D,
 }
 
 impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
@@ -56,8 +56,7 @@ impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
 impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
 
 pub(crate) struct TopCollector<T> {
-    pub limit: usize,
-    pub offset: usize,
+    limit: usize,
     _marker: PhantomData<T>,
 }
 
@@ -73,20 +72,14 @@ where
         if limit < 1 {
             panic!("Limit must be strictly greater than 0.");
         }
-        Self {
+        TopCollector {
             limit,
-            offset: 0,
             _marker: PhantomData,
         }
     }
 
-    /// Skip the first "offset" documents when collecting.
-    ///
-    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
-    /// Lucene's TopDocsCollector.
-    pub fn and_offset(mut self, offset: usize) -> TopCollector<T> {
-        self.offset = offset;
-        self
+    pub fn limit(&self) -> usize {
+        self.limit
     }
 
     pub fn merge_fruits(
@@ -99,7 +92,7 @@ where
         let mut top_collector = BinaryHeap::new();
         for child_fruit in children {
             for (feature, doc) in child_fruit {
-                if top_collector.len() < (self.limit + self.offset) {
+                if top_collector.len() < self.limit {
                     top_collector.push(ComparableDoc { feature, doc });
                 } else if let Some(mut head) = top_collector.peek_mut() {
                     if head.feature < feature {
@@ -111,7 +104,6 @@ where
         Ok(top_collector
             .into_sorted_vec()
            .into_iter()
-           .skip(self.offset)
            .map(|cdoc| (cdoc.feature, cdoc.doc))
            .collect())
     }
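Aside (illustrative, not part of the diff): the left-hand side of the two hunks above implements limit + offset merging by keeping `limit + offset` best entries in a heap and skipping the first `offset` after sorting. A self-contained sketch of that idea over plain `u64` scores (no `DocAddress`, names are made up for the example):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn merge_top(children: Vec<Vec<u64>>, limit: usize, offset: usize) -> Vec<u64> {
    // Min-heap holding the best `limit + offset` scores seen so far.
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::new();
    for child in children {
        for score in child {
            if heap.len() < limit + offset {
                heap.push(Reverse(score));
            } else if let Some(mut head) = heap.peek_mut() {
                if head.0 < score {
                    *head = Reverse(score); // evict the current worst entry
                }
            }
        }
    }
    let mut best: Vec<u64> = heap.into_iter().map(|Reverse(s)| s).collect();
    best.sort_unstable_by(|a, b| b.cmp(a)); // best first
    best.into_iter().skip(offset).take(limit).collect()
}

fn main() {
    let merged = merge_top(vec![vec![9, 8, 7], vec![6, 5]], 2, 1);
    assert_eq!(merged, vec![8, 7]); // the offset skips the single best hit
}
```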
@@ -121,23 +113,7 @@ where
         segment_id: SegmentLocalId,
         _: &SegmentReader,
     ) -> crate::Result<TopSegmentCollector<F>> {
-        Ok(TopSegmentCollector::new(
-            segment_id,
-            self.limit + self.offset,
-        ))
-    }
-
-    /// Create a new TopCollector with the same limit and offset.
-    ///
-    /// Ideally we would use Into but the blanket implementation seems to cause the Scorer traits
-    /// to fail.
-    #[doc(hidden)]
-    pub(crate) fn into_tscore<TScore: PartialOrd + Clone>(self) -> TopCollector<TScore> {
-        TopCollector {
-            limit: self.limit,
-            offset: self.offset,
-            _marker: PhantomData,
-        }
+        Ok(TopSegmentCollector::new(segment_id, self.limit))
     }
 }
 
@@ -211,7 +187,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
 
 #[cfg(test)]
 mod tests {
-    use super::{TopCollector, TopSegmentCollector};
+    use super::TopSegmentCollector;
     use crate::DocAddress;
 
     #[test]
@@ -272,48 +248,6 @@ mod tests {
             top_collector_limit_3.harvest()[..2].to_vec(),
         );
     }
-
-    #[test]
-    fn test_top_collector_with_limit_and_offset() {
-        let collector = TopCollector::with_limit(2).and_offset(1);
-
-        let results = collector
-            .merge_fruits(vec![vec![
-                (0.9, DocAddress(0, 1)),
-                (0.8, DocAddress(0, 2)),
-                (0.7, DocAddress(0, 3)),
-                (0.6, DocAddress(0, 4)),
-                (0.5, DocAddress(0, 5)),
-            ]])
-            .unwrap();
-
-        assert_eq!(
-            results,
-            vec![(0.8, DocAddress(0, 2)), (0.7, DocAddress(0, 3)),]
-        );
-    }
-
-    #[test]
-    fn test_top_collector_with_limit_larger_than_set_and_offset() {
-        let collector = TopCollector::with_limit(2).and_offset(1);
-
-        let results = collector
-            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
-            .unwrap();
-
-        assert_eq!(results, vec![(0.8, DocAddress(0, 2)),]);
-    }
-
-    #[test]
-    fn test_top_collector_with_limit_and_offset_larger_than_set() {
-        let collector = TopCollector::with_limit(2).and_offset(20);
-
-        let results = collector
-            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
-            .unwrap();
-
-        assert_eq!(results, vec![]);
-    }
 }
 
 #[cfg(all(test, feature = "unstable"))]
@@ -1,82 +1,19 @@
 use super::Collector;
-use crate::collector::top_collector::{ComparableDoc, TopCollector};
+use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
+use crate::collector::top_collector::TopCollector;
+use crate::collector::top_collector::TopSegmentCollector;
 use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
     CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
 };
 use crate::fastfield::FastFieldReader;
-use crate::query::Weight;
 use crate::schema::Field;
 use crate::DocAddress;
 use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
-use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
 use std::fmt;
-use std::{collections::BinaryHeap, marker::PhantomData};
 
-struct FastFieldConvertCollector<
-    TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
-    TFastValue: FastValue,
-> {
-    pub collector: TCollector,
-    pub field: Field,
-    pub fast_value: std::marker::PhantomData<TFastValue>,
-}
-
-impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
-where
-    TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
-    TFastValue: FastValue + 'static,
-{
-    type Fruit = Vec<(TFastValue, DocAddress)>;
-
-    type Child = TCollector::Child;
-
-    fn for_segment(
-        &self,
-        segment_local_id: crate::SegmentLocalId,
-        segment: &SegmentReader,
-    ) -> crate::Result<Self::Child> {
-        let schema = segment.schema();
-        let field_entry = schema.get_field_entry(self.field);
-        if !field_entry.is_fast() {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is not a fast field.",
-                field_entry.name()
-            )));
-        }
-        let schema_type = TFastValue::to_type();
-        let requested_type = field_entry.field_type().value_type();
-        if schema_type != requested_type {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is of type {:?}!={:?}",
-                field_entry.name(),
-                schema_type,
-                requested_type
-            )));
-        }
-        self.collector.for_segment(segment_local_id, segment)
-    }
-
-    fn requires_scoring(&self) -> bool {
-        self.collector.requires_scoring()
-    }
-
-    fn merge_fruits(
-        &self,
-        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit> {
-        let raw_result = self.collector.merge_fruits(segment_fruits)?;
-        let transformed_result = raw_result
-            .into_iter()
-            .map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
-            .collect::<Vec<_>>();
-        Ok(transformed_result)
-    }
-}
-
 /// The `TopDocs` collector keeps track of the top `K` documents
 /// sorted by their score.
@@ -99,7 +36,7 @@ where
 /// let schema = schema_builder.build();
 /// let index = Index::create_in_ram(schema);
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 /// index_writer.add_document(doc!(title => "The Name of the Wind"));
 /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
 /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -113,18 +50,14 @@ where
 /// let query = query_parser.parse_query("diary").unwrap();
 /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
 ///
-/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
-/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
+/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
+/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
 /// ```
 pub struct TopDocs(TopCollector<Score>);
 
 impl fmt::Debug for TopDocs {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "TopDocs(limit={}, offset={})",
-            self.0.limit, self.0.offset
-        )
+        write!(f, "TopDocs({})", self.0.limit())
     }
 }
 
@@ -133,8 +66,8 @@ struct ScorerByFastFieldReader {
 }
 
 impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
-    fn score(&mut self, doc: DocId) -> u64 {
-        self.ff_reader.get(doc)
+    fn score(&self, doc: DocId) -> u64 {
+        self.ff_reader.get_u64(u64::from(doc))
     }
 }
 
@@ -148,10 +81,10 @@ impl CustomScorer<u64> for ScorerByField {
     fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
         let ff_reader = segment_reader
             .fast_fields()
-            .u64_lenient(self.field)
+            .u64(self.field)
             .ok_or_else(|| {
                 crate::TantivyError::SchemaError(format!(
-                    "Field requested ({:?}) is not a fast field.",
+                    "Field requested ({:?}) is not a i64/u64 fast field.",
                     self.field
                 ))
             })?;
@@ -168,57 +101,8 @@ impl TopDocs {
         TopDocs(TopCollector::with_limit(limit))
     }
 
-    /// Skip the first "offset" documents when collecting.
-    ///
-    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
-    /// Lucene's TopDocsCollector.
-    ///
-    /// # Example
-    ///
-    /// ```rust
-    /// use tantivy::collector::TopDocs;
-    /// use tantivy::query::QueryParser;
-    /// use tantivy::schema::{Schema, TEXT};
-    /// use tantivy::{doc, DocAddress, Index};
-    ///
-    /// let mut schema_builder = Schema::builder();
-    /// let title = schema_builder.add_text_field("title", TEXT);
-    /// let schema = schema_builder.build();
-    /// let index = Index::create_in_ram(schema);
-    ///
-    /// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
-    /// index_writer.add_document(doc!(title => "The Name of the Wind"));
-    /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
-    /// index_writer.add_document(doc!(title => "A Dairy Cow"));
-    /// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
-    /// index_writer.add_document(doc!(title => "The Diary of Lena Mukhina"));
-    /// assert!(index_writer.commit().is_ok());
-    ///
-    /// let reader = index.reader().unwrap();
-    /// let searcher = reader.searcher();
-    ///
-    /// let query_parser = QueryParser::for_index(&index, vec![title]);
-    /// let query = query_parser.parse_query("diary").unwrap();
-    /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
-    ///
-    /// assert_eq!(top_docs.len(), 2);
-    /// assert_eq!(top_docs[0].1, DocAddress(0, 4));
-    /// assert_eq!(top_docs[1].1, DocAddress(0, 3));
-    /// ```
-    pub fn and_offset(self, offset: usize) -> TopDocs {
-        TopDocs(self.0.and_offset(offset))
-    }
-
     /// Set top-K to rank documents by a given fast field.
     ///
-    /// If the field is not a fast or does not exist, this method returns successfully (it is not aware of any schema).
-    /// An error will be returned at the moment of search.
-    ///
-    /// If the field is a FAST field but not a u64 field, search will return successfully but it will return
-    /// returns a monotonic u64-representation (ie. the order is still correct) of the requested field type.
-    ///
-    /// # Example
-    ///
     /// ```rust
    /// # use tantivy::schema::{Schema, FAST, TEXT};
    /// # use tantivy::{doc, Index, DocAddress};
@@ -234,13 +118,13 @@ impl TopDocs {
    /// # let schema = schema_builder.build();
    /// #
    /// # let index = Index::create_in_ram(schema);
-   /// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
+   /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
    /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
    /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
    /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
    /// # assert!(index_writer.commit().is_ok());
-   /// # let reader = index.reader()?;
+   /// # let reader = index.reader().unwrap();
    /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
    /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
    /// # assert_eq!(top_docs,
@@ -248,20 +132,25 @@ impl TopDocs {
    /// #     (80u64, DocAddress(0u32, 3))]);
    /// # Ok(())
    /// # }
+   ///
+   ///
    /// /// Searches the document matching the given query, and
    /// /// collects the top 10 documents, order by the u64-`field`
    /// /// given in argument.
+   /// ///
+   /// /// `field` is required to be a FAST field.
    /// fn docs_sorted_by_rating(searcher: &Searcher,
    ///                          query: &dyn Query,
-   ///                          rating_field: Field)
+   ///                          sort_by_field: Field)
    ///     -> tantivy::Result<Vec<(u64, DocAddress)>> {
    ///
    ///     // This is where we build our topdocs collector
    ///     //
-   ///     // Note the `rating_field` needs to be a FAST field here.
-   ///     let top_books_by_rating = TopDocs
+   ///     // Note the generics parameter that needs to match the
+   ///     // type `sort_by_field`.
+   ///     let top_docs_by_rating = TopDocs
    ///             ::with_limit(10)
-   ///             .order_by_u64_field(rating_field);
+   ///             .order_by_u64_field(sort_by_field);
    ///
    ///     // ... and here are our documents. Note this is a simple vec.
    ///     // The `u64` in the pair is the value of our fast field for
@@ -271,105 +160,21 @@ impl TopDocs {
|
|||||||
/// // length of 10, or less if not enough documents matched the
|
/// // length of 10, or less if not enough documents matched the
|
||||||
/// // query.
|
/// // query.
|
||||||
/// let resulting_docs: Vec<(u64, DocAddress)> =
|
/// let resulting_docs: Vec<(u64, DocAddress)> =
|
||||||
/// searcher.search(query, &top_books_by_rating)?;
|
/// searcher.search(query, &top_docs_by_rating)?;
|
||||||
///
|
///
|
||||||
/// Ok(resulting_docs)
|
/// Ok(resulting_docs)
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// # See also
|
/// # Panics
|
||||||
///
|
///
|
||||||
/// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
/// May panic if the field requested is not a fast field.
|
||||||
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
|
///
|
||||||
pub fn order_by_u64_field(
|
pub fn order_by_u64_field(
|
||||||
self,
|
self,
|
||||||
field: Field,
|
field: Field,
|
||||||
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
|
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
|
||||||
CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore())
|
self.custom_score(ScorerByField { field })
|
||||||
}
|
|
||||||
|
|
||||||
/// Set top-K to rank documents by a given fast field.
|
|
||||||
///
|
|
||||||
/// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
|
|
||||||
/// but an explicit error will be returned at the moment of collection.
|
|
||||||
///
|
|
||||||
/// Note that this method is a generic. The requested fast field type will be often
|
|
||||||
/// inferred in your code by the rust compiler.
|
|
||||||
///
|
|
||||||
/// Implementation-wise, for performance reason, tantivy will manipulate the u64 representation of your fast
|
|
||||||
/// field until the last moment.
|
|
||||||
///
|
|
||||||
/// # Example
|
|
||||||
///
|
|
||||||
/// ```rust
|
|
||||||
/// # use tantivy::schema::{Schema, FAST, TEXT};
|
|
||||||
/// # use tantivy::{doc, Index, DocAddress};
|
|
||||||
/// # use tantivy::query::{Query, AllQuery};
|
|
||||||
/// use tantivy::Searcher;
|
|
||||||
/// use tantivy::collector::TopDocs;
|
|
||||||
/// use tantivy::schema::Field;
|
|
||||||
///
|
|
||||||
/// # fn main() -> tantivy::Result<()> {
|
|
||||||
/// # let mut schema_builder = Schema::builder();
|
|
||||||
/// # let title = schema_builder.add_text_field("company", TEXT);
|
|
||||||
/// # let rating = schema_builder.add_i64_field("revenue", FAST);
|
|
||||||
/// # let schema = schema_builder.build();
|
|
||||||
/// #
|
|
||||||
/// # let index = Index::create_in_ram(schema);
|
|
||||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
|
||||||
/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
|
|
||||||
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
|
|
||||||
/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
|
|
||||||
/// # assert!(index_writer.commit().is_ok());
|
|
||||||
/// # let reader = index.reader()?;
|
|
||||||
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
|
|
||||||
/// # assert_eq!(top_docs,
|
|
||||||
/// # vec![(119_000_000i64, DocAddress(0, 1)),
|
|
||||||
/// # (92_000_000i64, DocAddress(0, 0))]);
|
|
||||||
/// # Ok(())
|
|
||||||
/// # }
|
|
||||||
/// /// Searches the document matching the given query, and
|
|
||||||
/// /// collects the top 10 documents, order by the u64-`field`
|
|
||||||
/// /// given in argument.
|
|
||||||
/// fn docs_sorted_by_revenue(searcher: &Searcher,
|
|
||||||
/// query: &dyn Query,
|
|
||||||
/// revenue_field: Field)
|
|
||||||
/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
|
|
||||||
///
|
|
||||||
/// // This is where we build our topdocs collector
|
|
||||||
/// //
|
|
||||||
/// // Note the generics parameter that needs to match the
|
|
||||||
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
|
|
||||||
/// let top_company_by_revenue = TopDocs
|
|
||||||
/// ::with_limit(2)
|
|
||||||
/// .order_by_fast_field(revenue_field);
|
|
||||||
///
|
|
||||||
/// // ... and here are our documents. Note this is a simple vec.
|
|
||||||
/// // The `i64` in the pair is the value of our fast field for
|
|
||||||
/// // each documents.
|
|
||||||
/// //
|
|
||||||
/// // The vec is sorted decreasingly by `sort_by_field`, and has a
|
|
||||||
/// // length of 10, or less if not enough documents matched the
|
|
||||||
/// // query.
|
|
||||||
/// let resulting_docs: Vec<(i64, DocAddress)> =
|
|
||||||
/// searcher.search(query, &top_company_by_revenue)?;
|
|
||||||
///
|
|
||||||
/// Ok(resulting_docs)
|
|
||||||
/// }
|
|
||||||
/// ```
|
|
||||||
pub fn order_by_fast_field<TFastValue>(
|
|
||||||
self,
|
|
||||||
fast_field: Field,
|
|
||||||
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
|
|
||||||
where
|
|
||||||
TFastValue: FastValue + 'static,
|
|
||||||
{
|
|
||||||
let u64_collector = self.order_by_u64_field(fast_field);
|
|
||||||
FastFieldConvertCollector {
|
|
||||||
collector: u64_collector,
|
|
||||||
field: fast_field,
|
|
||||||
fast_value: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
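The removed `order_by_fast_field` above keeps working on the raw `u64` representation of the fast field and only converts back to the requested type when the fruits are harvested. That relies on an order-preserving mapping between `i64` and `u64`; the sketch below shows the standard sign-bit trick (the same idea behind the `i64_to_u64`/`u64_to_i64` helpers exercised later in this diff) — the function bodies are illustrative, not copied from tantivy:

```rust
/// Order-preserving mapping between `i64` and `u64`: flipping the sign
/// bit shifts the signed range onto the unsigned range, so values can be
/// compared, heap-sorted and collected as plain `u64` and converted back
/// "at the last moment".
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    // -63_000_000 ranks below 92_000_000 in both representations.
    assert!(i64_to_u64(-63_000_000) < i64_to_u64(92_000_000));
    assert_eq!(u64_to_i64(i64_to_u64(-63_000_000)), -63_000_000);
}
```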
|
|
||||||
/// Ranks the documents using a custom score.
|
/// Ranks the documents using a custom score.
|
||||||
@@ -414,7 +219,7 @@ impl TopDocs {
|
|||||||
/// fn create_index() -> tantivy::Result<Index> {
|
/// fn create_index() -> tantivy::Result<Index> {
|
||||||
/// let schema = create_schema();
|
/// let schema = create_schema();
|
||||||
/// let index = Index::create_in_ram(schema);
|
/// let index = Index::create_in_ram(schema);
|
||||||
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
||||||
/// let product_name = index.schema().get_field("product_name").unwrap();
|
/// let product_name = index.schema().get_field("product_name").unwrap();
|
||||||
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
||||||
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
|
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
|
||||||
@@ -453,7 +258,7 @@ impl TopDocs {
 /// let popularity: u64 = popularity_reader.get(doc);
 /// // Well.. For the sake of the example we use a simple logarithm
 /// // function.
-/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
+/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
 /// popularity_boost_score * original_score
 /// }
 /// });
@@ -474,9 +279,9 @@ impl TopDocs {
 where
 TScore: 'static + Send + Sync + Clone + PartialOrd,
 TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
-TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
+TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
 {
-TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
+TweakedScoreTopCollector::new(score_tweaker, self.0.limit())
 }
|
|
||||||
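For readers skimming the diff, the shape of a score tweaker is easiest to see in isolation: an outer closure is invoked once per segment (typically to grab a fast field reader), and it returns an inner closure that adjusts every `(doc, score)` pair of that segment. The sketch below mimics that two-stage pattern with plain vectors instead of tantivy types; all names are illustrative:

```rust
type DocId = u32;
type Score = f32;

fn main() {
    // Pretend per-segment data: a popularity value per doc id.
    let popularity_per_segment: Vec<Vec<u64>> = vec![vec![1, 1000], vec![10]];

    // Outer closure: runs once per segment and loads per-segment state.
    let make_segment_tweaker = |segment_ord: usize| {
        let popularities = popularity_per_segment[segment_ord].clone();
        // Inner closure: adjusts the score of each document of that segment.
        move |doc: DocId, original_score: Score| {
            let popularity = popularities[doc as usize];
            let boost = ((2u64 + popularity) as Score).log2();
            boost * original_score
        }
    };

    let tweak = make_segment_tweaker(0);
    // Doc 1 is much more popular than doc 0, so it gets a larger boost.
    assert!(tweak(1, 1.0) > tweak(0, 1.0));
}
```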
/// Ranks the documents using a custom score.
|
/// Ranks the documents using a custom score.
|
||||||
@@ -521,7 +326,7 @@ impl TopDocs {
|
|||||||
/// # fn main() -> tantivy::Result<()> {
|
/// # fn main() -> tantivy::Result<()> {
|
||||||
/// # let schema = create_schema();
|
/// # let schema = create_schema();
|
||||||
/// # let index = Index::create_in_ram(schema);
|
/// # let index = Index::create_in_ram(schema);
|
||||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
||||||
/// # let product_name = index.schema().get_field("product_name").unwrap();
|
/// # let product_name = index.schema().get_field("product_name").unwrap();
|
||||||
/// #
|
/// #
|
||||||
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
||||||
@@ -588,9 +393,9 @@ impl TopDocs {
 where
 TScore: 'static + Send + Sync + Clone + PartialOrd,
 TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
-TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
+TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
 {
-CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
+CustomScoreTopCollector::new(custom_score, self.0.limit())
 }
 }
|
|
||||||
@@ -618,64 +423,6 @@ impl Collector for TopDocs {
|
|||||||
) -> crate::Result<Self::Fruit> {
|
) -> crate::Result<Self::Fruit> {
|
||||||
self.0.merge_fruits(child_fruits)
|
self.0.merge_fruits(child_fruits)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn collect_segment(
|
|
||||||
&self,
|
|
||||||
weight: &dyn Weight,
|
|
||||||
segment_ord: u32,
|
|
||||||
reader: &SegmentReader,
|
|
||||||
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
|
|
||||||
let heap_len = self.0.limit + self.0.offset;
|
|
||||||
let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
|
|
||||||
|
|
||||||
if let Some(delete_bitset) = reader.delete_bitset() {
|
|
||||||
let mut threshold = Score::MIN;
|
|
||||||
weight.for_each_pruning(threshold, reader, &mut |doc, score| {
|
|
||||||
if delete_bitset.is_deleted(doc) {
|
|
||||||
return threshold;
|
|
||||||
}
|
|
||||||
let heap_item = ComparableDoc {
|
|
||||||
feature: score,
|
|
||||||
doc,
|
|
||||||
};
|
|
||||||
if heap.len() < heap_len {
|
|
||||||
heap.push(heap_item);
|
|
||||||
if heap.len() == heap_len {
|
|
||||||
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
|
||||||
}
|
|
||||||
return threshold;
|
|
||||||
}
|
|
||||||
*heap.peek_mut().unwrap() = heap_item;
|
|
||||||
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
|
||||||
threshold
|
|
||||||
})?;
|
|
||||||
} else {
|
|
||||||
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
|
|
||||||
let heap_item = ComparableDoc {
|
|
||||||
feature: score,
|
|
||||||
doc,
|
|
||||||
};
|
|
||||||
if heap.len() < heap_len {
|
|
||||||
heap.push(heap_item);
|
|
||||||
// TODO the threshold is suboptimal for heap.len == heap_len
|
|
||||||
if heap.len() == heap_len {
|
|
||||||
return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
|
||||||
} else {
|
|
||||||
return Score::MIN;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*heap.peek_mut().unwrap() = heap_item;
|
|
||||||
heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let fruit = heap
|
|
||||||
.into_sorted_vec()
|
|
||||||
.into_iter()
|
|
||||||
.map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))
|
|
||||||
.collect();
|
|
||||||
Ok(fruit)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
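The `collect_segment` specialization removed above maintains a bounded `BinaryHeap` of the best hits and feeds the current worst retained score back to `Weight::for_each_pruning` as a threshold, which is what lets BlockWAND skip whole blocks of low-scoring documents. The bookkeeping can be illustrated on bare `u64` scores; the `TopK` type below is a hypothetical stand-in, not tantivy's collector:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Keeps the `k` largest scores seen so far and exposes the current
/// threshold: once the heap is full, any candidate scoring at or below
/// the smallest retained score can be pruned without being pushed.
struct TopK {
    k: usize,
    heap: BinaryHeap<Reverse<u64>>, // min-heap over the retained scores
}

impl TopK {
    fn new(k: usize) -> TopK {
        TopK { k, heap: BinaryHeap::with_capacity(k) }
    }

    /// Score a candidate must exceed to be worth collecting.
    fn threshold(&self) -> u64 {
        if self.heap.len() < self.k {
            u64::MIN
        } else {
            self.heap.peek().map(|Reverse(s)| *s).unwrap_or(u64::MIN)
        }
    }

    fn collect(&mut self, score: u64) {
        if self.heap.len() < self.k {
            self.heap.push(Reverse(score));
        } else if score > self.threshold() {
            // Replace the current minimum with the better candidate.
            *self.heap.peek_mut().unwrap() = Reverse(score);
        }
    }

    fn into_sorted_desc(self) -> Vec<u64> {
        let mut scores: Vec<u64> = self.heap.into_iter().map(|Reverse(s)| s).collect();
        scores.sort_unstable_by(|a, b| b.cmp(a));
        scores
    }
}

fn main() {
    let mut top = TopK::new(3);
    for score in [5u64, 1, 9, 7, 3, 8] {
        top.collect(score);
    }
    assert_eq!(top.into_sorted_desc(), vec![9, 8, 7]);
}
```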
|
|
||||||
/// Segment Collector associated to `TopDocs`.
|
/// Segment Collector associated to `TopDocs`.
|
||||||
@@ -685,7 +432,7 @@ impl SegmentCollector for TopScoreSegmentCollector {
|
|||||||
type Fruit = Vec<(Score, DocAddress)>;
|
type Fruit = Vec<(Score, DocAddress)>;
|
||||||
|
|
||||||
fn collect(&mut self, doc: DocId, score: Score) {
|
fn collect(&mut self, doc: DocId, score: Score) {
|
||||||
self.0.collect(doc, score);
|
self.0.collect(doc, score)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn harvest(self) -> Vec<(Score, DocAddress)> {
|
fn harvest(self) -> Vec<(Score, DocAddress)> {
|
||||||
@@ -699,10 +446,10 @@ mod tests {
|
|||||||
use crate::collector::Collector;
|
use crate::collector::Collector;
|
||||||
use crate::query::{AllQuery, Query, QueryParser};
|
use crate::query::{AllQuery, Query, QueryParser};
|
||||||
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
||||||
|
use crate::DocAddress;
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::IndexWriter;
|
use crate::IndexWriter;
|
||||||
use crate::Score;
|
use crate::Score;
|
||||||
use crate::{DocAddress, DocId, SegmentReader};
|
|
||||||
|
|
||||||
fn make_index() -> Index {
|
fn make_index() -> Index {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
@@ -711,7 +458,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
|
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
|
||||||
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
|
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
|
||||||
index_writer.add_document(doc!(text_field=>"I like Droopy"));
|
index_writer.add_document(doc!(text_field=>"I like Droopy"));
|
||||||
@@ -720,15 +467,8 @@ mod tests {
|
|||||||
index
|
index
|
||||||
}
|
}
|
||||||
|
|
||||||
fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
|
|
||||||
for (result, expected) in results.iter().zip(expected.iter()) {
|
|
||||||
assert_eq!(result.1, expected.1);
|
|
||||||
crate::assert_nearly_equals!(result.0, expected.0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_top_collector_not_at_capacity_without_offset() {
|
fn test_top_collector_not_at_capacity() {
|
||||||
let index = make_index();
|
let index = make_index();
|
||||||
let field = index.schema().get_field("text").unwrap();
|
let field = index.schema().get_field("text").unwrap();
|
||||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
let query_parser = QueryParser::for_index(&index, vec![field]);
|
||||||
@@ -739,31 +479,16 @@ mod tests {
|
|||||||
.searcher()
|
.searcher()
|
||||||
.search(&text_query, &TopDocs::with_limit(4))
|
.search(&text_query, &TopDocs::with_limit(4))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_results_equals(
|
assert_eq!(
|
||||||
&score_docs,
|
score_docs,
|
||||||
&[
|
vec![
|
||||||
(0.81221175, DocAddress(0u32, 1)),
|
(0.81221175, DocAddress(0u32, 1)),
|
||||||
(0.5376842, DocAddress(0u32, 2)),
|
(0.5376842, DocAddress(0u32, 2)),
|
||||||
(0.48527452, DocAddress(0, 0)),
|
(0.48527452, DocAddress(0, 0))
|
||||||
],
|
]
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_top_collector_not_at_capacity_with_offset() {
|
|
||||||
let index = make_index();
|
|
||||||
let field = index.schema().get_field("text").unwrap();
|
|
||||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
|
||||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
|
||||||
let score_docs: Vec<(Score, DocAddress)> = index
|
|
||||||
.reader()
|
|
||||||
.unwrap()
|
|
||||||
.searcher()
|
|
||||||
.search(&text_query, &TopDocs::with_limit(4).and_offset(2))
|
|
||||||
.unwrap();
|
|
||||||
assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_top_collector_at_capacity() {
|
fn test_top_collector_at_capacity() {
|
||||||
let index = make_index();
|
let index = make_index();
|
||||||
@@ -776,33 +501,12 @@ mod tests {
|
|||||||
.searcher()
|
.searcher()
|
||||||
.search(&text_query, &TopDocs::with_limit(2))
|
.search(&text_query, &TopDocs::with_limit(2))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_results_equals(
|
assert_eq!(
|
||||||
&score_docs,
|
score_docs,
|
||||||
&[
|
vec![
|
||||||
(0.81221175, DocAddress(0u32, 1)),
|
(0.81221175, DocAddress(0u32, 1)),
|
||||||
(0.5376842, DocAddress(0u32, 2)),
|
(0.5376842, DocAddress(0u32, 2)),
|
||||||
],
|
]
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_top_collector_at_capacity_with_offset() {
|
|
||||||
let index = make_index();
|
|
||||||
let field = index.schema().get_field("text").unwrap();
|
|
||||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
|
||||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
|
||||||
let score_docs: Vec<(Score, DocAddress)> = index
|
|
||||||
.reader()
|
|
||||||
.unwrap()
|
|
||||||
.searcher()
|
|
||||||
.search(&text_query, &TopDocs::with_limit(2).and_offset(1))
|
|
||||||
.unwrap();
|
|
||||||
assert_results_equals(
|
|
||||||
&score_docs[..],
|
|
||||||
&[
|
|
||||||
(0.5376842, DocAddress(0u32, 2)),
|
|
||||||
(0.48527452, DocAddress(0, 0)),
|
|
||||||
],
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -863,8 +567,8 @@ mod tests {
|
|||||||
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
||||||
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
|
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
&top_docs[..],
|
top_docs,
|
||||||
&[
|
vec![
|
||||||
(64, DocAddress(0, 1)),
|
(64, DocAddress(0, 1)),
|
||||||
(16, DocAddress(0, 2)),
|
(16, DocAddress(0, 2)),
|
||||||
(12, DocAddress(0, 0))
|
(12, DocAddress(0, 0))
|
||||||
@@ -872,94 +576,6 @@ mod tests {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_top_field_collector_datetime() -> crate::Result<()> {
|
|
||||||
use std::str::FromStr;
|
|
||||||
let mut schema_builder = Schema::builder();
|
|
||||||
let name = schema_builder.add_text_field("name", TEXT);
|
|
||||||
let birthday = schema_builder.add_date_field("birthday", FAST);
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
name => "Paul Robeson",
|
|
||||||
birthday => pr_birthday
|
|
||||||
));
|
|
||||||
let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
name => "Minnie Riperton",
|
|
||||||
birthday => mr_birthday
|
|
||||||
));
|
|
||||||
index_writer.commit()?;
|
|
||||||
let searcher = index.reader()?.searcher();
|
|
||||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
|
|
||||||
let top_docs: Vec<(crate::DateTime, DocAddress)> =
|
|
||||||
searcher.search(&AllQuery, &top_collector)?;
|
|
||||||
assert_eq!(
|
|
||||||
&top_docs[..],
|
|
||||||
&[
|
|
||||||
(mr_birthday, DocAddress(0, 1)),
|
|
||||||
(pr_birthday, DocAddress(0, 0)),
|
|
||||||
]
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_top_field_collector_i64() -> crate::Result<()> {
|
|
||||||
let mut schema_builder = Schema::builder();
|
|
||||||
let city = schema_builder.add_text_field("city", TEXT);
|
|
||||||
let altitude = schema_builder.add_i64_field("altitude", FAST);
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
city => "georgetown",
|
|
||||||
altitude => -1i64,
|
|
||||||
));
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
city => "tokyo",
|
|
||||||
altitude => 40i64,
|
|
||||||
));
|
|
||||||
index_writer.commit()?;
|
|
||||||
let searcher = index.reader()?.searcher();
|
|
||||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
|
|
||||||
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
|
||||||
assert_eq!(
|
|
||||||
&top_docs[..],
|
|
||||||
&[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_top_field_collector_f64() -> crate::Result<()> {
|
|
||||||
let mut schema_builder = Schema::builder();
|
|
||||||
let city = schema_builder.add_text_field("city", TEXT);
|
|
||||||
let altitude = schema_builder.add_f64_field("altitude", FAST);
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
city => "georgetown",
|
|
||||||
altitude => -1.0f64,
|
|
||||||
));
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
city => "tokyo",
|
|
||||||
altitude => 40f64,
|
|
||||||
));
|
|
||||||
index_writer.commit()?;
|
|
||||||
let searcher = index.reader()?.searcher();
|
|
||||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
|
|
||||||
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
|
||||||
assert_eq!(
|
|
||||||
&top_docs[..],
|
|
||||||
&[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic]
|
#[should_panic]
|
||||||
fn test_field_does_not_exist() {
|
fn test_field_does_not_exist() {
|
||||||
@@ -982,85 +598,29 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_field_not_fast_field() -> crate::Result<()> {
|
fn test_field_not_fast_field() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
|
let title = schema_builder.add_text_field(TITLE, TEXT);
|
||||||
let size = schema_builder.add_u64_field(SIZE, STORED);
|
let size = schema_builder.add_u64_field(SIZE, STORED);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let (index, _) = index("beer", title, schema, |index_writer| {
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
index_writer.add_document(doc!(
|
||||||
index_writer.add_document(doc!(size=>1u64));
|
title => "bottle of beer",
|
||||||
index_writer.commit()?;
|
size => 12u64,
|
||||||
let searcher = index.reader()?.searcher();
|
));
|
||||||
|
});
|
||||||
|
let searcher = index.reader().unwrap().searcher();
|
||||||
let segment = searcher.segment_reader(0);
|
let segment = searcher.segment_reader(0);
|
||||||
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
||||||
let err = top_collector.for_segment(0, segment).err().unwrap();
|
let err = top_collector.for_segment(0, segment);
|
||||||
assert!(
|
if let Err(crate::TantivyError::SchemaError(msg)) = err {
|
||||||
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field requested (Field(0)) is not a fast field.")
|
assert_eq!(
|
||||||
);
|
msg,
|
||||||
Ok(())
|
"Field requested (Field(1)) is not a i64/u64 fast field."
|
||||||
}
|
);
|
||||||
|
} else {
|
||||||
#[test]
|
assert!(false);
|
||||||
fn test_field_wrong_type() -> crate::Result<()> {
|
}
|
||||||
let mut schema_builder = Schema::builder();
|
|
||||||
let size = schema_builder.add_u64_field(SIZE, STORED);
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(doc!(size=>1u64));
|
|
||||||
index_writer.commit()?;
|
|
||||||
let searcher = index.reader()?.searcher();
|
|
||||||
let segment = searcher.segment_reader(0);
|
|
||||||
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
|
|
||||||
let err = top_collector.for_segment(0, segment).err().unwrap();
|
|
||||||
assert!(
|
|
||||||
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_tweak_score_top_collector_with_offset() {
|
|
||||||
let index = make_index();
|
|
||||||
let field = index.schema().get_field("text").unwrap();
|
|
||||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
|
||||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
|
||||||
let collector = TopDocs::with_limit(2).and_offset(1).tweak_score(
|
|
||||||
move |_segment_reader: &SegmentReader| move |doc: DocId, _original_score: Score| doc,
|
|
||||||
);
|
|
||||||
let score_docs: Vec<(u32, DocAddress)> = index
|
|
||||||
.reader()
|
|
||||||
.unwrap()
|
|
||||||
.searcher()
|
|
||||||
.search(&text_query, &collector)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
score_docs,
|
|
||||||
vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_custom_score_top_collector_with_offset() {
|
|
||||||
let index = make_index();
|
|
||||||
let field = index.schema().get_field("text").unwrap();
|
|
||||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
|
||||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
|
||||||
let collector = TopDocs::with_limit(2)
|
|
||||||
.and_offset(1)
|
|
||||||
.custom_score(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
|
|
||||||
let score_docs: Vec<(u32, DocAddress)> = index
|
|
||||||
.reader()
|
|
||||||
.unwrap()
|
|
||||||
.searcher()
|
|
||||||
.search(&text_query, &collector)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
score_docs,
|
|
||||||
vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn index(
|
fn index(
|
||||||
@@ -1070,7 +630,8 @@ mod tests {
|
|||||||
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
||||||
) -> (Index, Box<dyn Query>) {
|
) -> (Index, Box<dyn Query>) {
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
|
||||||
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
doc_adder(&mut index_writer);
|
doc_adder(&mut index_writer);
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit().unwrap();
|
||||||
let query_parser = QueryParser::for_index(&index, vec![query_field]);
|
let query_parser = QueryParser::for_index(&index, vec![query_field]);
|
||||||
|
|||||||
@@ -14,11 +14,11 @@ where
|
|||||||
{
|
{
|
||||||
pub fn new(
|
pub fn new(
|
||||||
score_tweaker: TScoreTweaker,
|
score_tweaker: TScoreTweaker,
|
||||||
collector: TopCollector<TScore>,
|
limit: usize,
|
||||||
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
|
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
|
||||||
TweakedScoreTopCollector {
|
TweakedScoreTopCollector {
|
||||||
score_tweaker,
|
score_tweaker,
|
||||||
collector,
|
collector: TopCollector::with_limit(limit),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -29,7 +29,7 @@ where
 /// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
 pub trait ScoreSegmentTweaker<TScore>: 'static {
 /// Tweak the given `score` for the document `doc`.
-fn score(&mut self, doc: DocId, score: Score) -> TScore;
+fn score(&self, doc: DocId, score: Score) -> TScore;
 }
|
|
||||||
/// `ScoreTweaker` makes it possible to tweak the score
|
/// `ScoreTweaker` makes it possible to tweak the score
|
||||||
@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
|
|||||||
|
|
||||||
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
|
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
|
||||||
where
|
where
|
||||||
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
|
TScoreTweaker: ScoreTweaker<TScore>,
|
||||||
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
||||||
{
|
{
|
||||||
type Fruit = Vec<(TScore, DocAddress)>;
|
type Fruit = Vec<(TScore, DocAddress)>;
|
||||||
@@ -121,9 +121,9 @@ where
 impl<F, TScore> ScoreSegmentTweaker<TScore> for F
 where
-F: 'static + FnMut(DocId, Score) -> TScore,
+F: 'static + Sync + Send + Fn(DocId, Score) -> TScore,
 {
-fn score(&mut self, doc: DocId, score: Score) -> TScore {
+fn score(&self, doc: DocId, score: Score) -> TScore {
 (self)(doc, score)
 }
 }
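Either way the bound is phrased (the `FnMut` form on one side or the `Fn + Send + Sync` form on the other), the point of this blanket implementation is that a plain closure can be passed wherever a `ScoreSegmentTweaker` is expected. A self-contained sketch of the `&mut self`/`FnMut` variant, with the tantivy type aliases stubbed out:

```rust
type DocId = u32;
type Score = f32;

/// Segment-local score tweaker, as in the surrounding diff.
trait ScoreSegmentTweaker<TScore> {
    fn score(&mut self, doc: DocId, score: Score) -> TScore;
}

/// Blanket implementation: any `FnMut(DocId, Score) -> TScore` closure
/// is automatically a tweaker, which is what lets `tweak_score` accept
/// plain closures.
impl<F, TScore> ScoreSegmentTweaker<TScore> for F
where
    F: FnMut(DocId, Score) -> TScore,
{
    fn score(&mut self, doc: DocId, score: Score) -> TScore {
        (self)(doc, score)
    }
}

fn main() {
    let mut halve = |_doc: DocId, score: Score| score * 0.5;
    assert_eq!(halve.score(3, 2.0), 1.0);
}
```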
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||||
use std::io;
|
use std::io;
|
||||||
|
use std::ops::Deref;
|
||||||
use crate::directory::OwnedBytes;
|
|
||||||
|
|
||||||
pub(crate) struct BitPacker {
|
pub(crate) struct BitPacker {
|
||||||
mini_buffer: u64,
|
mini_buffer: u64,
|
||||||
@@ -61,14 +60,20 @@ impl BitPacker {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BitUnpacker {
|
pub struct BitUnpacker<Data>
|
||||||
|
where
|
||||||
|
Data: Deref<Target = [u8]>,
|
||||||
|
{
|
||||||
num_bits: u64,
|
num_bits: u64,
|
||||||
mask: u64,
|
mask: u64,
|
||||||
data: OwnedBytes,
|
data: Data,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BitUnpacker {
|
impl<Data> BitUnpacker<Data>
|
||||||
pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker {
|
where
|
||||||
|
Data: Deref<Target = [u8]>,
|
||||||
|
{
|
||||||
|
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
|
||||||
let mask: u64 = if num_bits == 64 {
|
let mask: u64 = if num_bits == 64 {
|
||||||
!0u64
|
!0u64
|
||||||
} else {
|
} else {
|
||||||
@@ -85,7 +90,7 @@ impl BitUnpacker {
|
|||||||
if self.num_bits == 0 {
|
if self.num_bits == 0 {
|
||||||
return 0u64;
|
return 0u64;
|
||||||
}
|
}
|
||||||
let data: &[u8] = self.data.as_slice();
|
let data: &[u8] = &*self.data;
|
||||||
let num_bits = self.num_bits;
|
let num_bits = self.num_bits;
|
||||||
let mask = self.mask;
|
let mask = self.mask;
|
||||||
let addr_in_bits = idx * num_bits;
|
let addr_in_bits = idx * num_bits;
|
||||||
@@ -104,9 +109,8 @@ impl BitUnpacker {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use super::{BitPacker, BitUnpacker};
|
use super::{BitPacker, BitUnpacker};
|
||||||
use crate::directory::OwnedBytes;
|
|
||||||
|
|
||||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) {
|
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
||||||
let mut data = Vec::new();
|
let mut data = Vec::new();
|
||||||
let mut bitpacker = BitPacker::new();
|
let mut bitpacker = BitPacker::new();
|
||||||
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
||||||
@@ -118,7 +122,7 @@ mod test {
|
|||||||
}
|
}
|
||||||
bitpacker.close(&mut data).unwrap();
|
bitpacker.close(&mut data).unwrap();
|
||||||
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
||||||
let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits);
|
let bitunpacker = BitUnpacker::new(data, num_bits);
|
||||||
(bitunpacker, vals)
|
(bitunpacker, vals)
|
||||||
}
|
}
|
||||||
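Independently of whether the unpacker owns an `OwnedBytes` or borrows any `Deref<Target = [u8]>` (the change in this hunk), the read path is the same: compute the bit address of the requested value, load 8 little-endian bytes starting at its first byte, then shift and mask. A simplified sketch, assuming the buffer is padded and `num_bits` is small enough for a single 8-byte load (the real implementation handles the full range):

```rust
/// Reads the `idx`-th `num_bits`-wide value from a little-endian
/// bit-packed buffer. Assumes at least 7 padding bytes after the packed
/// data and `num_bits <= 56`, so the unaligned 8-byte load below always
/// covers the whole value.
fn get_bits(data: &[u8], idx: usize, num_bits: u8) -> u64 {
    if num_bits == 0 {
        return 0;
    }
    let mask = (1u64 << num_bits) - 1;
    let addr_in_bits = idx * num_bits as usize;
    let byte_addr = addr_in_bits / 8; // first byte touched by the value
    let bit_shift = addr_in_bits % 8; // offset of the value inside that byte
    let mut word = [0u8; 8];
    word.copy_from_slice(&data[byte_addr..byte_addr + 8]);
    (u64::from_le_bytes(word) >> bit_shift) & mask
}

fn main() {
    // Two 5-bit values, 9 and 17, packed back to back, plus padding.
    let mut data = vec![0u8; 9];
    let packed: u64 = 9 | (17 << 5);
    data[..8].copy_from_slice(&packed.to_le_bytes());
    assert_eq!(get_bits(&data, 0, 5), 9);
    assert_eq!(get_bits(&data, 1, 5), 17);
}
```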
|
|
||||||
|
|||||||
@@ -33,10 +33,6 @@ impl TinySet {
|
|||||||
TinySet(0u64)
|
TinySet(0u64)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn clear(&mut self) {
|
|
||||||
self.0 = 0u64;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the complement of the set in `[0, 64[`.
|
/// Returns the complement of the set in `[0, 64[`.
|
||||||
fn complement(self) -> TinySet {
|
fn complement(self) -> TinySet {
|
||||||
TinySet(!self.0)
|
TinySet(!self.0)
|
||||||
@@ -47,11 +43,6 @@ impl TinySet {
|
|||||||
!self.intersect(TinySet::singleton(el)).is_empty()
|
!self.intersect(TinySet::singleton(el)).is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the number of elements in the TinySet.
|
|
||||||
pub fn len(self) -> u32 {
|
|
||||||
self.0.count_ones()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the intersection of `self` and `other`
|
/// Returns the intersection of `self` and `other`
|
||||||
pub fn intersect(self, other: TinySet) -> TinySet {
|
pub fn intersect(self, other: TinySet) -> TinySet {
|
||||||
TinySet(self.0 & other.0)
|
TinySet(self.0 & other.0)
|
||||||
@@ -118,12 +109,22 @@ impl TinySet {
|
|||||||
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
||||||
TinySet::range_lower(from_included).complement()
|
TinySet::range_lower(from_included).complement()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.0 = 0u64;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn len(self) -> u32 {
|
||||||
|
self.0.count_ones()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
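`TinySet` is simply a `u64` used as a set over `[0, 64)`, and `BitSet` below it is a boxed slice of such words; the moved `clear`/`len` methods are one-liners over the underlying integer. An illustrative re-implementation of the semantics, runnable on its own:

```rust
/// A tiny set of small integers in [0, 64), stored as the bits of a u64.
#[derive(Clone, Copy, Debug, PartialEq)]
struct TinySet(u64);

impl TinySet {
    fn empty() -> TinySet { TinySet(0) }
    fn singleton(el: u32) -> TinySet { TinySet(1u64 << el) }
    fn insert(self, el: u32) -> TinySet { TinySet(self.0 | (1u64 << el)) }
    fn contains(self, el: u32) -> bool { self.0 & (1u64 << el) != 0 }
    fn intersect(self, other: TinySet) -> TinySet { TinySet(self.0 & other.0) }
    fn len(self) -> u32 { self.0.count_ones() }
}

fn main() {
    let s = TinySet::empty().insert(3).insert(17);
    assert!(s.contains(3));
    assert!(!s.contains(4));
    assert_eq!(s.len(), 2);
    assert_eq!(s.intersect(TinySet::singleton(17)), TinySet::singleton(17));
}
```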
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BitSet {
|
pub struct BitSet {
|
||||||
tinysets: Box<[TinySet]>,
|
tinysets: Box<[TinySet]>,
|
||||||
len: usize,
|
len: usize, //< Technically it should be u32, but we
|
||||||
|
// count multiple inserts.
|
||||||
|
// `usize` guards us from overflow.
|
||||||
max_value: u32,
|
max_value: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -203,7 +204,7 @@ mod tests {
|
|||||||
|
|
||||||
use super::BitSet;
|
use super::BitSet;
|
||||||
use super::TinySet;
|
use super::TinySet;
|
||||||
use crate::docset::{DocSet, TERMINATED};
|
use crate::docset::DocSet;
|
||||||
use crate::query::BitSetDocSet;
|
use crate::query::BitSetDocSet;
|
||||||
use crate::tests;
|
use crate::tests;
|
||||||
use crate::tests::generate_nonunique_unsorted;
|
use crate::tests::generate_nonunique_unsorted;
|
||||||
@@ -277,13 +278,11 @@ mod tests {
|
|||||||
}
|
}
|
||||||
assert_eq!(btreeset.len(), bitset.len());
|
assert_eq!(btreeset.len(), bitset.len());
|
||||||
let mut bitset_docset = BitSetDocSet::from(bitset);
|
let mut bitset_docset = BitSetDocSet::from(bitset);
|
||||||
let mut remaining = true;
|
|
||||||
for el in btreeset.into_iter() {
|
for el in btreeset.into_iter() {
|
||||||
assert!(remaining);
|
bitset_docset.advance();
|
||||||
assert_eq!(bitset_docset.doc(), el);
|
assert_eq!(bitset_docset.doc(), el);
|
||||||
remaining = bitset_docset.advance() != TERMINATED;
|
|
||||||
}
|
}
|
||||||
assert!(!remaining);
|
assert!(!bitset_docset.advance());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -1,15 +1,14 @@
|
|||||||
use crate::common::BinarySerializable;
|
use crate::common::BinarySerializable;
|
||||||
use crate::common::CountingWriter;
|
use crate::common::CountingWriter;
|
||||||
use crate::common::VInt;
|
use crate::common::VInt;
|
||||||
use crate::directory::FileSlice;
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::directory::{TerminatingWrite, WritePtr};
|
use crate::directory::{TerminatingWrite, WritePtr};
|
||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
use crate::space_usage::FieldUsage;
|
use crate::space_usage::FieldUsage;
|
||||||
use crate::space_usage::PerFieldSpaceUsage;
|
use crate::space_usage::PerFieldSpaceUsage;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::io::{self, Read, Write};
|
use std::io::Write;
|
||||||
|
use std::io::{self, Read};
|
||||||
use super::HasLen;
|
|
||||||
|
|
||||||
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
||||||
pub struct FileAddr {
|
pub struct FileAddr {
|
||||||
@@ -104,26 +103,25 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
|||||||
/// for each field.
|
/// for each field.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct CompositeFile {
|
pub struct CompositeFile {
|
||||||
data: FileSlice,
|
data: ReadOnlySource,
|
||||||
offsets_index: HashMap<FileAddr, (usize, usize)>,
|
offsets_index: HashMap<FileAddr, (usize, usize)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CompositeFile {
|
impl CompositeFile {
|
||||||
/// Opens a composite file stored in a given
|
/// Opens a composite file stored in a given
|
||||||
/// `FileSlice`.
|
/// `ReadOnlySource`.
|
||||||
pub fn open(data: &FileSlice) -> io::Result<CompositeFile> {
|
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
|
||||||
let end = data.len();
|
let end = data.len();
|
||||||
let footer_len_data = data.slice_from(end - 4).read_bytes()?;
|
let footer_len_data = data.slice_from(end - 4);
|
||||||
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
||||||
let footer_start = end - 4 - footer_len;
|
let footer_start = end - 4 - footer_len;
|
||||||
let footer_data = data
|
let footer_data = data.slice(footer_start, footer_start + footer_len);
|
||||||
.slice(footer_start, footer_start + footer_len)
|
|
||||||
.read_bytes()?;
|
|
||||||
let mut footer_buffer = footer_data.as_slice();
|
let mut footer_buffer = footer_data.as_slice();
|
||||||
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
||||||
|
|
||||||
let mut file_addrs = vec![];
|
let mut file_addrs = vec![];
|
||||||
let mut offsets = vec![];
|
let mut offsets = vec![];
|
||||||
|
|
||||||
let mut field_index = HashMap::new();
|
let mut field_index = HashMap::new();
|
||||||
|
|
||||||
let mut offset = 0;
|
let mut offset = 0;
|
||||||
@@ -152,19 +150,19 @@ impl CompositeFile {
|
|||||||
pub fn empty() -> CompositeFile {
|
pub fn empty() -> CompositeFile {
|
||||||
CompositeFile {
|
CompositeFile {
|
||||||
offsets_index: HashMap::new(),
|
offsets_index: HashMap::new(),
|
||||||
data: FileSlice::empty(),
|
data: ReadOnlySource::empty(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the `FileSlice` associated
|
/// Returns the `ReadOnlySource` associated
|
||||||
/// to a given `Field` and stored in a `CompositeFile`.
|
/// to a given `Field` and stored in a `CompositeFile`.
|
||||||
pub fn open_read(&self, field: Field) -> Option<FileSlice> {
|
pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
|
||||||
self.open_read_with_idx(field, 0)
|
self.open_read_with_idx(field, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the `FileSlice` associated
|
/// Returns the `ReadOnlySource` associated
|
||||||
/// to a given `Field` and stored in a `CompositeFile`.
|
/// to a given `Field` and stored in a `CompositeFile`.
|
||||||
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
|
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
|
||||||
self.offsets_index
|
self.offsets_index
|
||||||
.get(&FileAddr { field, idx })
|
.get(&FileAddr { field, idx })
|
||||||
.map(|&(from, to)| self.data.slice(from, to))
|
.map(|&(from, to)| self.data.slice(from, to))
|
||||||
@@ -192,46 +190,49 @@ mod test {
|
|||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_composite_file() -> crate::Result<()> {
|
fn test_composite_file() {
|
||||||
let path = Path::new("test_path");
|
let path = Path::new("test_path");
|
||||||
let directory = RAMDirectory::create();
|
let mut directory = RAMDirectory::default();
|
||||||
{
|
{
|
||||||
let w = directory.open_write(path).unwrap();
|
let w = directory.open_write(path).unwrap();
|
||||||
let mut composite_write = CompositeWrite::wrap(w);
|
let mut composite_write = CompositeWrite::wrap(w);
|
||||||
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
|
{
|
||||||
VInt(32431123u64).serialize(&mut write_0)?;
|
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
|
||||||
write_0.flush()?;
|
VInt(32431123u64).serialize(&mut write_0).unwrap();
|
||||||
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
|
write_0.flush().unwrap();
|
||||||
VInt(2).serialize(&mut write_4)?;
|
}
|
||||||
write_4.flush()?;
|
|
||||||
composite_write.close()?;
|
{
|
||||||
|
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
|
||||||
|
VInt(2).serialize(&mut write_4).unwrap();
|
||||||
|
write_4.flush().unwrap();
|
||||||
|
}
|
||||||
|
composite_write.close().unwrap();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let r = directory.open_read(path)?;
|
let r = directory.open_read(path).unwrap();
|
||||||
let composite_file = CompositeFile::open(&r)?;
|
let composite_file = CompositeFile::open(&r).unwrap();
|
||||||
{
|
{
|
||||||
let file0 = composite_file
|
let file0 = composite_file
|
||||||
.open_read(Field::from_field_id(0u32))
|
.open_read(Field::from_field_id(0u32))
|
||||||
.unwrap()
|
.unwrap();
|
||||||
.read_bytes()?;
|
|
||||||
let mut file0_buf = file0.as_slice();
|
let mut file0_buf = file0.as_slice();
|
||||||
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
|
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
|
||||||
assert_eq!(file0_buf.len(), 0);
|
assert_eq!(file0_buf.len(), 0);
|
||||||
assert_eq!(payload_0, 32431123u64);
|
assert_eq!(payload_0, 32431123u64);
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let file4 = composite_file
|
let file4 = composite_file
|
||||||
.open_read(Field::from_field_id(4u32))
|
.open_read(Field::from_field_id(4u32))
|
||||||
.unwrap()
|
.unwrap();
|
||||||
.read_bytes()?;
|
|
||||||
let mut file4_buf = file4.as_slice();
|
let mut file4_buf = file4.as_slice();
|
||||||
let payload_4 = VInt::deserialize(&mut file4_buf)?.0;
|
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
|
||||||
assert_eq!(file4_buf.len(), 0);
|
assert_eq!(file4_buf.len(), 0);
|
||||||
assert_eq!(payload_4, 2u64);
|
assert_eq!(payload_4, 2u64);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
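The layout behind `CompositeFile` is: all per-field payloads concatenated, followed by a footer describing where each field's data ends, followed by 4 bytes giving the footer's length, so `open` can work backwards from the end of the file. The sketch below uses fixed-width little-endian integers in the footer purely for brevity; the real format VInt-encodes offsets and also keys entries by an index within the field, so treat this as a schematic, not the on-disk format:

```rust
use std::convert::TryInto;

/// Concatenates each field's bytes, then appends a footer of
/// (field_id, end_offset) pairs and the footer length.
fn write_composite(fields: &[(u32, Vec<u8>)]) -> Vec<u8> {
    let mut data = Vec::new();
    let mut footer = Vec::new();
    for (field_id, payload) in fields {
        data.extend_from_slice(payload);
        footer.extend_from_slice(&field_id.to_le_bytes());
        footer.extend_from_slice(&(data.len() as u64).to_le_bytes());
    }
    let footer_len = footer.len() as u32;
    data.extend_from_slice(&footer);
    data.extend_from_slice(&footer_len.to_le_bytes());
    data
}

/// Returns the byte range of `field_id` inside the composite buffer.
fn open_field(data: &[u8], field_id: u32) -> Option<(usize, usize)> {
    let end = data.len();
    let footer_len = u32::from_le_bytes(data[end - 4..].try_into().unwrap()) as usize;
    let footer = &data[end - 4 - footer_len..end - 4];
    let mut start = 0usize;
    for entry in footer.chunks_exact(12) {
        let id = u32::from_le_bytes(entry[..4].try_into().unwrap());
        let stop = u64::from_le_bytes(entry[4..].try_into().unwrap()) as usize;
        if id == field_id {
            return Some((start, stop));
        }
        start = stop;
    }
    None
}

fn main() {
    let data = write_composite(&[(0, vec![1, 2, 3]), (4, vec![9, 9])]);
    assert_eq!(open_field(&data, 0), Some((0, 3)));
    assert_eq!(open_field(&data, 4), Some((3, 5)));
    assert_eq!(open_field(&data, 7), None);
}
```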
|
|||||||
@@ -20,10 +20,9 @@ impl<W: Write> CountingWriter<W> {
 self.written_bytes
 }
 
-/// Returns the underlying write object.
-/// Note that this method does not trigger any flushing.
-pub fn finish(self) -> W {
-self.underlying
+pub fn finish(mut self) -> io::Result<(W, u64)> {
+self.flush()?;
+Ok((self.underlying, self.written_bytes))
 }
 }
|
|
||||||
@@ -47,6 +46,7 @@ impl<W: Write> Write for CountingWriter<W> {
|
|||||||
|
|
||||||
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
|
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
|
||||||
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
|
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
|
||||||
|
self.flush()?;
|
||||||
self.underlying.terminate_ref(token)
|
self.underlying.terminate_ref(token)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -63,9 +63,8 @@ mod test {
|
|||||||
let mut counting_writer = CountingWriter::wrap(buffer);
|
let mut counting_writer = CountingWriter::wrap(buffer);
|
||||||
let bytes = (0u8..10u8).collect::<Vec<u8>>();
|
let bytes = (0u8..10u8).collect::<Vec<u8>>();
|
||||||
counting_writer.write_all(&bytes).unwrap();
|
counting_writer.write_all(&bytes).unwrap();
|
||||||
let len = counting_writer.written_bytes();
|
let (w, len): (Vec<u8>, u64) = counting_writer.finish().unwrap();
|
||||||
let buffer_restituted: Vec<u8> = counting_writer.finish();
|
|
||||||
assert_eq!(len, 10u64);
|
assert_eq!(len, 10u64);
|
||||||
assert_eq!(buffer_restituted.len(), 10);
|
assert_eq!(w.len(), 10);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
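The change to `finish` (flush first, then hand back the inner writer together with the byte count) is easier to read as a standalone type. Below is a minimal counting writer along those lines; it mirrors the shape of the code in this hunk but is an independent sketch, not the crate's implementation:

```rust
use std::io::{self, Write};

/// Wraps a writer and counts how many bytes have been written through it.
struct CountingWriter<W> {
    underlying: W,
    written_bytes: u64,
}

impl<W: Write> CountingWriter<W> {
    fn wrap(underlying: W) -> Self {
        CountingWriter { underlying, written_bytes: 0 }
    }

    /// Flushes and hands back the underlying writer with the byte count.
    fn finish(mut self) -> io::Result<(W, u64)> {
        self.flush()?;
        Ok((self.underlying, self.written_bytes))
    }
}

impl<W: Write> Write for CountingWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.underlying.write(buf)?;
        self.written_bytes += written as u64;
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.underlying.flush()
    }
}

fn main() -> io::Result<()> {
    let mut w = CountingWriter::wrap(Vec::new());
    w.write_all(&[0u8; 10])?;
    let (buffer, len) = w.finish()?;
    assert_eq!(len, 10);
    assert_eq!(buffer.len(), 10);
    Ok(())
}
```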
|
|||||||
@@ -2,6 +2,7 @@ pub mod bitpacker;
|
|||||||
mod bitset;
|
mod bitset;
|
||||||
mod composite_file;
|
mod composite_file;
|
||||||
mod counting_writer;
|
mod counting_writer;
|
||||||
|
mod mutable_enum;
|
||||||
mod serialize;
|
mod serialize;
|
||||||
mod vint;
|
mod vint;
|
||||||
|
|
||||||
@@ -9,10 +10,9 @@ pub use self::bitset::BitSet;
|
|||||||
pub(crate) use self::bitset::TinySet;
|
pub(crate) use self::bitset::TinySet;
|
||||||
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
||||||
pub use self::counting_writer::CountingWriter;
|
pub use self::counting_writer::CountingWriter;
|
||||||
|
pub(crate) use self::mutable_enum::MutableEnum;
|
||||||
pub use self::serialize::{BinarySerializable, FixedSize};
|
pub use self::serialize::{BinarySerializable, FixedSize};
|
||||||
pub use self::vint::{
|
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
|
||||||
read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
|
|
||||||
};
|
|
||||||
pub use byteorder::LittleEndian as Endianness;
|
pub use byteorder::LittleEndian as Endianness;
|
||||||
|
|
||||||
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
|
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
|
||||||
@@ -66,6 +66,10 @@ pub(crate) fn compute_num_bits(n: u64) -> u8 {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn is_power_of_2(n: usize) -> bool {
|
||||||
|
(n > 0) && (n & (n - 1) == 0)
|
||||||
|
}
|
||||||
|
|
||||||
/// Has length trait
|
/// Has length trait
|
||||||
pub trait HasLen {
|
pub trait HasLen {
|
||||||
/// Return length
|
/// Return length
|
||||||
@@ -115,16 +119,11 @@ pub fn u64_to_i64(val: u64) -> i64 {
 /// For simplicity, tantivy internally handles `f64` as `u64`.
 /// The mapping is defined by this function.
 ///
-/// Maps `f64` to `u64` in a monotonic manner, so that bytes lexical order is preserved.
+/// Maps `f64` to `u64` so that lexical order is preserved.
 ///
 /// This is more suited than simply casting (`val as u64`)
 /// which would truncate the result
 ///
-/// # Reference
-///
-/// Daniel Lemire's [blog post](https://lemire.me/blog/2020/12/14/converting-floating-point-numbers-to-integers-while-preserving-order/)
-/// explains the mapping in a clear manner.
-///
 /// # See also
 /// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
 #[inline(always)]
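The monotone `f64` to `u64` mapping referred to here is the usual sign-bit trick: positive floats get their sign bit set so they sort above all negatives, while negative floats have every bit flipped so that more-negative values map to smaller integers. A sketch (assuming no NaN values are stored), with the inverse mapping alongside:

```rust
/// Maps an `f64` to a `u64` such that the natural order of the floats
/// is preserved on the unsigned integers.
fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        // Positive floats (including +0.0): set the sign bit so they
        // sort above every negative float.
        bits ^ (1 << 63)
    } else {
        // Negative floats: flip all bits so that "more negative"
        // maps to smaller integers.
        !bits
    }
}

fn u64_to_f64(val: u64) -> f64 {
    f64::from_bits(if val & (1 << 63) != 0 { val ^ (1 << 63) } else { !val })
}

fn main() {
    assert!(f64_to_u64(-1.5) < f64_to_u64(-0.5));
    assert!(f64_to_u64(-0.5) < f64_to_u64(0.5));
    assert_eq!(u64_to_f64(f64_to_u64(42.0)), 42.0);
}
```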
@@ -153,7 +152,6 @@ pub(crate) mod test {
|
|||||||
pub use super::minmax;
|
pub use super::minmax;
|
||||||
pub use super::serialize::test::fixed_size_test;
|
pub use super::serialize::test::fixed_size_test;
|
||||||
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
||||||
use proptest::prelude::*;
|
|
||||||
use std::f64;
|
use std::f64;
|
||||||
|
|
||||||
fn test_i64_converter_helper(val: i64) {
|
fn test_i64_converter_helper(val: i64) {
|
||||||
@@ -164,15 +162,6 @@ pub(crate) mod test {
|
|||||||
assert_eq!(u64_to_f64(f64_to_u64(val)), val);
|
assert_eq!(u64_to_f64(f64_to_u64(val)), val);
|
||||||
}
|
}
|
||||||
|
|
||||||
proptest! {
|
|
||||||
#[test]
|
|
||||||
fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
|
|
||||||
let left_u64 = f64_to_u64(left);
|
|
||||||
let right_u64 = f64_to_u64(right);
|
|
||||||
assert_eq!(left_u64 < right_u64, left < right);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_i64_converter() {
|
fn test_i64_converter() {
|
||||||
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
|
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
|
||||||
|
|||||||
37
src/common/mutable_enum.rs
Normal file
37
src/common/mutable_enum.rs
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
use std::ops::{Deref, DerefMut};
|
||||||
|
|
||||||
|
pub(crate) struct MutableEnum<T>(Option<T>);
|
||||||
|
|
||||||
|
impl<T> MutableEnum<T> {
|
||||||
|
pub fn wrap(val: T) -> Self {
|
||||||
|
MutableEnum(Some(val))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn into(self) -> T {
|
||||||
|
self.0.unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> MutableEnum<T> {
|
||||||
|
pub fn map_mutate<E, F>(&mut self, transformation: F) -> Result<(), E>
|
||||||
|
where
|
||||||
|
F: FnOnce(T) -> Result<T, E>,
|
||||||
|
{
|
||||||
|
self.0 = self.0.take().map(transformation).transpose()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Deref for MutableEnum<T> {
|
||||||
|
type Target = T;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
self.0.as_ref().unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> DerefMut for MutableEnum<T> {
|
||||||
|
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||||
|
self.0.as_mut().unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -89,19 +89,6 @@ impl FixedSize for u64 {
|
|||||||
const SIZE_IN_BYTES: usize = 8;
|
const SIZE_IN_BYTES: usize = 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BinarySerializable for f32 {
|
|
||||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
|
||||||
writer.write_f32::<Endianness>(*self)
|
|
||||||
}
|
|
||||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
|
||||||
reader.read_f32::<Endianness>()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FixedSize for f32 {
|
|
||||||
const SIZE_IN_BYTES: usize = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BinarySerializable for i64 {
|
impl BinarySerializable for i64 {
|
||||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
writer.write_i64::<Endianness>(*self)
|
writer.write_i64::<Endianness>(*self)
|
||||||
|
|||||||
@@ -5,12 +5,12 @@ use std::io::Read;
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
/// Wrapper over a `u64` that serializes as a variable int.
|
/// Wrapper over a `u64` that serializes as a variable int.
|
||||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
#[derive(Debug, Eq, PartialEq)]
|
||||||
pub struct VInt(pub u64);
|
pub struct VInt(pub u64);
|
||||||
|
|
||||||
const STOP_BIT: u8 = 128;
|
const STOP_BIT: u8 = 128;
|
||||||
|
|
||||||
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
||||||
const START_2: u64 = 1 << 7;
|
const START_2: u64 = 1 << 7;
|
||||||
const START_3: u64 = 1 << 14;
|
const START_3: u64 = 1 << 14;
|
||||||
const START_4: u64 = 1 << 21;
|
const START_4: u64 = 1 << 21;
|
||||||
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
|||||||
|
|
||||||
let val = u64::from(val);
|
let val = u64::from(val);
|
||||||
const STOP_BIT: u64 = 128u64;
|
const STOP_BIT: u64 = 128u64;
|
||||||
let (res, num_bytes) = match val {
|
match val {
|
||||||
0..=STOP_1 => (val | STOP_BIT, 1),
|
0..=STOP_1 => (val | STOP_BIT, 1),
|
||||||
START_2..=STOP_2 => (
|
START_2..=STOP_2 => (
|
||||||
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
|
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
|
||||||
@@ -56,9 +56,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
|||||||
| (STOP_BIT << (8 * 4)),
|
| (STOP_BIT << (8 * 4)),
|
||||||
5,
|
5,
|
||||||
),
|
),
|
||||||
};
|
}
|
||||||
LittleEndian::write_u64(&mut buf[..], res);
|
|
||||||
&buf[0..num_bytes]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the number of bytes covered by a
|
/// Returns the number of bytes covered by a
|
||||||
@@ -87,26 +85,23 @@ fn vint_len(data: &[u8]) -> usize {
|
|||||||
/// If the buffer does not start by a valid
|
/// If the buffer does not start by a valid
|
||||||
/// vint payload
|
/// vint payload
|
||||||
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
|
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
|
||||||
let (result, vlen) = read_u32_vint_no_advance(*data);
|
let vlen = vint_len(*data);
|
||||||
*data = &data[vlen..];
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
|
|
||||||
let vlen = vint_len(data);
|
|
||||||
let mut result = 0u32;
|
let mut result = 0u32;
|
||||||
let mut shift = 0u64;
|
let mut shift = 0u64;
|
||||||
for &b in &data[..vlen] {
|
for &b in &data[..vlen] {
|
||||||
result |= u32::from(b & 127u8) << shift;
|
result |= u32::from(b & 127u8) << shift;
|
||||||
shift += 7;
|
shift += 7;
|
||||||
}
|
}
|
||||||
(result, vlen)
|
*data = &data[vlen..];
|
||||||
|
result
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write a `u32` as a vint payload.
|
/// Write a `u32` as a vint payload.
|
||||||
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
|
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
|
||||||
let mut buf = [0u8; 8];
|
let (val, num_bytes) = serialize_vint_u32(val);
|
||||||
let data = serialize_vint_u32(val, &mut buf);
|
let mut buffer = [0u8; 8];
|
||||||
writer.write_all(&data)
|
LittleEndian::write_u64(&mut buffer, val);
|
||||||
|
writer.write_all(&buffer[..num_bytes])
|
||||||
}
|
}
|
||||||
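Both versions of `serialize_vint_u32` implement the same variable-byte scheme: 7 payload bits per byte, least-significant group first, and the high bit of the last byte set as a stop marker (that is what the `STOP_BIT << (8 * n)` terms above do). A byte-at-a-time sketch of encode and decode, with hypothetical helper names:

```rust
const STOP_BIT: u8 = 128;

/// Encodes `val` as a sequence of 7-bit groups, least significant first.
/// The last byte has its high bit (the "stop bit") set.
fn encode_vint_u32(mut val: u32, out: &mut Vec<u8>) {
    loop {
        let byte = (val & 127) as u8;
        val >>= 7;
        if val == 0 {
            out.push(byte | STOP_BIT);
            return;
        }
        out.push(byte);
    }
}

/// Decodes a vint from the front of `data`, returning the value and
/// the number of bytes consumed.
fn decode_vint_u32(data: &[u8]) -> (u32, usize) {
    let mut result = 0u32;
    let mut shift = 0;
    for (i, &b) in data.iter().enumerate() {
        result |= u32::from(b & 127) << shift;
        if b & STOP_BIT != 0 {
            return (result, i + 1);
        }
        shift += 7;
    }
    panic!("buffer did not contain a terminated vint");
}

fn main() {
    let mut buf = Vec::new();
    encode_vint_u32(300, &mut buf);
    assert_eq!(buf.len(), 2);
    assert_eq!(decode_vint_u32(&buf), (300, 2));
}
```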
|
|
||||||
impl VInt {
|
impl VInt {
|
||||||
@@ -177,6 +172,7 @@ mod tests {
|
|||||||
use super::serialize_vint_u32;
|
use super::serialize_vint_u32;
|
||||||
use super::VInt;
|
use super::VInt;
|
||||||
use crate::common::BinarySerializable;
|
use crate::common::BinarySerializable;
|
||||||
|
use byteorder::{ByteOrder, LittleEndian};
|
||||||
|
|
||||||
fn aux_test_vint(val: u64) {
|
fn aux_test_vint(val: u64) {
|
||||||
let mut v = [14u8; 10];
|
let mut v = [14u8; 10];
|
||||||
@@ -212,10 +208,12 @@ mod tests {
|
|||||||
|
|
||||||
fn aux_test_serialize_vint_u32(val: u32) {
|
fn aux_test_serialize_vint_u32(val: u32) {
|
||||||
let mut buffer = [0u8; 10];
|
let mut buffer = [0u8; 10];
|
||||||
let mut buffer2 = [0u8; 8];
|
let mut buffer2 = [0u8; 10];
|
||||||
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
|
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
|
||||||
let res2 = serialize_vint_u32(val, &mut buffer2);
|
let (vint, len) = serialize_vint_u32(val);
|
||||||
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
|
assert_eq!(len, len_vint, "len wrong for val {}", val);
|
||||||
|
LittleEndian::write_u64(&mut buffer2, vint);
|
||||||
|
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ use crate::core::SegmentId;
|
|||||||
use crate::core::SegmentMeta;
|
use crate::core::SegmentMeta;
|
||||||
use crate::core::SegmentMetaInventory;
|
use crate::core::SegmentMetaInventory;
|
||||||
use crate::core::META_FILEPATH;
|
use crate::core::META_FILEPATH;
|
||||||
use crate::directory::error::OpenReadError;
|
|
||||||
use crate::directory::ManagedDirectory;
|
use crate::directory::ManagedDirectory;
|
||||||
#[cfg(feature = "mmap")]
|
#[cfg(feature = "mmap")]
|
||||||
use crate::directory::MmapDirectory;
|
use crate::directory::MmapDirectory;
|
||||||
@@ -13,8 +12,8 @@ use crate::directory::INDEX_WRITER_LOCK;
|
|||||||
use crate::directory::{Directory, RAMDirectory};
|
use crate::directory::{Directory, RAMDirectory};
|
||||||
use crate::error::DataCorruption;
|
use crate::error::DataCorruption;
|
||||||
use crate::error::TantivyError;
|
use crate::error::TantivyError;
|
||||||
use crate::indexer::index_writer::HEAP_SIZE_MIN;
|
|
||||||
use crate::indexer::segment_updater::save_new_metas;
|
use crate::indexer::segment_updater::save_new_metas;
|
||||||
|
use crate::indexer::IndexWriterConfig;
|
||||||
use crate::reader::IndexReader;
|
use crate::reader::IndexReader;
|
||||||
use crate::reader::IndexReaderBuilder;
|
use crate::reader::IndexReaderBuilder;
|
||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
@@ -22,12 +21,12 @@ use crate::schema::FieldType;
|
|||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
|
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
|
||||||
use crate::IndexWriter;
|
use crate::IndexWriter;
|
||||||
|
use num_cpus;
|
||||||
|
use std::borrow::BorrowMut;
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
|
|
||||||
#[cfg(feature = "mmap")]
|
#[cfg(feature = "mmap")]
|
||||||
use std::path::Path;
|
use std::path::{Path, PathBuf};
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
fn load_metas(
|
fn load_metas(
|
||||||
@@ -35,18 +34,12 @@ fn load_metas(
|
|||||||
inventory: &SegmentMetaInventory,
|
inventory: &SegmentMetaInventory,
|
||||||
) -> crate::Result<IndexMeta> {
|
) -> crate::Result<IndexMeta> {
|
||||||
let meta_data = directory.atomic_read(&META_FILEPATH)?;
|
let meta_data = directory.atomic_read(&META_FILEPATH)?;
|
||||||
let meta_string = String::from_utf8(meta_data)
|
let meta_string = String::from_utf8_lossy(&meta_data);
|
||||||
.map_err(|utf8_err| {
|
|
||||||
DataCorruption::new(
|
|
||||||
META_FILEPATH.to_path_buf(),
|
|
||||||
format!("Meta file is not valid utf-8. {:?}", utf8_err)
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
IndexMeta::deserialize(&meta_string, &inventory)
|
IndexMeta::deserialize(&meta_string, &inventory)
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
DataCorruption::new(
|
DataCorruption::new(
|
||||||
META_FILEPATH.to_path_buf(),
|
META_FILEPATH.to_path_buf(),
|
||||||
format!("Meta file cannot be deserialized. {:?}. content = {}", e, meta_string),
|
format!("Meta file cannot be deserialized. {:?}.", e),
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.map_err(From::from)
|
.map_err(From::from)
|
||||||
@@ -63,10 +56,8 @@ pub struct Index {
|
|||||||
}
|
}
|
||||||
|
|
||||||
 impl Index {
-/// Examines the directory to see if it contains an index.
-///
-/// Effectively, it only checks for the presence of the `meta.json` file.
-pub fn exists<Dir: Directory>(dir: &Dir) -> Result<bool, OpenReadError> {
+/// Examines the directory to see if it contains an index
+pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
 dir.exists(&META_FILEPATH)
 }
|
|
||||||
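For readers skimming the hunk above: the two sides disagree on the signature of `Index::exists`. Below is a minimal sketch of the difference at a call site, assuming the `RAMDirectory` used in the tests further down; it is illustrative only and not part of the diff.

    use tantivy::directory::RAMDirectory;
    use tantivy::Index;

    fn check(dir: &RAMDirectory) {
        // Right-hand side of the hunk: `exists` returns a plain bool.
        if Index::exists(dir) {
            println!("an index is already present in this directory");
        }
        // The left-hand side returns Result<bool, OpenReadError> instead, so the same
        // call site would be written `if Index::exists(dir)? { ... }` and the enclosing
        // function would have to propagate the error.
    }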
@@ -113,7 +104,7 @@ impl Index {
         schema: Schema,
     ) -> crate::Result<Index> {
         let mmap_directory = MmapDirectory::open(directory_path)?;
-        if Index::exists(&mmap_directory)? {
+        if Index::exists(&mmap_directory) {
             return Err(TantivyError::IndexAlreadyExists);
         }
         Index::create(mmap_directory, schema)
@@ -121,7 +112,7 @@ impl Index {

     /// Opens or creates a new index in the provided directory
     pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
-        if !Index::exists(&dir)? {
+        if !Index::exists(&dir) {
             return Index::create(dir, schema);
         }
         let index = Index::open(dir)?;
@@ -148,9 +139,7 @@ impl Index {
         Index::create(mmap_directory, schema)
     }

-    /// Creates a new index given an implementation of the trait `Directory`.
-    ///
-    /// If a directory previously existed, it will be erased.
+    /// Creates a new index given an implementation of the trait `Directory`
     pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
         let directory = ManagedDirectory::wrap(dir)?;
         Index::from_directory(directory, schema)
@@ -159,8 +148,8 @@ impl Index {
     /// Create a new index from a directory.
     ///
     /// This will overwrite existing meta.json
-    fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
-        save_new_metas(schema.clone(), &directory)?;
+    fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
+        save_new_metas(schema.clone(), directory.borrow_mut())?;
         let metas = IndexMeta::with_schema(schema);
         Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
     }
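The `open_or_create` path shown above is the usual entry point on both sides of the diff. A small usage sketch follows; the field name and the choice of an in-memory directory are assumptions for illustration, not taken from the diff.

    use tantivy::directory::RAMDirectory;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::Index;

    fn open_or_create_demo() -> tantivy::Result<()> {
        // Illustrative schema with a single text field.
        let mut schema_builder = Schema::builder();
        schema_builder.add_text_field("body", TEXT);
        let schema = schema_builder.build();

        // Creates the index on first use, reopens it afterwards, and fails if an
        // index with a different schema is already present (see the tests below).
        let directory = RAMDirectory::create();
        let _index = Index::open_or_create(directory, schema)?;
        Ok(())
    }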
@@ -284,8 +273,40 @@ impl Index {
     pub fn writer_with_num_threads(
         &self,
         num_threads: usize,
-        overall_heap_size_in_bytes: usize,
+        overall_heap_size_in_bytes: u64,
     ) -> crate::Result<IndexWriter> {
+        let config = IndexWriterConfig {
+            max_indexing_threads: num_threads,
+            memory_budget: overall_heap_size_in_bytes,
+            ..Default::default()
+        };
+        self.writer_from_config(config)
+    }
+
+    /// Creates a multithreaded writer
+    ///
+    /// Tantivy will automatically define the number of threads to use.
+    /// `overall_heap_size_in_bytes` is the total target memory usage that will be split
+    /// between a given number of threads.
+    ///
+    /// # Errors
+    /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
+    /// # Panics
+    /// If the heap size per thread is too small, panics.
+    pub fn writer(&self, overall_heap_size_in_bytes: u64) -> crate::Result<IndexWriter> {
+        let config = IndexWriterConfig {
+            max_indexing_threads: num_cpus::get(),
+            memory_budget: overall_heap_size_in_bytes,
+            ..Default::default()
+        };
+        self.writer_from_config(config)
+    }
+
+    /// Creates a new writer with a given configuration.
+    ///
+    /// See [`IndexWriterConfig`](./struct.IndexWriterConfig.html) for more information.
+    pub fn writer_from_config(&self, mut config: IndexWriterConfig) -> crate::Result<IndexWriter> {
+        config.validate()?;
         let directory_lock = self
             .directory
             .acquire_lock(&INDEX_WRITER_LOCK)
@@ -301,41 +322,7 @@ impl Index {
                 ),
             )
         })?;
-        let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
-        IndexWriter::new(
-            self,
-            num_threads,
-            heap_size_in_bytes_per_thread,
-            directory_lock,
-        )
-    }
-
-    /// Helper to create an index writer for tests.
-    ///
-    /// That index writer only simply has a single thread and a heap of 5 MB.
-    /// Using a single thread gives us a deterministic allocation of DocId.
-    #[cfg(test)]
-    pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
-        self.writer_with_num_threads(1, 10_000_000)
-    }
-
-    /// Creates a multithreaded writer
-    ///
-    /// Tantivy will automatically define the number of threads to use.
-    /// `overall_heap_size_in_bytes` is the total target memory usage that will be split
-    /// between a given number of threads.
-    ///
-    /// # Errors
-    /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
-    /// # Panics
-    /// If the heap size per thread is too small, panics.
-    pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> {
-        let mut num_threads = num_cpus::get();
-        let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
-        if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
-            num_threads = (overall_heap_size_in_bytes / HEAP_SIZE_MIN).max(1);
-        }
-        self.writer_with_num_threads(num_threads, overall_heap_size_in_bytes)
-    }
+        IndexWriter::new(self, config, directory_lock)
     }

     /// Accessor to the index schema
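The hunks above replace the old per-thread heap computation with an `IndexWriterConfig`. A hedged sketch of what a call site might look like against the right-hand, config-based side follows; the numeric values are arbitrary and the `tantivy::indexer::IndexWriterConfig` import path is an assumption.

    // Assumed import path for the config type introduced on the right-hand side.
    use tantivy::indexer::IndexWriterConfig;
    use tantivy::Index;

    fn make_writer(index: &Index) -> tantivy::Result<()> {
        // Mirrors what `writer_with_num_threads` now builds internally.
        let config = IndexWriterConfig {
            max_indexing_threads: 2,
            memory_budget: 50_000_000,
            ..Default::default()
        };
        let mut writer = index.writer_from_config(config)?;

        // Alternatively, `index.writer(50_000_000)?` lets tantivy pick the thread
        // count and only caps the overall memory budget.
        writer.commit()?;
        Ok(())
    }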
@@ -406,7 +393,7 @@ impl fmt::Debug for Index {

 #[cfg(test)]
 mod tests {
-    use crate::directory::{RAMDirectory, WatchCallback};
+    use crate::directory::RAMDirectory;
     use crate::schema::Field;
     use crate::schema::{Schema, INDEXED, TEXT};
     use crate::IndexReader;
@@ -430,24 +417,24 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_index_exists() {
|
fn test_index_exists() {
|
||||||
let directory = RAMDirectory::create();
|
let directory = RAMDirectory::create();
|
||||||
assert!(!Index::exists(&directory).unwrap());
|
assert!(!Index::exists(&directory));
|
||||||
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
||||||
assert!(Index::exists(&directory).unwrap());
|
assert!(Index::exists(&directory));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn open_or_create_should_create() {
|
fn open_or_create_should_create() {
|
||||||
let directory = RAMDirectory::create();
|
let directory = RAMDirectory::create();
|
||||||
assert!(!Index::exists(&directory).unwrap());
|
assert!(!Index::exists(&directory));
|
||||||
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
|
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
|
||||||
assert!(Index::exists(&directory).unwrap());
|
assert!(Index::exists(&directory));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn open_or_create_should_open() {
|
fn open_or_create_should_open() {
|
||||||
let directory = RAMDirectory::create();
|
let directory = RAMDirectory::create();
|
||||||
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
||||||
assert!(Index::exists(&directory).unwrap());
|
assert!(Index::exists(&directory));
|
||||||
assert!(Index::open_or_create(directory, throw_away_schema()).is_ok());
|
assert!(Index::open_or_create(directory, throw_away_schema()).is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -455,7 +442,7 @@ mod tests {
|
|||||||
fn create_should_wipeoff_existing() {
|
fn create_should_wipeoff_existing() {
|
||||||
let directory = RAMDirectory::create();
|
let directory = RAMDirectory::create();
|
||||||
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
||||||
assert!(Index::exists(&directory).unwrap());
|
assert!(Index::exists(&directory));
|
||||||
assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
|
assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -463,7 +450,7 @@ mod tests {
|
|||||||
fn open_or_create_exists_but_schema_does_not_match() {
|
fn open_or_create_exists_but_schema_does_not_match() {
|
||||||
let directory = RAMDirectory::create();
|
let directory = RAMDirectory::create();
|
||||||
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
|
||||||
assert!(Index::exists(&directory).unwrap());
|
assert!(Index::exists(&directory));
|
||||||
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
|
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
|
||||||
let err = Index::open_or_create(directory, Schema::builder().build());
|
let err = Index::open_or_create(directory, Schema::builder().build());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -489,7 +476,7 @@ mod tests {
|
|||||||
.try_into()
|
.try_into()
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 0);
|
assert_eq!(reader.searcher().num_docs(), 0);
|
||||||
test_index_on_commit_reload_policy_aux(field, &index, &reader);
|
test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "mmap")]
|
#[cfg(feature = "mmap")]
|
||||||
@@ -513,32 +500,32 @@ mod tests {
|
|||||||
.try_into()
|
.try_into()
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 0);
|
assert_eq!(reader.searcher().num_docs(), 0);
|
||||||
test_index_on_commit_reload_policy_aux(field, &index, &reader);
|
test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_index_manual_policy_mmap() -> crate::Result<()> {
|
fn test_index_manual_policy_mmap() {
|
||||||
let schema = throw_away_schema();
|
let schema = throw_away_schema();
|
||||||
let field = schema.get_field("num_likes").unwrap();
|
let field = schema.get_field("num_likes").unwrap();
|
||||||
let mut index = Index::create_from_tempdir(schema)?;
|
let mut index = Index::create_from_tempdir(schema).unwrap();
|
||||||
let mut writer = index.writer_for_tests()?;
|
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
writer.commit()?;
|
writer.commit().unwrap();
|
||||||
let reader = index
|
let reader = index
|
||||||
.reader_builder()
|
.reader_builder()
|
||||||
.reload_policy(ReloadPolicy::Manual)
|
.reload_policy(ReloadPolicy::Manual)
|
||||||
.try_into()?;
|
.try_into()
|
||||||
|
.unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 0);
|
assert_eq!(reader.searcher().num_docs(), 0);
|
||||||
writer.add_document(doc!(field=>1u64));
|
writer.add_document(doc!(field=>1u64));
|
||||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||||
let _handle = index.directory_mut().watch(WatchCallback::new(move || {
|
let _handle = index.directory_mut().watch(Box::new(move || {
|
||||||
let _ = sender.send(());
|
let _ = sender.send(());
|
||||||
}));
|
}));
|
||||||
writer.commit()?;
|
writer.commit().unwrap();
|
||||||
assert!(receiver.recv().is_ok());
|
assert!(receiver.recv().is_ok());
|
||||||
assert_eq!(reader.searcher().num_docs(), 0);
|
assert_eq!(reader.searcher().num_docs(), 0);
|
||||||
reader.reload()?;
|
reader.reload().unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 1);
|
assert_eq!(reader.searcher().num_docs(), 1);
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -555,38 +542,30 @@ mod tests {
|
|||||||
.try_into()
|
.try_into()
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 0);
|
assert_eq!(reader.searcher().num_docs(), 0);
|
||||||
test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
|
test_index_on_commit_reload_policy_aux(field, read_index, &write_index, &reader);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
|
|
||||||
let mut reader_index = reader.index();
|
fn test_index_on_commit_reload_policy_aux(
|
||||||
|
field: Field,
|
||||||
|
mut reader_index: Index,
|
||||||
|
index: &Index,
|
||||||
|
reader: &IndexReader,
|
||||||
|
) {
|
||||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||||
let _watch_handle = reader_index
|
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
|
||||||
.directory_mut()
|
let _ = sender.send(());
|
||||||
.watch(WatchCallback::new(move || {
|
}));
|
||||||
let _ = sender.send(());
|
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
}));
|
|
||||||
let mut writer = index.writer_for_tests().unwrap();
|
|
||||||
assert_eq!(reader.searcher().num_docs(), 0);
|
assert_eq!(reader.searcher().num_docs(), 0);
|
||||||
writer.add_document(doc!(field=>1u64));
|
writer.add_document(doc!(field=>1u64));
|
||||||
writer.commit().unwrap();
|
writer.commit().unwrap();
|
||||||
// We need a loop here because it is possible for notify to send more than
|
assert!(receiver.recv().is_ok());
|
||||||
// one modify event. It was observed on CI on MacOS.
|
assert_eq!(reader.searcher().num_docs(), 1);
|
||||||
loop {
|
|
||||||
assert!(receiver.recv().is_ok());
|
|
||||||
if reader.searcher().num_docs() == 1 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writer.add_document(doc!(field=>2u64));
|
writer.add_document(doc!(field=>2u64));
|
||||||
writer.commit().unwrap();
|
writer.commit().unwrap();
|
||||||
// ... Same as above
|
assert!(receiver.recv().is_ok());
|
||||||
loop {
|
assert_eq!(reader.searcher().num_docs(), 2);
|
||||||
assert!(receiver.recv().is_ok());
|
|
||||||
if reader.searcher().num_docs() == 2 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// This test will not pass on windows, because windows
|
// This test will not pass on windows, because windows
|
||||||
@@ -604,7 +583,7 @@ mod tests {
|
|||||||
writer.add_document(doc!(field => i));
|
writer.add_document(doc!(field => i));
|
||||||
}
|
}
|
||||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||||
let _handle = directory.watch(WatchCallback::new(move || {
|
let _handle = directory.watch(Box::new(move || {
|
||||||
let _ = sender.send(());
|
let _ = sender.send(());
|
||||||
}));
|
}));
|
||||||
writer.commit().unwrap();
|
writer.commit().unwrap();
|
||||||
@@ -3,7 +3,9 @@ use crate::core::SegmentId;
 use crate::schema::Schema;
 use crate::Opstamp;
 use census::{Inventory, TrackedObject};
+use serde;
 use serde::{Deserialize, Serialize};
+use serde_json;
 use std::collections::HashSet;
 use std::fmt;
 use std::path::PathBuf;
@@ -213,7 +215,7 @@ pub struct IndexMeta {
     #[serde(skip_serializing_if = "Option::is_none")]
     /// Payload associated to the last commit.
     ///
-    /// Upon commit, clients can optionally add a small `String` payload to their commit
+    /// Upon commit, clients can optionally add a small `Striing` payload to their commit
     /// to help identify this commit.
     /// This payload is entirely unused by tantivy.
     pub payload: Option<String>,
@@ -1,13 +1,13 @@
-use std::io;
-
 use crate::common::BinarySerializable;
-use crate::directory::FileSlice;
+use crate::directory::ReadOnlySource;
 use crate::positions::PositionReader;
 use crate::postings::TermInfo;
 use crate::postings::{BlockSegmentPostings, SegmentPostings};
+use crate::schema::FieldType;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::termdict::TermDictionary;
+use owned_read::OwnedRead;

 /// The inverted index reader is in charge of accessing
 /// the inverted index associated to a specific field.
@@ -16,7 +16,7 @@ use crate::termdict::TermDictionary;
 ///
 /// It is safe to delete the segment associated to
 /// an `InvertedIndexReader`. As long as it is open,
-/// the `FileSlice` it is relying on should
+/// the `ReadOnlySource` it is relying on should
 /// stay available.
 ///
 ///
@@ -24,9 +24,9 @@ use crate::termdict::TermDictionary;
 /// the `SegmentReader`'s [`.inverted_index(...)`] method
 pub struct InvertedIndexReader {
     termdict: TermDictionary,
-    postings_file_slice: FileSlice,
-    positions_file_slice: FileSlice,
-    positions_idx_file_slice: FileSlice,
+    postings_source: ReadOnlySource,
+    positions_source: ReadOnlySource,
+    positions_idx_source: ReadOnlySource,
     record_option: IndexRecordOption,
     total_num_tokens: u64,
 }
@@ -35,38 +35,42 @@ impl InvertedIndexReader {
     #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
-        postings_file_slice: FileSlice,
-        positions_file_slice: FileSlice,
-        positions_idx_file_slice: FileSlice,
+        postings_source: ReadOnlySource,
+        positions_source: ReadOnlySource,
+        positions_idx_source: ReadOnlySource,
         record_option: IndexRecordOption,
-    ) -> io::Result<InvertedIndexReader> {
-        let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
-        let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
-        Ok(InvertedIndexReader {
+    ) -> InvertedIndexReader {
+        let total_num_tokens_data = postings_source.slice(0, 8);
+        let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
+        let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
+        InvertedIndexReader {
             termdict,
-            postings_file_slice: postings_body,
-            positions_file_slice,
-            positions_idx_file_slice,
+            postings_source: postings_source.slice_from(8),
+            positions_source,
+            positions_idx_source,
             record_option,
             total_num_tokens,
-        })
+        }
     }

     /// Creates an empty `InvertedIndexReader` object, which
     /// contains no terms at all.
-    pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
+    pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
+        let record_option = field_type
+            .get_index_record_option()
+            .unwrap_or(IndexRecordOption::Basic);
         InvertedIndexReader {
             termdict: TermDictionary::empty(),
-            postings_file_slice: FileSlice::empty(),
-            positions_file_slice: FileSlice::empty(),
-            positions_idx_file_slice: FileSlice::empty(),
+            postings_source: ReadOnlySource::empty(),
+            positions_source: ReadOnlySource::empty(),
+            positions_idx_source: ReadOnlySource::empty(),
             record_option,
             total_num_tokens: 0u64,
         }
     }

     /// Returns the term info associated with the term.
-    pub fn get_term_info(&self, term: &Term) -> io::Result<Option<TermInfo>> {
+    pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
         self.termdict.get(term.value_bytes())
     }

@@ -89,12 +93,12 @@ impl InvertedIndexReader {
|
|||||||
&self,
|
&self,
|
||||||
term_info: &TermInfo,
|
term_info: &TermInfo,
|
||||||
block_postings: &mut BlockSegmentPostings,
|
block_postings: &mut BlockSegmentPostings,
|
||||||
) -> io::Result<()> {
|
) {
|
||||||
let start_offset = term_info.postings_start_offset as usize;
|
let offset = term_info.postings_offset as usize;
|
||||||
let stop_offset = term_info.postings_stop_offset as usize;
|
let end_source = self.postings_source.len();
|
||||||
let postings_slice = self.postings_file_slice.slice(start_offset, stop_offset);
|
let postings_slice = self.postings_source.slice(offset, end_source);
|
||||||
block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
|
let postings_reader = OwnedRead::new(postings_slice);
|
||||||
Ok(())
|
block_postings.reset(term_info.doc_freq, postings_reader);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a block postings given a `Term`.
|
/// Returns a block postings given a `Term`.
|
||||||
@@ -105,10 +109,9 @@ impl InvertedIndexReader {
|
|||||||
&self,
|
&self,
|
||||||
term: &Term,
|
term: &Term,
|
||||||
option: IndexRecordOption,
|
option: IndexRecordOption,
|
||||||
) -> io::Result<Option<BlockSegmentPostings>> {
|
) -> Option<BlockSegmentPostings> {
|
||||||
self.get_term_info(term)?
|
self.get_term_info(term)
|
||||||
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
|
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
|
||||||
.transpose()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a block postings given a `term_info`.
|
/// Returns a block postings given a `term_info`.
|
||||||
@@ -119,14 +122,12 @@ impl InvertedIndexReader {
|
|||||||
&self,
|
&self,
|
||||||
term_info: &TermInfo,
|
term_info: &TermInfo,
|
||||||
requested_option: IndexRecordOption,
|
requested_option: IndexRecordOption,
|
||||||
) -> io::Result<BlockSegmentPostings> {
|
) -> BlockSegmentPostings {
|
||||||
let postings_data = self.postings_file_slice.slice(
|
let offset = term_info.postings_offset as usize;
|
||||||
term_info.postings_start_offset as usize,
|
let postings_data = self.postings_source.slice_from(offset);
|
||||||
term_info.postings_stop_offset as usize,
|
BlockSegmentPostings::from_data(
|
||||||
);
|
|
||||||
BlockSegmentPostings::open(
|
|
||||||
term_info.doc_freq,
|
term_info.doc_freq,
|
||||||
postings_data,
|
OwnedRead::new(postings_data),
|
||||||
self.record_option,
|
self.record_option,
|
||||||
requested_option,
|
requested_option,
|
||||||
)
|
)
|
||||||
@@ -140,23 +141,20 @@ impl InvertedIndexReader {
|
|||||||
&self,
|
&self,
|
||||||
term_info: &TermInfo,
|
term_info: &TermInfo,
|
||||||
option: IndexRecordOption,
|
option: IndexRecordOption,
|
||||||
) -> io::Result<SegmentPostings> {
|
) -> SegmentPostings {
|
||||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
|
let block_postings = self.read_block_postings_from_terminfo(term_info, option);
|
||||||
let position_stream = {
|
let position_stream = {
|
||||||
if option.has_positions() {
|
if option.has_positions() {
|
||||||
let position_reader = self.positions_file_slice.clone();
|
let position_reader = self.positions_source.clone();
|
||||||
let skip_reader = self.positions_idx_file_slice.clone();
|
let skip_reader = self.positions_idx_source.clone();
|
||||||
let position_reader =
|
let position_reader =
|
||||||
PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?;
|
PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
|
||||||
Some(position_reader)
|
Some(position_reader)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
Ok(SegmentPostings::from_block_postings(
|
SegmentPostings::from_block_postings(block_postings, position_stream)
|
||||||
block_postings,
|
|
||||||
position_stream,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the total number of tokens recorded for all documents
|
/// Returns the total number of tokens recorded for all documents
|
||||||
@@ -175,31 +173,24 @@ impl InvertedIndexReader {
|
|||||||
/// For instance, requesting `IndexRecordOption::Freq` for a
|
/// For instance, requesting `IndexRecordOption::Freq` for a
|
||||||
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
||||||
/// with `DocId`s and frequencies.
|
/// with `DocId`s and frequencies.
|
||||||
pub fn read_postings(
|
pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
|
||||||
&self,
|
self.get_term_info(term)
|
||||||
term: &Term,
|
|
||||||
option: IndexRecordOption,
|
|
||||||
) -> io::Result<Option<SegmentPostings>> {
|
|
||||||
self.get_term_info(term)?
|
|
||||||
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
|
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||||
.transpose()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn read_postings_no_deletes(
|
pub(crate) fn read_postings_no_deletes(
|
||||||
&self,
|
&self,
|
||||||
term: &Term,
|
term: &Term,
|
||||||
option: IndexRecordOption,
|
option: IndexRecordOption,
|
||||||
) -> io::Result<Option<SegmentPostings>> {
|
) -> Option<SegmentPostings> {
|
||||||
self.get_term_info(term)?
|
self.get_term_info(term)
|
||||||
.map(|term_info| self.read_postings_from_terminfo(&term_info, option))
|
.map(|term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||||
.transpose()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the number of documents containing the term.
|
/// Returns the number of documents containing the term.
|
||||||
pub fn doc_freq(&self, term: &Term) -> io::Result<u32> {
|
pub fn doc_freq(&self, term: &Term) -> u32 {
|
||||||
Ok(self
|
self.get_term_info(term)
|
||||||
.get_term_info(term)?
|
|
||||||
.map(|term_info| term_info.doc_freq)
|
.map(|term_info| term_info.doc_freq)
|
||||||
.unwrap_or(0u32))
|
.unwrap_or(0u32)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,17 +1,41 @@
|
|||||||
use crate::collector::Collector;
|
use crate::collector::Collector;
|
||||||
|
use crate::collector::SegmentCollector;
|
||||||
use crate::core::Executor;
|
use crate::core::Executor;
|
||||||
|
use crate::core::InvertedIndexReader;
|
||||||
use crate::core::SegmentReader;
|
use crate::core::SegmentReader;
|
||||||
use crate::query::Query;
|
use crate::query::Query;
|
||||||
|
use crate::query::Scorer;
|
||||||
|
use crate::query::Weight;
|
||||||
use crate::schema::Document;
|
use crate::schema::Document;
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::schema::Term;
|
use crate::schema::{Field, Term};
|
||||||
use crate::space_usage::SearcherSpaceUsage;
|
use crate::space_usage::SearcherSpaceUsage;
|
||||||
use crate::store::StoreReader;
|
use crate::store::StoreReader;
|
||||||
|
use crate::termdict::TermMerger;
|
||||||
use crate::DocAddress;
|
use crate::DocAddress;
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
|
use std::fmt;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
use std::{fmt, io};
|
fn collect_segment<C: Collector>(
|
||||||
|
collector: &C,
|
||||||
|
weight: &dyn Weight,
|
||||||
|
segment_ord: u32,
|
||||||
|
segment_reader: &SegmentReader,
|
||||||
|
) -> crate::Result<C::Fruit> {
|
||||||
|
let mut scorer = weight.scorer(segment_reader, 1.0f32)?;
|
||||||
|
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
|
||||||
|
if let Some(delete_bitset) = segment_reader.delete_bitset() {
|
||||||
|
scorer.for_each(&mut |doc, score| {
|
||||||
|
if delete_bitset.is_alive(doc) {
|
||||||
|
segment_collector.collect(doc, score);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
|
||||||
|
}
|
||||||
|
Ok(segment_collector.harvest())
|
||||||
|
}
|
||||||
|
|
||||||
/// Holds a list of `SegmentReader`s ready for search.
|
/// Holds a list of `SegmentReader`s ready for search.
|
||||||
///
|
///
|
||||||
@@ -31,17 +55,17 @@ impl Searcher {
|
|||||||
schema: Schema,
|
schema: Schema,
|
||||||
index: Index,
|
index: Index,
|
||||||
segment_readers: Vec<SegmentReader>,
|
segment_readers: Vec<SegmentReader>,
|
||||||
) -> io::Result<Searcher> {
|
) -> Searcher {
|
||||||
let store_readers: Vec<StoreReader> = segment_readers
|
let store_readers = segment_readers
|
||||||
.iter()
|
.iter()
|
||||||
.map(SegmentReader::get_store_reader)
|
.map(SegmentReader::get_store_reader)
|
||||||
.collect::<io::Result<Vec<_>>>()?;
|
.collect();
|
||||||
Ok(Searcher {
|
Searcher {
|
||||||
schema,
|
schema,
|
||||||
index,
|
index,
|
||||||
segment_readers,
|
segment_readers,
|
||||||
store_readers,
|
store_readers,
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the `Index` associated to the `Searcher`
|
/// Returns the `Index` associated to the `Searcher`
|
||||||
@@ -74,14 +98,13 @@ impl Searcher {

     /// Return the overall number of documents containing
     /// the given term.
-    pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
-        let mut total_doc_freq = 0;
-        for segment_reader in &self.segment_readers {
-            let inverted_index = segment_reader.inverted_index(term.field())?;
-            let doc_freq = inverted_index.doc_freq(term)?;
-            total_doc_freq += u64::from(doc_freq);
-        }
-        Ok(total_doc_freq)
+    pub fn doc_freq(&self, term: &Term) -> u64 {
+        self.segment_readers
+            .iter()
+            .map(|segment_reader| {
+                u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
+            })
+            .sum::<u64>()
     }

     /// Return the list of segment readers
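Since the right-hand side of this hunk drops the `Result` wrapper, a call site only differs by the trailing `?`. A small illustrative sketch follows; the term text and function name are assumptions.

    use tantivy::schema::{Field, Term};
    use tantivy::Searcher;

    fn docs_containing_rust(searcher: &Searcher, field: Field) -> u64 {
        let term = Term::from_field_text(field, "rust");
        // Right-hand (nrt) side: the count comes back directly.
        // The left-hand side returns crate::Result<u64>, so it would read
        // `searcher.doc_freq(&term)?` inside a fallible function.
        searcher.doc_freq(&term)
    }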
@@ -140,20 +163,56 @@ impl Searcher {
|
|||||||
let segment_readers = self.segment_readers();
|
let segment_readers = self.segment_readers();
|
||||||
let fruits = executor.map(
|
let fruits = executor.map(
|
||||||
|(segment_ord, segment_reader)| {
|
|(segment_ord, segment_reader)| {
|
||||||
collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
|
collect_segment(
|
||||||
|
collector,
|
||||||
|
weight.as_ref(),
|
||||||
|
segment_ord as u32,
|
||||||
|
segment_reader,
|
||||||
|
)
|
||||||
},
|
},
|
||||||
segment_readers.iter().enumerate(),
|
segment_readers.iter().enumerate(),
|
||||||
)?;
|
)?;
|
||||||
collector.merge_fruits(fruits)
|
collector.merge_fruits(fruits)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Return the field searcher associated to a `Field`.
|
||||||
|
pub fn field(&self, field: Field) -> FieldSearcher {
|
||||||
|
let inv_index_readers = self
|
||||||
|
.segment_readers
|
||||||
|
.iter()
|
||||||
|
.map(|segment_reader| segment_reader.inverted_index(field))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
FieldSearcher::new(inv_index_readers)
|
||||||
|
}
|
||||||
|
|
||||||
/// Summarize total space usage of this searcher.
|
/// Summarize total space usage of this searcher.
|
||||||
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
|
pub fn space_usage(&self) -> SearcherSpaceUsage {
|
||||||
let mut space_usage = SearcherSpaceUsage::new();
|
let mut space_usage = SearcherSpaceUsage::new();
|
||||||
for segment_reader in &self.segment_readers {
|
for segment_reader in self.segment_readers.iter() {
|
||||||
space_usage.add_segment(segment_reader.space_usage()?);
|
space_usage.add_segment(segment_reader.space_usage());
|
||||||
}
|
}
|
||||||
Ok(space_usage)
|
space_usage
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FieldSearcher {
|
||||||
|
inv_index_readers: Vec<Arc<InvertedIndexReader>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FieldSearcher {
|
||||||
|
fn new(inv_index_readers: Vec<Arc<InvertedIndexReader>>) -> FieldSearcher {
|
||||||
|
FieldSearcher { inv_index_readers }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a Stream over all of the sorted unique terms of
|
||||||
|
/// for the given field.
|
||||||
|
pub fn terms(&self) -> TermMerger<'_> {
|
||||||
|
let term_streamers: Vec<_> = self
|
||||||
|
.inv_index_readers
|
||||||
|
.iter()
|
||||||
|
.map(|inverted_index| inverted_index.terms().stream())
|
||||||
|
.collect();
|
||||||
|
TermMerger::new(term_streamers)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,41 +3,140 @@ use crate::core::Index;
|
|||||||
use crate::core::SegmentId;
|
use crate::core::SegmentId;
|
||||||
use crate::core::SegmentMeta;
|
use crate::core::SegmentMeta;
|
||||||
use crate::directory::error::{OpenReadError, OpenWriteError};
|
use crate::directory::error::{OpenReadError, OpenWriteError};
|
||||||
use crate::directory::Directory;
|
use crate::directory::{Directory, ManagedDirectory, RAMDirectory};
|
||||||
use crate::directory::{FileSlice, WritePtr};
|
use crate::directory::{ReadOnlySource, WritePtr};
|
||||||
use crate::indexer::segment_serializer::SegmentSerializer;
|
use crate::indexer::segment_serializer::SegmentSerializer;
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::Opstamp;
|
use crate::Opstamp;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
|
use std::ops::{Deref, DerefMut};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
use crate::indexer::{ResourceManager};
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub(crate) enum SegmentDirectory {
|
||||||
|
Persisted(ManagedDirectory),
|
||||||
|
Volatile {
|
||||||
|
directory: RAMDirectory,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SegmentDirectory {
|
||||||
|
fn new_volatile(memory_manager: ResourceManager) -> SegmentDirectory {
|
||||||
|
SegmentDirectory::Volatile {
|
||||||
|
directory: RAMDirectory::create_with_memory_manager(memory_manager),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for SegmentDirectory {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
SegmentDirectory::Volatile { .. } => write!(f, "volatile")?,
|
||||||
|
SegmentDirectory::Persisted(dir) => write!(f, "Persisted({:?})", dir)?,
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ManagedDirectory> for SegmentDirectory {
|
||||||
|
fn from(directory: ManagedDirectory) -> Self {
|
||||||
|
SegmentDirectory::Persisted(directory)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deref for SegmentDirectory {
|
||||||
|
type Target = dyn Directory;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
match self {
|
||||||
|
SegmentDirectory::Volatile {
|
||||||
|
directory, ..
|
||||||
|
} => directory,
|
||||||
|
SegmentDirectory::Persisted(dir) => dir,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DerefMut for SegmentDirectory {
|
||||||
|
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||||
|
match self {
|
||||||
|
SegmentDirectory::Volatile { directory, .. } => directory,
|
||||||
|
SegmentDirectory::Persisted(dir) => dir,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A segment is a piece of the index.
|
/// A segment is a piece of the index.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct Segment {
|
pub struct Segment {
|
||||||
index: Index,
|
schema: Schema,
|
||||||
meta: SegmentMeta,
|
meta: SegmentMeta,
|
||||||
|
directory: SegmentDirectory,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl fmt::Debug for Segment {
|
impl fmt::Debug for Segment {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
write!(f, "Segment({:?})", self.id().uuid_string())
|
write!(
|
||||||
|
f,
|
||||||
|
"Segment(id={:?}, directory={:?})",
|
||||||
|
self.id().uuid_string(),
|
||||||
|
self.directory
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Segment {
|
impl Segment {
|
||||||
/// Creates a new segment given an `Index` and a `SegmentId`
|
pub(crate) fn new_persisted(
|
||||||
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
|
meta: SegmentMeta,
|
||||||
Segment { index, meta }
|
directory: ManagedDirectory,
|
||||||
|
schema: Schema,
|
||||||
|
) -> Segment {
|
||||||
|
Segment {
|
||||||
|
meta,
|
||||||
|
schema,
|
||||||
|
directory: SegmentDirectory::from(directory),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the index the segment belongs to.
|
/// Creates a new segment that embeds its own `RAMDirectory`.
|
||||||
pub fn index(&self) -> &Index {
|
///
|
||||||
&self.index
|
/// That segment is entirely dissociated from the index directory.
|
||||||
|
/// It will be persisted by a background thread in charge of IO.
|
||||||
|
pub fn new_volatile(meta: SegmentMeta, schema: Schema, resource_manager: ResourceManager) -> Segment {
|
||||||
|
Segment {
|
||||||
|
schema,
|
||||||
|
meta,
|
||||||
|
directory: SegmentDirectory::new_volatile(resource_manager),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a new segment given an `Index` and a `SegmentId`
|
||||||
|
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
|
||||||
|
let segment_directory = index.directory().clone();
|
||||||
|
Segment::new_persisted(meta, segment_directory, index.schema())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Persists a given `Segment` to a directory.
|
||||||
|
pub fn persist(&mut self, mut dest_directory: ManagedDirectory) -> crate::Result<()> {
|
||||||
|
if let SegmentDirectory::Persisted(_) = self.directory {
|
||||||
|
// this segment is already persisted.
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
if let SegmentDirectory::Volatile { directory, ..} = &self.directory {
|
||||||
|
directory.persist(&mut dest_directory)?;
|
||||||
|
}
|
||||||
|
self.directory = SegmentDirectory::Persisted(dest_directory);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn into_volatile(&self, memory_manager: ResourceManager) -> Segment {
|
||||||
|
Segment::new_volatile(self.meta.clone(), self.schema.clone(), memory_manager)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns our index's schema.
|
/// Returns our index's schema.
|
||||||
pub fn schema(&self) -> Schema {
|
pub fn schema(&self) -> Schema {
|
||||||
self.index.schema()
|
self.schema.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the segment meta-information
|
/// Returns the segment meta-information
|
||||||
@@ -51,16 +150,18 @@ impl Segment {
|
|||||||
/// as we finalize a fresh new segment.
|
/// as we finalize a fresh new segment.
|
||||||
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
|
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
|
||||||
Segment {
|
Segment {
|
||||||
index: self.index,
|
schema: self.schema,
|
||||||
meta: self.meta.with_max_doc(max_doc),
|
meta: self.meta.with_max_doc(max_doc),
|
||||||
|
directory: self.directory,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[doc(hidden)]
|
#[doc(hidden)]
|
||||||
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
|
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
|
||||||
Segment {
|
Segment {
|
||||||
index: self.index,
|
schema: self.schema,
|
||||||
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
|
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
|
||||||
|
directory: self.directory,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,16 +179,17 @@ impl Segment {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Open one of the component file for a *regular* read.
|
/// Open one of the component file for a *regular* read.
|
||||||
pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> {
|
pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
|
||||||
let path = self.relative_path(component);
|
let path = self.relative_path(component);
|
||||||
self.index.directory().open_read(&path)
|
let source = self.directory.open_read(&path)?;
|
||||||
|
Ok(source)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Open one of the component file for *regular* write.
|
/// Open one of the component file for *regular* write.
|
||||||
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
|
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
|
||||||
let path = self.relative_path(component);
|
let path = self.relative_path(component);
|
||||||
let write = self.index.directory_mut().open_write(&path)?;
|
let wrt = self.directory.open_write(&path)?;
|
||||||
Ok(write)
|
Ok(wrt)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -20,7 +20,7 @@ pub enum SegmentComponent {
     /// Dictionary associating `Term`s to `TermInfo`s which is
     /// simply an address into the `postings` file and the `positions` file.
     TERMS,
-    /// Row-oriented, compressed storage of the documents.
+    /// Row-oriented, LZ4-compressed storage of the documents.
     /// Accessing a document from the store is relatively slow, as it
     /// requires to decompress the entire block it belongs to.
     STORE,
@@ -1,26 +1,26 @@
+use crate::common::CompositeFile;
 use crate::common::HasLen;
 use crate::core::InvertedIndexReader;
 use crate::core::Segment;
 use crate::core::SegmentComponent;
 use crate::core::SegmentId;
-use crate::directory::FileSlice;
+use crate::directory::ReadOnlySource;
 use crate::fastfield::DeleteBitSet;
 use crate::fastfield::FacetReader;
 use crate::fastfield::FastFieldReaders;
-use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
+use crate::fieldnorm::FieldNormReader;
+use crate::schema::Field;
 use crate::schema::FieldType;
 use crate::schema::Schema;
-use crate::schema::{Field, IndexRecordOption};
 use crate::space_usage::SegmentSpaceUsage;
 use crate::store::StoreReader;
 use crate::termdict::TermDictionary;
 use crate::DocId;
-use crate::{common::CompositeFile, error::DataCorruption};
 use fail::fail_point;
+use std::collections::HashMap;
 use std::fmt;
 use std::sync::Arc;
 use std::sync::RwLock;
-use std::{collections::HashMap, io};

 /// Entry point to access all of the datastructures of the `Segment`
 ///
@@ -48,9 +48,9 @@ pub struct SegmentReader {
     positions_composite: CompositeFile,
     positions_idx_composite: CompositeFile,
     fast_fields_readers: Arc<FastFieldReaders>,
-    fieldnorm_readers: FieldNormReaders,
+    fieldnorms_composite: CompositeFile,

-    store_file: FileSlice,
+    store_source: ReadOnlySource,
     delete_bitset_opt: Option<DeleteBitSet>,
     schema: Schema,
 }
@@ -106,26 +106,16 @@ impl SegmentReader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the `FacetReader` associated to a given `Field`.
|
/// Accessor to the `FacetReader` associated to a given `Field`.
|
||||||
pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
|
pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
|
||||||
let field_entry = self.schema.get_field_entry(field);
|
let field_entry = self.schema.get_field_entry(field);
|
||||||
if field_entry.field_type() != &FieldType::HierarchicalFacet {
|
if field_entry.field_type() != &FieldType::HierarchicalFacet {
|
||||||
return Err(crate::TantivyError::InvalidArgument(format!(
|
return None;
|
||||||
"Field {:?} is not a facet field.",
|
|
||||||
field_entry.name()
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
let term_ords_reader = self.fast_fields().u64s(field).ok_or_else(|| {
|
let term_ords_reader = self.fast_fields().u64s(field)?;
|
||||||
DataCorruption::comment_only(format!(
|
let termdict_source = self.termdict_composite.open_read(field)?;
|
||||||
"Cannot find data for hierarchical facet {:?}",
|
let termdict = TermDictionary::from_source(&termdict_source);
|
||||||
field_entry.name()
|
let facet_reader = FacetReader::new(term_ords_reader, termdict);
|
||||||
))
|
Some(facet_reader)
|
||||||
})?;
|
|
||||||
let termdict = self
|
|
||||||
.termdict_composite
|
|
||||||
.open_read(field)
|
|
||||||
.map(TermDictionary::open)
|
|
||||||
.unwrap_or_else(|| Ok(TermDictionary::empty()))?;
|
|
||||||
Ok(FacetReader::new(term_ords_reader, termdict))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the segment's `Field norms`'s reader.
|
/// Accessor to the segment's `Field norms`'s reader.
|
||||||
@@ -135,45 +125,47 @@ impl SegmentReader {
|
|||||||
///
|
///
|
||||||
/// They are simply stored as a fast field, serialized in
|
/// They are simply stored as a fast field, serialized in
|
||||||
/// the `.fieldnorm` file of the segment.
|
/// the `.fieldnorm` file of the segment.
|
||||||
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
|
pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
|
||||||
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
|
if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
|
||||||
|
FieldNormReader::open(fieldnorm_source)
|
||||||
|
} else {
|
||||||
let field_name = self.schema.get_field_name(field);
|
let field_name = self.schema.get_field_name(field);
|
||||||
let err_msg = format!(
|
let err_msg = format!(
|
||||||
"Field norm not found for field {:?}. Was it marked as indexed during indexing?",
|
"Field norm not found for field {:?}. Was it market as indexed during indexing.",
|
||||||
field_name
|
field_name
|
||||||
);
|
);
|
||||||
crate::TantivyError::SchemaError(err_msg)
|
panic!(err_msg);
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the segment's `StoreReader`.
|
/// Accessor to the segment's `StoreReader`.
|
||||||
pub fn get_store_reader(&self) -> io::Result<StoreReader> {
|
pub fn get_store_reader(&self) -> StoreReader {
|
||||||
StoreReader::open(self.store_file.clone())
|
StoreReader::from_source(self.store_source.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Open a new segment for reading.
|
/// Open a new segment for reading.
|
||||||
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
|
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
|
||||||
let termdict_file = segment.open_read(SegmentComponent::TERMS)?;
|
let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
|
||||||
let termdict_composite = CompositeFile::open(&termdict_file)?;
|
let termdict_composite = CompositeFile::open(&termdict_source)?;
|
||||||
|
|
||||||
let store_file = segment.open_read(SegmentComponent::STORE)?;
|
let store_source = segment.open_read(SegmentComponent::STORE)?;
|
||||||
|
|
||||||
fail_point!("SegmentReader::open#middle");
|
fail_point!("SegmentReader::open#middle");
|
||||||
|
|
||||||
let postings_file = segment.open_read(SegmentComponent::POSTINGS)?;
|
let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
|
||||||
let postings_composite = CompositeFile::open(&postings_file)?;
|
let postings_composite = CompositeFile::open(&postings_source)?;
|
||||||
|
|
||||||
let positions_composite = {
|
let positions_composite = {
|
||||||
if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) {
|
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
|
||||||
CompositeFile::open(&positions_file)?
|
CompositeFile::open(&source)?
|
||||||
} else {
|
} else {
|
||||||
CompositeFile::empty()
|
CompositeFile::empty()
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let positions_idx_composite = {
|
let positions_idx_composite = {
|
||||||
if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
|
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
|
||||||
CompositeFile::open(&positions_skip_file)?
|
CompositeFile::open(&source)?
|
||||||
} else {
|
} else {
|
||||||
CompositeFile::empty()
|
CompositeFile::empty()
|
||||||
}
|
}
|
||||||
@@ -186,27 +178,26 @@ impl SegmentReader {
|
|||||||
let fast_field_readers =
|
let fast_field_readers =
|
||||||
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
|
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
|
||||||
|
|
||||||
let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
|
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
|
||||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
|
||||||
|
|
||||||
let delete_bitset_opt = if segment.meta().has_deletes() {
|
let delete_bitset_opt = if segment.meta().has_deletes() {
|
||||||
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
|
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
|
||||||
let delete_bitset = DeleteBitSet::open(delete_data)?;
|
Some(DeleteBitSet::open(delete_data))
|
||||||
Some(delete_bitset)
|
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(SegmentReader {
|
Ok(SegmentReader {
|
||||||
inv_idx_reader_cache: Default::default(),
|
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
|
||||||
max_doc: segment.meta().max_doc(),
|
max_doc: segment.meta().max_doc(),
|
||||||
num_docs: segment.meta().num_docs(),
|
num_docs: segment.meta().num_docs(),
|
||||||
termdict_composite,
|
termdict_composite,
|
||||||
postings_composite,
|
postings_composite,
|
||||||
fast_fields_readers: fast_field_readers,
|
fast_fields_readers: fast_field_readers,
|
||||||
fieldnorm_readers,
|
fieldnorms_composite,
|
||||||
segment_id: segment.id(),
|
segment_id: segment.id(),
|
||||||
store_file,
|
store_source,
|
||||||
delete_bitset_opt,
|
delete_bitset_opt,
|
||||||
positions_composite,
|
positions_composite,
|
||||||
positions_idx_composite,
|
positions_idx_composite,
|
||||||
@@ -221,64 +212,58 @@ impl SegmentReader {
    /// The field reader is in charge of iterating through the
    /// term dictionary associated to a specific field,
    /// and opening the posting list associated to any term.
-   ///
-   /// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
-   /// is returned.
-   /// Similarly if the field is marked as indexed but no term has been indexed for the given
-   /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
-   pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
+   pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
        if let Some(inv_idx_reader) = self
            .inv_idx_reader_cache
            .read()
            .expect("Lock poisoned. This should never happen")
            .get(&field)
        {
-           return Ok(Arc::clone(inv_idx_reader));
+           return Arc::clone(inv_idx_reader);
        }
        let field_entry = self.schema.get_field_entry(field);
        let field_type = field_entry.field_type();
        let record_option_opt = field_type.get_index_record_option();

        if record_option_opt.is_none() {
-           warn!("Field {:?} does not seem indexed.", field_entry.name());
+           panic!("Field {:?} does not seem indexed.", field_entry.name());
        }

-       let postings_file_opt = self.postings_composite.open_read(field);
-
-       if postings_file_opt.is_none() || record_option_opt.is_none() {
+       let record_option = record_option_opt.unwrap();
+
+       let postings_source_opt = self.postings_composite.open_read(field);
+
+       if postings_source_opt.is_none() {
            // no documents in the segment contained this field.
            // As a result, no data is associated to the inverted index.
            //
            // Returns an empty inverted index.
-           let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
-           return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
+           return Arc::new(InvertedIndexReader::empty(field_type));
        }

-       let record_option = record_option_opt.unwrap();
-       let postings_file = postings_file_opt.unwrap();
+       let postings_source = postings_source_opt.unwrap();

-       let termdict_file: FileSlice = self.termdict_composite.open_read(field)
-           .ok_or_else(||
-               DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name()))
-           )?;
+       let termdict_source = self.termdict_composite.open_read(field).expect(
+           "Failed to open field term dictionary in composite file. Is the field indexed?",
+       );

-       let positions_file = self
+       let positions_source = self
            .positions_composite
            .open_read(field)
            .expect("Index corrupted. Failed to open field positions in composite file.");

-       let positions_idx_file = self
+       let positions_idx_source = self
            .positions_idx_composite
            .open_read(field)
            .expect("Index corrupted. Failed to open field positions in composite file.");

        let inv_idx_reader = Arc::new(InvertedIndexReader::new(
-           TermDictionary::open(termdict_file)?,
-           postings_file,
-           positions_file,
-           positions_idx_file,
+           TermDictionary::from_source(&termdict_source),
+           postings_source,
+           positions_source,
+           positions_idx_source,
            record_option,
-       )?);
+       ));

        // by releasing the lock in between, we may end up opening the inverting index
        // twice, but this is fine.
@@ -287,7 +272,7 @@ impl SegmentReader {
            .expect("Field reader cache lock poisoned. This should never happen.")
            .insert(field, Arc::clone(&inv_idx_reader));

-       Ok(inv_idx_reader)
+       inv_idx_reader
    }

    /// Returns the segment id
@@ -310,26 +295,26 @@ impl SegmentReader {
    }

    /// Returns an iterator that will iterate over the alive document ids
-   pub fn doc_ids_alive<'a>(&'a self) -> impl Iterator<Item = DocId> + 'a {
-       (0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
+   pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
+       SegmentReaderAliveDocsIterator::new(&self)
    }

    /// Summarize total space usage of this segment.
-   pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
-       Ok(SegmentSpaceUsage::new(
+   pub fn space_usage(&self) -> SegmentSpaceUsage {
+       SegmentSpaceUsage::new(
            self.num_docs(),
            self.termdict_composite.space_usage(),
            self.postings_composite.space_usage(),
            self.positions_composite.space_usage(),
            self.positions_idx_composite.space_usage(),
            self.fast_fields_readers.space_usage(),
-           self.fieldnorm_readers.space_usage(),
-           self.get_store_reader()?.space_usage(),
+           self.fieldnorms_composite.space_usage(),
+           self.get_store_reader().space_usage(),
            self.delete_bitset_opt
                .as_ref()
                .map(DeleteBitSet::space_usage)
                .unwrap_or(0),
-       ))
+       )
    }
}

@@ -339,6 +324,52 @@ impl fmt::Debug for SegmentReader {
    }
}

+/// Implements the iterator trait to allow easy iteration
+/// over non-deleted ("alive") DocIds in a SegmentReader
+pub struct SegmentReaderAliveDocsIterator<'a> {
+    reader: &'a SegmentReader,
+    max_doc: DocId,
+    current: DocId,
+}
+
+impl<'a> SegmentReaderAliveDocsIterator<'a> {
+    pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
+        SegmentReaderAliveDocsIterator {
+            reader,
+            max_doc: reader.max_doc(),
+            current: 0,
+        }
+    }
+}
+
+impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
+    type Item = DocId;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // TODO: Use TinySet (like in BitSetDocSet) to speed this process up
+        if self.current >= self.max_doc {
+            return None;
+        }
+
+        // find the next alive doc id
+        while self.reader.is_deleted(self.current) {
+            self.current += 1;
+
+            if self.current >= self.max_doc {
+                return None;
+            }
+        }
+
+        // capture the current alive DocId
+        let result = Some(self.current);
+
+        // move down the chain
+        self.current += 1;
+
+        result
+    }
+}

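As a quick illustration of the iterator introduced on the nrt side above, here is a minimal usage sketch. It is not part of the diff: it assumes an index and a Searcher built exactly as in the test module below, and the variable names are illustrative.

    // Hypothetical usage of doc_ids_alive(): collect the ids of non-deleted documents
    // in the first segment. `searcher` is assumed to come from `index.reader()?.searcher()`.
    let segment_reader = searcher.segment_reader(0);
    let alive_docs: Vec<DocId> = segment_reader.doc_ids_alive().collect();
    // At most max_doc ids can be alive; deleted docs are skipped by the iterator.
    assert!(alive_docs.len() as u32 <= segment_reader.max_doc());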
#[cfg(test)]
mod test {
    use crate::core::Index;
@@ -346,7 +377,7 @@ mod test {
    use crate::DocId;

    #[test]
-   fn test_alive_docs_iterator() -> crate::Result<()> {
+   fn test_alive_docs_iterator() {
        let mut schema_builder = Schema::builder();
        schema_builder.add_text_field("name", TEXT | STORED);
        let schema = schema_builder.build();
@@ -354,26 +385,26 @@ mod test {
        let name = schema.get_field("name").unwrap();

        {
-           let mut index_writer = index.writer_for_tests()?;
+           let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
            index_writer.add_document(doc!(name => "tantivy"));
            index_writer.add_document(doc!(name => "horse"));
            index_writer.add_document(doc!(name => "jockey"));
            index_writer.add_document(doc!(name => "cap"));

            // we should now have one segment with two docs
-           index_writer.commit()?;
+           index_writer.commit().unwrap();
        }

        {
-           let mut index_writer2 = index.writer(50_000_000)?;
+           let mut index_writer2 = index.writer(50_000_000).unwrap();
            index_writer2.delete_term(Term::from_field_text(name, "horse"));
            index_writer2.delete_term(Term::from_field_text(name, "cap"));

            // ok, now we should have a deleted doc
-           index_writer2.commit()?;
+           index_writer2.commit().unwrap();
        }
-       let searcher = index.reader()?.searcher();
+       let searcher = index.reader().unwrap().searcher();
        let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
        assert_eq!(vec![0u32, 2u32], docs);
-       Ok(())
    }
}
@@ -1,9 +1,9 @@
use crate::directory::directory_lock::Lock;
use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
+use crate::directory::WatchCallback;
use crate::directory::WatchHandle;
-use crate::directory::{FileHandle, WatchCallback};
-use crate::directory::{FileSlice, WritePtr};
+use crate::directory::{ReadOnlySource, WritePtr};
use std::fmt;
use std::io;
use std::io::Write;
@@ -11,6 +11,7 @@ use std::marker::Send;
use std::marker::Sync;
use std::path::Path;
use std::path::PathBuf;
+use std::result;
use std::thread;
use std::time::Duration;

@@ -79,7 +80,7 @@ fn try_acquire_lock(
) -> Result<DirectoryLock, TryAcquireLockError> {
    let mut write = directory.open_write(filepath).map_err(|e| match e {
        OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
-       OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error),
+       OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
    })?;
    write.flush().map_err(TryAcquireLockError::IOError)?;
    Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
@@ -108,43 +109,37 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
+///
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
-   /// Opens a file and returns a boxed `FileHandle`.
+   /// Opens a virtual file for read.
    ///
-   /// Users of `Directory` should typically call `Directory::open_read(...)`,
-   /// while `Directory` implementor should implement `get_file_handle()`.
-   fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
-
    /// Once a virtual file is open, its data may not
    /// change.
    ///
    /// Specifically, subsequent writes or flushes should
-   /// have no effect on the returned `FileSlice` object.
+   /// have no effect on the returned `ReadOnlySource` object.
    ///
    /// You should only use this to read files create with [Directory::open_write].
-   fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
-       let file_handle = self.get_file_handle(path)?;
-       Ok(FileSlice::new(file_handle))
-   }
+   fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;

    /// Removes a file
    ///
    /// Removing a file will not affect an eventual
-   /// existing FileSlice pointing to it.
+   /// existing ReadOnlySource pointing to it.
    ///
    /// Removing a nonexistent file, yields a
    /// `DeleteError::DoesNotExist`.
-   fn delete(&self, path: &Path) -> Result<(), DeleteError>;
+   fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;

    /// Returns true iff the file exists
-   fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;
+   fn exists(&self, path: &Path) -> bool;

    /// Opens a writer for the *virtual file* associated with
    /// a Path.
    ///
    /// Right after this call, the file should be created
    /// and any subsequent call to `open_read` for the
-   /// same path should return a `FileSlice`.
+   /// same path should return a `ReadOnlySource`.
    ///
    /// Write operations may be aggressively buffered.
    /// The client of this trait is responsible for calling flush
@@ -158,7 +153,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    /// was not called.
    ///
    /// The file may not previously exist.
-   fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;
+   fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;

    /// Reads the full content file that has been written using
    /// atomic_write.
@@ -174,7 +169,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    /// a partially written file.
    ///
    /// The file may or may not previously exist.
-   fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>;
+   fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;

    /// Acquire a lock in the given directory.
    ///
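To make the trait change above concrete, here is a hedged caller-side sketch. It assumes the FileSlice-returning signatures shown on the removed side of this hunk; RAMDirectory is used purely for illustration and nothing here is part of the change itself.

    use std::path::Path;
    use tantivy::directory::{Directory, RAMDirectory};

    // Sketch only: write a file through the Directory abstraction and read it back.
    fn roundtrip() -> tantivy::Result<()> {
        let directory = RAMDirectory::create();
        let path = Path::new("example.bin");
        directory.atomic_write(path, b"hello")?;   // takes &mut self on the nrt side
        let slice = directory.open_read(path)?;    // FileSlice here, ReadOnlySource on nrt
        assert_eq!(slice.read_bytes()?.as_slice(), b"hello");
        Ok(())
    }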
@@ -1,68 +1,162 @@
use crate::Version;
+use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::path::PathBuf;

/// Error while trying to acquire a directory lock.
-#[derive(Debug, Error)]
+#[derive(Debug, Fail)]
pub enum LockError {
    /// Failed to acquired a lock as it is already held by another
    /// client.
    /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
    /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
-   #[error("Could not acquire lock as it is already held, possibly by a different process.")]
+   #[fail(
+       display = "Could not acquire lock as it is already held, possibly by a different process."
+   )]
    LockBusy,
    /// Trying to acquire a lock failed with an `IOError`
-   #[error("Failed to acquire the lock due to an io:Error.")]
+   #[fail(display = "Failed to acquire the lock due to an io:Error.")]
    IOError(io::Error),
}

+/// General IO error with an optional path to the offending file.
+#[derive(Debug)]
+pub struct IOError {
+    path: Option<PathBuf>,
+    err: io::Error,
+}
+
+impl Into<io::Error> for IOError {
+    fn into(self) -> io::Error {
+        self.err
+    }
+}
+
+impl fmt::Display for IOError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.path {
+            Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
+            None => write!(f, "io error occurred: '{}'", self.err),
+        }
+    }
+}
+
+impl StdError for IOError {
+    fn description(&self) -> &str {
+        "io error occurred"
+    }
+
+    fn cause(&self) -> Option<&dyn StdError> {
+        Some(&self.err)
+    }
+}
+
+impl IOError {
+    pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
+        IOError {
+            path: Some(path),
+            err,
+        }
+    }
+}
+
+impl From<io::Error> for IOError {
+    fn from(err: io::Error) -> IOError {
+        IOError { path: None, err }
+    }
+}
+
/// Error that may occur when opening a directory
-#[derive(Debug, Error)]
+#[derive(Debug)]
pub enum OpenDirectoryError {
    /// The underlying directory does not exists.
-   #[error("Directory does not exist: '{0}'.")]
    DoesNotExist(PathBuf),
    /// The path exists but is not a directory.
-   #[error("Path exists but is not a directory: '{0}'.")]
    NotADirectory(PathBuf),
-   /// Failed to create a temp directory.
-   #[error("Failed to create a temporary directory: '{0}'.")]
-   FailedToCreateTempDir(io::Error),
    /// IoError
-   #[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
-   IoError {
-       /// underlying io Error.
-       io_error: io::Error,
-       /// directory we tried to open.
-       directory_path: PathBuf,
-   },
+   IoError(io::Error),
+}
+
+impl From<io::Error> for OpenDirectoryError {
+    fn from(io_err: io::Error) -> Self {
+        OpenDirectoryError::IoError(io_err)
+    }
+}
+
+impl fmt::Display for OpenDirectoryError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            OpenDirectoryError::DoesNotExist(ref path) => {
+                write!(f, "the underlying directory '{:?}' does not exist", path)
+            }
+            OpenDirectoryError::NotADirectory(ref path) => {
+                write!(f, "the path '{:?}' exists but is not a directory", path)
+            }
+            OpenDirectoryError::IoError(ref err) => write!(
+                f,
+                "IOError while trying to open/create the directory. {:?}",
+                err
+            ),
+        }
+    }
+}
+
+impl StdError for OpenDirectoryError {
+    fn description(&self) -> &str {
+        "error occurred while opening a directory"
+    }
+
+    fn cause(&self) -> Option<&dyn StdError> {
+        None
+    }
}

/// Error that may occur when starting to write in a file
-#[derive(Debug, Error)]
+#[derive(Debug)]
pub enum OpenWriteError {
    /// Our directory is WORM, writing an existing file is forbidden.
    /// Checkout the `Directory` documentation.
-   #[error("File already exists: '{0}'")]
    FileAlreadyExists(PathBuf),
    /// Any kind of IO error that happens when
    /// writing in the underlying IO device.
-   #[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")]
-   IOError {
-       /// The underlying `io::Error`.
-       io_error: io::Error,
-       /// File path of the file that tantivy failed to open for write.
-       filepath: PathBuf,
-   },
+   IOError(IOError),
}

-impl OpenWriteError {
-   /// Wraps an io error.
-   pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
-       Self::IOError { io_error, filepath }
+impl From<IOError> for OpenWriteError {
+   fn from(err: IOError) -> OpenWriteError {
+       OpenWriteError::IOError(err)
    }
}

+impl fmt::Display for OpenWriteError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            OpenWriteError::FileAlreadyExists(ref path) => {
+                write!(f, "the file '{:?}' already exists", path)
+            }
+            OpenWriteError::IOError(ref err) => write!(
+                f,
+                "an io error occurred while opening a file for writing: '{}'",
+                err
+            ),
+        }
+    }
+}
+
+impl StdError for OpenWriteError {
+    fn description(&self) -> &str {
+        "error occurred while opening a file for writing"
+    }
+
+    fn cause(&self) -> Option<&dyn StdError> {
+        match *self {
+            OpenWriteError::FileAlreadyExists(_) => None,
+            OpenWriteError::IOError(ref err) => Some(err),
+        }
+    }
+}
+
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility {
@@ -123,47 +217,55 @@ impl fmt::Debug for Incompatibility {
}

/// Error that may occur when accessing a file read
-#[derive(Debug, Error)]
+#[derive(Debug)]
pub enum OpenReadError {
    /// The file does not exists.
-   #[error("Files does not exists: {0:?}")]
-   FileDoesNotExist(PathBuf),
-   /// Any kind of io::Error.
-   #[error(
-       "IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
-   )]
-   IOError {
-       /// The underlying `io::Error`.
-       io_error: io::Error,
-       /// File path of the file that tantivy failed to open for read.
-       filepath: PathBuf,
-   },
-   /// This library does not support the index version found in file footer.
-   #[error("Index version unsupported: {0:?}")]
-   IncompatibleIndex(Incompatibility),
-}
-
-impl OpenReadError {
-   /// Wraps an io error.
-   pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
-       Self::IOError { io_error, filepath }
-   }
-}
-/// Error that may occur when trying to delete a file
-#[derive(Debug, Error)]
-pub enum DeleteError {
-   /// The file does not exists.
-   #[error("File does not exists: '{0}'.")]
    FileDoesNotExist(PathBuf),
    /// Any kind of IO error that happens when
    /// interacting with the underlying IO device.
-   #[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
-   IOError {
-       /// The underlying `io::Error`.
-       io_error: io::Error,
-       /// File path of the file that tantivy failed to delete.
-       filepath: PathBuf,
-   },
+   IOError(IOError),
+   /// This library doesn't support the index version found on disk
+   IncompatibleIndex(Incompatibility),
+}
+
+impl From<IOError> for OpenReadError {
+   fn from(err: IOError) -> OpenReadError {
+       OpenReadError::IOError(err)
+   }
+}
+
+impl fmt::Display for OpenReadError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            OpenReadError::FileDoesNotExist(ref path) => {
+                write!(f, "the file '{:?}' does not exist", path)
+            }
+            OpenReadError::IOError(ref err) => write!(
+                f,
+                "an io error occurred while opening a file for reading: '{}'",
+                err
+            ),
+            OpenReadError::IncompatibleIndex(ref footer) => {
+                write!(f, "Incompatible index format: {:?}", footer)
+            }
+        }
+    }
+}
+
+/// Error that may occur when trying to delete a file
+#[derive(Debug)]
+pub enum DeleteError {
+    /// The file does not exists.
+    FileDoesNotExist(PathBuf),
+    /// Any kind of IO error that happens when
+    /// interacting with the underlying IO device.
+    IOError(IOError),
+}
+
+impl From<IOError> for DeleteError {
+    fn from(err: IOError) -> DeleteError {
+        DeleteError::IOError(err)
+    }
}

impl From<Incompatibility> for OpenReadError {
@@ -171,3 +273,29 @@ impl From<Incompatibility> for OpenReadError {
        OpenReadError::IncompatibleIndex(incompatibility)
    }
}

+impl fmt::Display for DeleteError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            DeleteError::FileDoesNotExist(ref path) => {
+                write!(f, "the file '{:?}' does not exist", path)
+            }
+            DeleteError::IOError(ref err) => {
+                write!(f, "an io error occurred while deleting a file: '{}'", err)
+            }
+        }
+    }
+}
+
+impl StdError for DeleteError {
+    fn description(&self) -> &str {
+        "error occurred while deleting a file"
+    }
+
+    fn cause(&self) -> Option<&dyn StdError> {
+        match *self {
+            DeleteError::FileDoesNotExist(_) => None,
+            DeleteError::IOError(ref err) => Some(err),
+        }
+    }
+}
@@ -1,247 +0,0 @@
use stable_deref_trait::StableDeref;

use crate::common::HasLen;
use crate::directory::OwnedBytes;
use std::sync::{Arc, Weak};
use std::{io, ops::Deref};

pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

/// Objects that represents files sections in tantivy.
///
/// By contract, whatever happens to the directory file, as long as a FileHandle
/// is alive, the data associated with it cannot be altered or destroyed.
///
/// The underlying behavior is therefore specific to the `Directory` that created it.
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
/// on the filesystem.
pub trait FileHandle: 'static + Send + Sync + HasLen {
    /// Reads a slice of bytes.
    ///
    /// This method may panic if the range requested is invalid.
    fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes>;
}

impl FileHandle for &'static [u8] {
    fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
        let bytes = &self[from..to];
        Ok(OwnedBytes::new(bytes))
    }
}

impl<T: Deref<Target = [u8]>> HasLen for T {
    fn len(&self) -> usize {
        self.as_ref().len()
    }
}

impl<B> From<B> for FileSlice
where
    B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
{
    fn from(bytes: B) -> FileSlice {
        FileSlice::new(Box::new(OwnedBytes::new(bytes)))
    }
}

/// Logical slice of read only file in tantivy.
//
/// It can be cloned and sliced cheaply.
///
#[derive(Clone)]
pub struct FileSlice {
    data: Arc<dyn FileHandle>,
    start: usize,
    stop: usize,
}

impl FileSlice {
    /// Wraps a FileHandle.
    pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
        let num_bytes = file_handle.len();
        FileSlice::new_with_num_bytes(file_handle, num_bytes)
    }

    /// Wraps a FileHandle.
    #[doc(hidden)]
    pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
        FileSlice {
            data: Arc::from(file_handle),
            start: 0,
            stop: num_bytes,
        }
    }

    /// Creates a fileslice that is just a view over a slice of the data.
    ///
    /// # Panics
    ///
    /// Panics if `to < from` or if `to` exceeds the filesize.
    pub fn slice(&self, from: usize, to: usize) -> FileSlice {
        assert!(to <= self.len());
        assert!(to >= from);
        FileSlice {
            data: self.data.clone(),
            start: self.start + from,
            stop: self.start + to,
        }
    }

    /// Creates an empty FileSlice
    pub fn empty() -> FileSlice {
        const EMPTY_SLICE: &[u8] = &[];
        FileSlice::from(EMPTY_SLICE)
    }

    /// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
    ///
    /// The behavior is strongly dependant on the implementation of the underlying
    /// `Directory` and the `FileSliceTrait` it creates.
    /// In particular, it is up to the `Directory` implementation
    /// to handle caching if needed.
    pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
        self.data.read_bytes(self.start, self.stop)
    }

    /// Reads a specific slice of data.
    ///
    /// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
    pub fn read_bytes_slice(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
        assert!(from <= to);
        assert!(
            self.start + to <= self.stop,
            "`to` exceeds the fileslice length"
        );
        self.data.read_bytes(self.start + from, self.start + to)
    }

    /// Splits the FileSlice at the given offset and return two file slices.
    /// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
    ///
    /// This operation is cheap and must not copy any underlying data.
    pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
        let left = self.slice_to(left_len);
        let right = self.slice_from(left_len);
        (left, right)
    }

    /// Splits the file slice at the given offset and return two file slices.
    /// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
    pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
        let left_len = self.len() - right_len;
        self.split(left_len)
    }

    /// Like `.slice(...)` but enforcing only the `from`
    /// boundary.
    ///
    /// Equivalent to `.slice(from_offset, self.len())`
    pub fn slice_from(&self, from_offset: usize) -> FileSlice {
        self.slice(from_offset, self.len())
    }

    /// Like `.slice(...)` but enforcing only the `to`
    /// boundary.
    ///
    /// Equivalent to `.slice(0, to_offset)`
    pub fn slice_to(&self, to_offset: usize) -> FileSlice {
        self.slice(0, to_offset)
    }
}

impl FileHandle for FileSlice {
    fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
        self.read_bytes_slice(from, to)
    }
}

impl HasLen for FileSlice {
    fn len(&self) -> usize {
        self.stop - self.start
    }
}

#[cfg(test)]
mod tests {
    use super::{FileHandle, FileSlice};
    use crate::common::HasLen;
    use std::io;

    #[test]
    fn test_file_slice() -> io::Result<()> {
        let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
        assert_eq!(file_slice.len(), 6);
        assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
        assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
        assert_eq!(
            file_slice
                .slice_from(1)
                .slice_to(2)
                .read_bytes()?
                .as_slice(),
            b"bc"
        );
        {
            let (left, right) = file_slice.clone().split(0);
            assert_eq!(left.read_bytes()?.as_slice(), b"");
            assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
        }
        {
            let (left, right) = file_slice.clone().split(2);
            assert_eq!(left.read_bytes()?.as_slice(), b"ab");
            assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
        }
        {
            let (left, right) = file_slice.clone().split_from_end(0);
            assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
            assert_eq!(right.read_bytes()?.as_slice(), b"");
        }
        {
            let (left, right) = file_slice.clone().split_from_end(2);
            assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
            assert_eq!(right.read_bytes()?.as_slice(), b"ef");
        }
        Ok(())
    }

    #[test]
    fn test_file_slice_trait_slice_len() {
        let blop: &'static [u8] = b"abc";
        let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
        assert_eq!(owned_bytes.len(), 3);
    }

    #[test]
    fn test_slice_simple_read() -> io::Result<()> {
        let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
        assert_eq!(slice.len(), 6);
        assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
        assert_eq!(slice.slice(1, 4).read_bytes()?.as_ref(), b"bcd");
        Ok(())
    }

    #[test]
    fn test_slice_read_slice() -> io::Result<()> {
        let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
        assert_eq!(slice_deref.read_bytes_slice(1, 4)?.as_ref(), b"bcd");
        Ok(())
    }

    #[test]
    #[should_panic(expected = "assertion failed: from <= to")]
    fn test_slice_read_slice_invalid_range() {
        let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
        assert_eq!(slice_deref.read_bytes_slice(1, 0).unwrap().as_ref(), b"bcd");
    }

    #[test]
    #[should_panic(expected = "`to` exceeds the fileslice length")]
    fn test_slice_read_slice_invalid_range_exceeds() {
        let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
        assert_eq!(
            slice_deref.read_bytes_slice(0, 10).unwrap().as_ref(),
            b"bcd"
        );
    }
}
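Since the file removed above defines the FileSlice API, here is a small hedged sketch of how its methods compose. It simply exercises calls shown in the listing and is not part of the diff; the path of the module is assumed.

    use tantivy::directory::FileSlice;

    // Sketch: an in-memory FileSlice over a static byte string, sliced twice.
    fn file_slice_demo() -> std::io::Result<()> {
        let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
        let (head, tail) = slice.split(2);              // "ab" / "cdef"
        assert_eq!(head.read_bytes()?.as_slice(), b"ab");
        // Slicing is relative to the slice, not the original file: bytes 1..3 of "cdef".
        assert_eq!(tail.slice(1, 3).read_bytes()?.as_slice(), b"de");
        Ok(())
    }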
@@ -1,178 +0,0 @@
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
use crc32fast::Hasher;
use std::fs;
use std::io;
use std::io::BufRead;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });

// Watches a file and executes registered callbacks when the file is modified.
pub struct FileWatcher {
    path: Arc<Path>,
    callbacks: Arc<WatchCallbackList>,
    state: Arc<AtomicUsize>, // 0: new, 1: runnable, 2: terminated
}

impl FileWatcher {
    pub fn new(path: &Path) -> FileWatcher {
        FileWatcher {
            path: Arc::from(path),
            callbacks: Default::default(),
            state: Default::default(),
        }
    }

    pub fn spawn(&self) {
        if self.state.compare_and_swap(0, 1, Ordering::SeqCst) > 0 {
            return;
        }

        let path = self.path.clone();
        let callbacks = self.callbacks.clone();
        let state = self.state.clone();

        thread::Builder::new()
            .name("thread-tantivy-meta-file-watcher".to_string())
            .spawn(move || {
                let mut current_checksum = None;

                while state.load(Ordering::SeqCst) == 1 {
                    if let Ok(checksum) = FileWatcher::compute_checksum(&path) {
                        // `None.unwrap_or_else(|| !checksum) != checksum` evaluates to `true`
                        if current_checksum.unwrap_or_else(|| !checksum) != checksum {
                            info!("Meta file {:?} was modified", path);
                            current_checksum = Some(checksum);
                            futures::executor::block_on(callbacks.broadcast());
                        }
                    }

                    thread::sleep(POLLING_INTERVAL);
                }
            })
            .expect("Failed to spawn meta file watcher thread");
    }

    pub fn watch(&self, callback: WatchCallback) -> WatchHandle {
        let handle = self.callbacks.subscribe(callback);
        self.spawn();
        handle
    }

    fn compute_checksum(path: &Path) -> Result<u32, io::Error> {
        let reader = match fs::File::open(path) {
            Ok(f) => io::BufReader::new(f),
            Err(e) => {
                warn!("Failed to open meta file {:?}: {:?}", path, e);
                return Err(e);
            }
        };

        let mut hasher = Hasher::new();

        for line in reader.lines() {
            hasher.update(line?.as_bytes())
        }

        Ok(hasher.finalize())
    }
}

impl Drop for FileWatcher {
    fn drop(&mut self) {
        self.state.store(2, Ordering::SeqCst);
    }
}

#[cfg(test)]
mod tests {

    use std::mem;

    use crate::directory::mmap_directory::atomic_write;

    use super::*;

    #[test]
    fn test_file_watcher_drop_watcher() -> crate::Result<()> {
        let tmp_dir = tempfile::TempDir::new()?;
        let tmp_file = tmp_dir.path().join("watched.txt");

        let counter: Arc<AtomicUsize> = Default::default();
        let (tx, rx) = crossbeam::channel::unbounded();
        let timeout = Duration::from_millis(100);

        let watcher = FileWatcher::new(&tmp_file);

        let state = watcher.state.clone();
        assert_eq!(state.load(Ordering::SeqCst), 0);

        let counter_clone = counter.clone();

        let _handle = watcher.watch(WatchCallback::new(move || {
            let val = counter_clone.fetch_add(1, Ordering::SeqCst);
            tx.send(val + 1).unwrap();
        }));

        assert_eq!(counter.load(Ordering::SeqCst), 0);
        assert_eq!(state.load(Ordering::SeqCst), 1);

        atomic_write(&tmp_file, b"foo")?;
        assert_eq!(rx.recv_timeout(timeout), Ok(1));

        atomic_write(&tmp_file, b"foo")?;
        assert!(rx.recv_timeout(timeout).is_err());

        atomic_write(&tmp_file, b"bar")?;
        assert_eq!(rx.recv_timeout(timeout), Ok(2));

        mem::drop(watcher);

        atomic_write(&tmp_file, b"qux")?;
        thread::sleep(Duration::from_millis(10));
        assert_eq!(counter.load(Ordering::SeqCst), 2);
        assert_eq!(state.load(Ordering::SeqCst), 2);

        Ok(())
    }

    #[test]
    fn test_file_watcher_drop_handle() -> crate::Result<()> {
        let tmp_dir = tempfile::TempDir::new()?;
        let tmp_file = tmp_dir.path().join("watched.txt");

        let counter: Arc<AtomicUsize> = Default::default();
        let (tx, rx) = crossbeam::channel::unbounded();
        let timeout = Duration::from_millis(100);

        let watcher = FileWatcher::new(&tmp_file);

        let state = watcher.state.clone();
        assert_eq!(state.load(Ordering::SeqCst), 0);

        let counter_clone = counter.clone();

        let handle = watcher.watch(WatchCallback::new(move || {
            let val = counter_clone.fetch_add(1, Ordering::SeqCst);
            tx.send(val + 1).unwrap();
        }));

        assert_eq!(counter.load(Ordering::SeqCst), 0);
        assert_eq!(state.load(Ordering::SeqCst), 1);

        atomic_write(&tmp_file, b"foo")?;
        assert_eq!(rx.recv_timeout(timeout), Ok(1));

        mem::drop(handle);

        atomic_write(&tmp_file, b"qux")?;
        assert_eq!(counter.load(Ordering::SeqCst), 1);
        assert_eq!(state.load(Ordering::SeqCst), 1);

        Ok(())
    }
}
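The FileWatcher removed above is a crate-internal helper, but its call pattern follows directly from the listing; a hedged usage sketch (illustrative only, not public API, file name hypothetical):

    use std::path::Path;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Sketch only: register a callback that counts modifications of a polled meta file.
    let watcher = FileWatcher::new(Path::new("meta.json"));
    let counter = Arc::new(AtomicUsize::new(0));
    let counter_clone = counter.clone();
    let _handle = watcher.watch(WatchCallback::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    }));
    // The background thread broadcasts to the callback whenever the file's checksum changes;
    // dropping `_handle` unsubscribes, and dropping `watcher` stops the polling thread.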
@@ -1,8 +1,9 @@
-use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt};
+use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility;
-use crate::directory::FileSlice;
+use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version;
+use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
@@ -63,26 +64,26 @@ impl Footer {
        let mut counting_write = CountingWriter::wrap(&mut write);
        self.serialize(&mut counting_write)?;
        let written_len = counting_write.written_bytes();
-       (written_len as u32).serialize(write)?;
+       write.write_u32::<LittleEndian>(written_len as u32)?;
        Ok(())
    }

-   pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> {
-       if file.len() < 4 {
+   pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
+       if source.len() < 4 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "File corrupted. The file is smaller than 4 bytes (len={}).",
-                   file.len()
+                   source.len()
                ),
            ));
        }
-       let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES);
-       let mut footer_len_bytes = footer_len_file.read_bytes()?;
-       let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize;
-       let (body, footer) = body_footer.split_from_end(footer_len);
-       let mut footer_bytes = footer.read_bytes()?;
-       let footer = Footer::deserialize(&mut footer_bytes)?;
+       let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
+       let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
+       let body_len = body_footer.len() - footer_len;
+       let (body, footer_data) = body_footer.split(body_len);
+       let mut cursor = footer_data.as_slice();
+       let footer = Footer::deserialize(&mut cursor)?;
        Ok((footer, body))
    }

@@ -93,36 +94,12 @@ impl Footer {
        match &self.versioned_footer {
            VersionedFooter::V1 {
                crc32: _crc,
-               store_compression,
+               store_compression: compression,
            } => {
-               if &library_version.store_compression != store_compression {
+               if &library_version.store_compression != compression {
                    return Err(Incompatibility::CompressionMismatch {
                        library_compression_format: library_version.store_compression.to_string(),
-                       index_compression_format: store_compression.to_string(),
-                   });
-               }
-               Ok(())
-           }
-           VersionedFooter::V2 {
-               crc32: _crc,
-               store_compression,
-           } => {
-               if &library_version.store_compression != store_compression {
-                   return Err(Incompatibility::CompressionMismatch {
-                       library_compression_format: library_version.store_compression.to_string(),
-                       index_compression_format: store_compression.to_string(),
-                   });
-               }
-               Ok(())
-           }
-           VersionedFooter::V3 {
-               crc32: _crc,
-               store_compression,
-           } => {
-               if &library_version.store_compression != store_compression {
-                   return Err(Incompatibility::CompressionMismatch {
-                       library_compression_format: library_version.store_compression.to_string(),
-                       index_compression_format: store_compression.to_string(),
+                       index_compression_format: compression.to_string(),
                    });
                }
                Ok(())
@@ -143,36 +120,24 @@ pub enum VersionedFooter {
        crc32: CrcHashU32,
        store_compression: String,
    },
-   // Introduction of the Block WAND information.
-   V2 {
-       crc32: CrcHashU32,
-       store_compression: String,
-   },
-   // Block wand max termfred on 1 byte
-   V3 {
-       crc32: CrcHashU32,
-       store_compression: String,
-   },
}

impl BinarySerializable for VersionedFooter {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        let mut buf = Vec::new();
        match self {
-           VersionedFooter::V3 {
+           VersionedFooter::V1 {
                crc32,
                store_compression: compression,
            } => {
                // Serializes a valid `VersionedFooter` or panics if the version is unknown
                // [ version | crc_hash | compression_mode ]
                // [ 0..4    | 4..8     | variable         ]
-               BinarySerializable::serialize(&3u32, &mut buf)?;
+               BinarySerializable::serialize(&1u32, &mut buf)?;
                BinarySerializable::serialize(crc32, &mut buf)?;
                BinarySerializable::serialize(compression, &mut buf)?;
            }
-           VersionedFooter::V2 { .. }
-           | VersionedFooter::V1 { .. }
-           | VersionedFooter::UnknownVersion => {
+           VersionedFooter::UnknownVersion => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Cannot serialize an unknown versioned footer ",
@@ -201,36 +166,22 @@ impl BinarySerializable for VersionedFooter {
        reader.read_exact(&mut buf[..])?;
        let mut cursor = &buf[..];
        let version = u32::deserialize(&mut cursor)?;
-       if version > 3 {
-           return Ok(VersionedFooter::UnknownVersion);
-       }
-       let crc32 = u32::deserialize(&mut cursor)?;
-       let store_compression = String::deserialize(&mut cursor)?;
-       Ok(if version == 1 {
-           VersionedFooter::V1 {
+       if version == 1 {
+           let crc32 = u32::deserialize(&mut cursor)?;
+           let compression = String::deserialize(&mut cursor)?;
+           Ok(VersionedFooter::V1 {
                crc32,
-               store_compression,
-           }
-       } else if version == 2 {
-           VersionedFooter::V2 {
-               crc32,
-               store_compression,
-           }
+               store_compression: compression,
+           })
        } else {
-           assert_eq!(version, 3);
-           VersionedFooter::V3 {
-               crc32,
-               store_compression,
-           }
-       })
+           Ok(VersionedFooter::UnknownVersion)
+       }
    }
}

impl VersionedFooter {
    pub fn crc(&self) -> Option<CrcHashU32> {
        match self {
-           VersionedFooter::V3 { crc32, .. } => Some(*crc32),
-           VersionedFooter::V2 { crc32, .. } => Some(*crc32),
            VersionedFooter::V1 { crc32, .. } => Some(*crc32),
            VersionedFooter::UnknownVersion { .. } => None,
        }
@@ -268,7 +219,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        let crc32 = self.hasher.take().unwrap().finalize();
-       let footer = Footer::new(VersionedFooter::V3 {
+       let footer = Footer::new(VersionedFooter::V1 {
            crc32,
            store_compression: crate::store::COMPRESSION.to_string(),
        });
@@ -295,17 +246,17 @@ mod tests {
        let mut vec = Vec::new();
        let footer_proxy = FooterProxy::new(&mut vec);
        assert!(footer_proxy.terminate().is_ok());
-       if crate::store::COMPRESSION == "lz4" {
-           assert_eq!(vec.len(), 158);
-       } else {
-           assert_eq!(vec.len(), 167);
-       }
+       assert_eq!(vec.len(), 167);
        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
-       assert!(matches!(
-           footer.versioned_footer,
-           VersionedFooter::V3 { store_compression, .. }
-           if store_compression == crate::store::COMPRESSION
-       ));
+       if let VersionedFooter::V1 {
+           crc32: _,
+           store_compression,
+       } = footer.versioned_footer
+       {
+           assert_eq!(store_compression, crate::store::COMPRESSION);
+       } else {
+           panic!("Versioned footer should be V1.");
+       }
        assert_eq!(&footer.version, crate::version());
    }

@@ -313,7 +264,7 @@ mod tests {
    fn test_serialize_deserialize_footer() {
        let mut buffer = Vec::new();
        let crc32 = 123456u32;
-       let footer: Footer = Footer::new(VersionedFooter::V3 {
+       let footer: Footer = Footer::new(VersionedFooter::V1 {
            crc32,
            store_compression: "lz4".to_string(),
        });
@@ -325,7 +276,7 @@ mod tests {
    #[test]
    fn footer_length() {
        let crc32 = 1111111u32;
-       let versioned_footer = VersionedFooter::V3 {
+       let versioned_footer = VersionedFooter::V1 {
            crc32,
            store_compression: "lz4".to_string(),
        };
@@ -346,7 +297,7 @@ mod tests {
        // versionned footer length
        12 | 128,
        // index format version
-       3,
+       1,
        0,
        0,
        0,
@@ -365,7 +316,7 @@ mod tests {
        let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
        assert!(cursor.is_empty());
        let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
-       let expected_versioned_footer: VersionedFooter = VersionedFooter::V3 {
+       let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
            crc32: expected_crc,
            store_compression: "lz4".to_string(),
        };
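Both extract_footer variants above assume the same trailing file layout; a short sketch of that layout as implied by the code (not an authoritative spec, and the helper below is hypothetical, written against the FileSlice-era API on the removed side):

    // [ body bytes ............ ][ versioned footer (variable) ][ footer_len: u32, little endian ]
    //                                                            ^ last 4 bytes of the file
    //
    // extract_footer therefore: (1) splits off the last 4 bytes and reads footer_len,
    // (2) splits footer_len bytes off the remaining tail as the footer,
    // (3) deserializes the Footer and returns it together with the body slice, e.g.:
    //
    //     let (body_and_footer, len_slice) = file.split_from_end(4);
    //     let len_bytes = len_slice.read_bytes()?;
    //     let footer_len = u32::from_le_bytes([len_bytes[0], len_bytes[1], len_bytes[2], len_bytes[3]]) as usize;
    //     let (body, footer) = body_and_footer.split_from_end(footer_len);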
@@ -1,16 +1,17 @@
-use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
+use crate::core::MANAGED_FILEPATH;
-use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
+use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
+use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock;
use crate::directory::META_LOCK;
-use crate::directory::{DirectoryLock, FileHandle};
-use crate::directory::{FileSlice, WritePtr};
+use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;

use crc32fast::Hasher;
+use serde_json;
use std::collections::HashSet;
use std::io;
use std::io::Write;
@@ -53,7 +54,7 @@ struct MetaInformation {
/// Saves the file containing the list of existing files
/// that were created by tantivy.
fn save_managed_paths(
-   directory: &dyn Directory,
+   directory: &mut dyn Directory,
    wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> {
    let mut w = serde_json::to_vec(&wlock.managed_paths)?;
@@ -86,7 +87,7 @@ impl ManagedDirectory {
            directory: Box::new(directory),
            meta_informations: Arc::default(),
        }),
-       io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()),
+       Err(OpenReadError::IOError(e)) => Err(From::from(e)),
        Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
            // For the moment, this should never happen `meta.json`
            // do not have any footer and cannot detect incompatibility.
@@ -168,7 +169,7 @@ impl ManagedDirectory {
                DeleteError::FileDoesNotExist(_) => {
                    deleted_files.push(file_to_delete.clone());
                }
-               DeleteError::IOError { .. } => {
+               DeleteError::IOError(_) => {
                    failed_to_delete_files.push(file_to_delete.clone());
                    if !cfg!(target_os = "windows") {
                        // On windows, delete is expected to fail if the file
@@ -212,7 +213,7 @@ impl ManagedDirectory {
    /// File starting by "." are reserved to locks.
    /// They are not managed and cannot be subjected
    /// to garbage collection.
    fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> {
|
fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
|
||||||
// Files starting by "." (e.g. lock files) are not managed.
|
// Files starting by "." (e.g. lock files) are not managed.
|
||||||
if !is_managed(filepath) {
|
if !is_managed(filepath) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@@ -223,7 +224,7 @@ impl ManagedDirectory {
|
|||||||
.expect("Managed file lock poisoned");
|
.expect("Managed file lock poisoned");
|
||||||
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
|
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
|
||||||
if has_changed {
|
if has_changed {
|
||||||
save_managed_paths(self.directory.as_ref(), &meta_wlock)?;
|
save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -231,19 +232,10 @@ impl ManagedDirectory {
     /// Verify checksum of a managed file
     pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
         let reader = self.directory.open_read(path)?;
-        let (footer, data) =
-            Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError {
-                io_error,
-                filepath: path.to_path_buf(),
-            })?;
-        let bytes = data
-            .read_bytes()
-            .map_err(|io_error| OpenReadError::IOError {
-                filepath: path.to_path_buf(),
-                io_error,
-            })?;
+        let (footer, data) = Footer::extract_footer(reader)
+            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
         let mut hasher = Hasher::new();
-        hasher.update(bytes.as_slice());
+        hasher.update(data.as_slice());
         let crc = hasher.finalize();
         Ok(footer
             .versioned_footer
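For readers unfamiliar with crc32fast, the recompute-and-compare step performed by validate_checksum above boils down to the following self-contained sketch; `stored_crc` stands in for the crc32 carried by the footer and is not a tantivy API.

use crc32fast::Hasher;

// Returns true when the CRC-32 of the file body matches the value recorded
// in the footer at write time.
fn crc_matches(body: &[u8], stored_crc: u32) -> bool {
    let mut hasher = Hasher::new();
    hasher.update(body);
    hasher.finalize() == stored_crc
}

fn main() {
    let body = b"some file body";
    // Simulate what the FooterProxy does on write: hash the body and store the crc.
    let mut hasher = Hasher::new();
    hasher.update(body);
    let stored_crc = hasher.finalize();
    // Validation recomputes the hash and compares; a corrupted body would fail here.
    assert!(crc_matches(body, stored_crc));
}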
@@ -254,42 +246,35 @@ impl ManagedDirectory {
|
|||||||
|
|
||||||
/// List files for which checksum does not match content
|
/// List files for which checksum does not match content
|
||||||
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
|
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
|
||||||
let mut managed_paths = self
|
let mut hashset = HashSet::new();
|
||||||
|
let managed_paths = self
|
||||||
.meta_informations
|
.meta_informations
|
||||||
.read()
|
.read()
|
||||||
.expect("Managed directory rlock poisoned in list damaged.")
|
.expect("Managed directory rlock poisoned in list damaged.")
|
||||||
.managed_paths
|
.managed_paths
|
||||||
.clone();
|
.clone();
|
||||||
|
|
||||||
managed_paths.remove(*META_FILEPATH);
|
for path in managed_paths.into_iter() {
|
||||||
|
|
||||||
let mut damaged_files = HashSet::new();
|
|
||||||
for path in managed_paths {
|
|
||||||
if !self.validate_checksum(&path)? {
|
if !self.validate_checksum(&path)? {
|
||||||
damaged_files.insert(path);
|
hashset.insert(path);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(damaged_files)
|
Ok(hashset)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Directory for ManagedDirectory {
|
impl Directory for ManagedDirectory {
|
||||||
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
|
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||||
let file_slice = self.open_read(path)?;
|
let read_only_source = self.directory.open_read(path)?;
|
||||||
Ok(Box::new(file_slice))
|
let (footer, reader) = Footer::extract_footer(read_only_source)
|
||||||
}
|
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
|
||||||
|
|
||||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
|
||||||
let file_slice = self.directory.open_read(path)?;
|
|
||||||
let (footer, reader) = Footer::extract_footer(file_slice)
|
|
||||||
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
|
|
||||||
footer.is_compatible()?;
|
footer.is_compatible()?;
|
||||||
Ok(reader)
|
Ok(reader)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
|
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
|
||||||
self.register_file_as_managed(path)
|
self.register_file_as_managed(path)
|
||||||
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||||
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
|
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
|
||||||
self.directory
|
self.directory
|
||||||
.open_write(path)?
|
.open_write(path)?
|
||||||
@@ -299,7 +284,7 @@ impl Directory for ManagedDirectory {
|
|||||||
))))
|
))))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
|
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||||
self.register_file_as_managed(path)?;
|
self.register_file_as_managed(path)?;
|
||||||
self.directory.atomic_write(path, data)
|
self.directory.atomic_write(path, data)
|
||||||
}
|
}
|
||||||
@@ -312,7 +297,7 @@ impl Directory for ManagedDirectory {
|
|||||||
self.directory.delete(path)
|
self.directory.delete(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
|
fn exists(&self, path: &Path) -> bool {
|
||||||
self.directory.exists(path)
|
self.directory.exists(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -360,22 +345,22 @@ mod tests_mmap_specific {
|
|||||||
managed_directory
|
managed_directory
|
||||||
.atomic_write(test_path2, &[0u8, 1u8])
|
.atomic_write(test_path2, &[0u8, 1u8])
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(managed_directory.exists(test_path1).unwrap());
|
assert!(managed_directory.exists(test_path1));
|
||||||
assert!(managed_directory.exists(test_path2).unwrap());
|
assert!(managed_directory.exists(test_path2));
|
||||||
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
|
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
|
||||||
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
||||||
assert!(managed_directory.exists(test_path1).unwrap());
|
assert!(managed_directory.exists(test_path1));
|
||||||
assert!(!managed_directory.exists(test_path2).unwrap());
|
assert!(!managed_directory.exists(test_path2));
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
||||||
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
||||||
assert!(managed_directory.exists(test_path1).unwrap());
|
assert!(managed_directory.exists(test_path1));
|
||||||
assert!(!managed_directory.exists(test_path2).unwrap());
|
assert!(!managed_directory.exists(test_path2));
|
||||||
let living_files: HashSet<PathBuf> = HashSet::new();
|
let living_files: HashSet<PathBuf> = HashSet::new();
|
||||||
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
||||||
assert!(!managed_directory.exists(test_path1).unwrap());
|
assert!(!managed_directory.exists(test_path1));
|
||||||
assert!(!managed_directory.exists(test_path2).unwrap());
|
assert!(!managed_directory.exists(test_path2));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -392,7 +377,7 @@ mod tests_mmap_specific {
|
|||||||
let mut write = managed_directory.open_write(test_path1).unwrap();
|
let mut write = managed_directory.open_write(test_path1).unwrap();
|
||||||
write.write_all(&[0u8, 1u8]).unwrap();
|
write.write_all(&[0u8, 1u8]).unwrap();
|
||||||
write.terminate().unwrap();
|
write.terminate().unwrap();
|
||||||
assert!(managed_directory.exists(test_path1).unwrap());
|
assert!(managed_directory.exists(test_path1));
|
||||||
|
|
||||||
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
|
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
|
||||||
assert!(managed_directory
|
assert!(managed_directory
|
||||||
@@ -400,50 +385,52 @@ mod tests_mmap_specific {
|
|||||||
.is_ok());
|
.is_ok());
|
||||||
if cfg!(target_os = "windows") {
|
if cfg!(target_os = "windows") {
|
||||||
// On Windows, gc should try and fail the file as it is mmapped.
|
// On Windows, gc should try and fail the file as it is mmapped.
|
||||||
assert!(managed_directory.exists(test_path1).unwrap());
|
assert!(managed_directory.exists(test_path1));
|
||||||
// unmap should happen here.
|
// unmap should happen here.
|
||||||
drop(_mmap_read);
|
drop(_mmap_read);
|
||||||
// The file should still be in the list of managed file and
|
// The file should still be in the list of managed file and
|
||||||
// eventually be deleted once mmap is released.
|
// eventually be deleted once mmap is released.
|
||||||
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
|
||||||
assert!(!managed_directory.exists(test_path1).unwrap());
|
assert!(!managed_directory.exists(test_path1));
|
||||||
} else {
|
} else {
|
||||||
assert!(!managed_directory.exists(test_path1).unwrap());
|
assert!(!managed_directory.exists(test_path1));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_checksum() -> crate::Result<()> {
|
fn test_checksum() {
|
||||||
let test_path1: &'static Path = Path::new("some_path_for_test");
|
let test_path1: &'static Path = Path::new("some_path_for_test");
|
||||||
let test_path2: &'static Path = Path::new("other_test_path");
|
let test_path2: &'static Path = Path::new("other_test_path");
|
||||||
|
|
||||||
let tempdir = TempDir::new().unwrap();
|
let tempdir = TempDir::new().unwrap();
|
||||||
let tempdir_path = PathBuf::from(tempdir.path());
|
let tempdir_path = PathBuf::from(tempdir.path());
|
||||||
|
|
||||||
let mmap_directory = MmapDirectory::open(&tempdir_path)?;
|
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
||||||
let managed_directory = ManagedDirectory::wrap(mmap_directory)?;
|
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
||||||
let mut write = managed_directory.open_write(test_path1)?;
|
let mut write = managed_directory.open_write(test_path1).unwrap();
|
||||||
write.write_all(&[0u8, 1u8])?;
|
write.write_all(&[0u8, 1u8]).unwrap();
|
||||||
write.terminate()?;
|
write.terminate().unwrap();
|
||||||
|
|
||||||
let mut write = managed_directory.open_write(test_path2)?;
|
let mut write = managed_directory.open_write(test_path2).unwrap();
|
||||||
write.write_all(&[3u8, 4u8, 5u8])?;
|
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
|
||||||
write.terminate()?;
|
write.terminate().unwrap();
|
||||||
|
|
||||||
let read_file = managed_directory.open_read(test_path2)?.read_bytes()?;
|
let read_source = managed_directory.open_read(test_path2).unwrap();
|
||||||
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]);
|
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
|
||||||
assert!(managed_directory.list_damaged().unwrap().is_empty());
|
assert!(managed_directory.list_damaged().unwrap().is_empty());
|
||||||
|
|
||||||
let mut corrupted_path = tempdir_path.clone();
|
let mut corrupted_path = tempdir_path.clone();
|
||||||
corrupted_path.push(test_path2);
|
corrupted_path.push(test_path2);
|
||||||
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?;
|
let mut file = OpenOptions::new()
|
||||||
file.write_all(&[255u8])?;
|
.write(true)
|
||||||
file.flush()?;
|
.open(&corrupted_path)
|
||||||
|
.unwrap();
|
||||||
|
file.write_all(&[255u8]).unwrap();
|
||||||
|
file.flush().unwrap();
|
||||||
drop(file);
|
drop(file);
|
||||||
|
|
||||||
let damaged = managed_directory.list_damaged()?;
|
let damaged = managed_directory.list_damaged().unwrap();
|
||||||
assert_eq!(damaged.len(), 1);
|
assert_eq!(damaged.len(), 1);
|
||||||
assert!(damaged.contains(test_path2));
|
assert!(damaged.contains(test_path2));
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,19 +1,29 @@
|
|||||||
|
use fs2;
|
||||||
|
use notify;
|
||||||
|
|
||||||
|
use self::fs2::FileExt;
|
||||||
|
use self::notify::RawEvent;
|
||||||
|
use self::notify::RecursiveMode;
|
||||||
|
use self::notify::Watcher;
|
||||||
use crate::core::META_FILEPATH;
|
use crate::core::META_FILEPATH;
|
||||||
use crate::directory::error::LockError;
|
use crate::directory::error::LockError;
|
||||||
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
use crate::directory::error::{
|
||||||
use crate::directory::file_watcher::FileWatcher;
|
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
|
||||||
|
};
|
||||||
|
use crate::directory::read_only_source::BoxedData;
|
||||||
|
use crate::directory::AntiCallToken;
|
||||||
use crate::directory::Directory;
|
use crate::directory::Directory;
|
||||||
use crate::directory::DirectoryLock;
|
use crate::directory::DirectoryLock;
|
||||||
use crate::directory::Lock;
|
use crate::directory::Lock;
|
||||||
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::directory::WatchCallback;
|
use crate::directory::WatchCallback;
|
||||||
|
use crate::directory::WatchCallbackList;
|
||||||
use crate::directory::WatchHandle;
|
use crate::directory::WatchHandle;
|
||||||
use crate::directory::{AntiCallToken, FileHandle, OwnedBytes};
|
|
||||||
use crate::directory::{ArcBytes, WeakArcBytes};
|
|
||||||
use crate::directory::{TerminatingWrite, WritePtr};
|
use crate::directory::{TerminatingWrite, WritePtr};
|
||||||
use fs2::FileExt;
|
use atomicwrites;
|
||||||
use memmap::Mmap;
|
use memmap::Mmap;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use stable_deref_trait::StableDeref;
|
use std::collections::HashMap;
|
||||||
use std::convert::From;
|
use std::convert::From;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::fs::OpenOptions;
|
use std::fs::OpenOptions;
|
||||||
@@ -22,9 +32,12 @@ use std::io::{self, Seek, SeekFrom};
|
|||||||
use std::io::{BufWriter, Read, Write};
|
use std::io::{BufWriter, Read, Write};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::result;
|
use std::result;
|
||||||
|
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::Mutex;
|
||||||
use std::sync::RwLock;
|
use std::sync::RwLock;
|
||||||
use std::{collections::HashMap, ops::Deref};
|
use std::sync::Weak;
|
||||||
|
use std::thread;
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
|
|
||||||
/// Create a default io error given a string.
|
/// Create a default io error given a string.
|
||||||
@@ -35,17 +48,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
|
|||||||
/// Returns None iff the file exists, can be read, but is empty (and hence
|
/// Returns None iff the file exists, can be read, but is empty (and hence
|
||||||
/// cannot be mmapped)
|
/// cannot be mmapped)
|
||||||
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||||
let file = File::open(full_path).map_err(|io_err| {
|
let file = File::open(full_path).map_err(|e| {
|
||||||
if io_err.kind() == io::ErrorKind::NotFound {
|
if e.kind() == io::ErrorKind::NotFound {
|
||||||
OpenReadError::FileDoesNotExist(full_path.to_path_buf())
|
OpenReadError::FileDoesNotExist(full_path.to_owned())
|
||||||
} else {
|
} else {
|
||||||
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())
|
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let meta_data = file
|
let meta_data = file
|
||||||
.metadata()
|
.metadata()
|
||||||
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?;
|
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
|
||||||
if meta_data.len() == 0 {
|
if meta_data.len() == 0 {
|
||||||
// if the file size is 0, it will not be possible
|
// if the file size is 0, it will not be possible
|
||||||
// to mmap the file, so we return None
|
// to mmap the file, so we return None
|
||||||
@@ -55,7 +68,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
|||||||
unsafe {
|
unsafe {
|
||||||
memmap::Mmap::map(&file)
|
memmap::Mmap::map(&file)
|
||||||
.map(Some)
|
.map(Some)
|
||||||
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()))
|
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -76,7 +89,7 @@ pub struct CacheInfo {
|
|||||||
|
|
||||||
struct MmapCache {
|
struct MmapCache {
|
||||||
counters: CacheCounters,
|
counters: CacheCounters,
|
||||||
cache: HashMap<PathBuf, WeakArcBytes>,
|
cache: HashMap<PathBuf, Weak<BoxedData>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for MmapCache {
|
impl Default for MmapCache {
|
||||||
@@ -110,7 +123,7 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns None if the file exists but as a len of 0 (and hence is not mmappable).
|
// Returns None if the file exists but as a len of 0 (and hence is not mmappable).
|
||||||
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<ArcBytes>, OpenReadError> {
|
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<Arc<BoxedData>>, OpenReadError> {
|
||||||
if let Some(mmap_weak) = self.cache.get(full_path) {
|
if let Some(mmap_weak) = self.cache.get(full_path) {
|
||||||
if let Some(mmap_arc) = mmap_weak.upgrade() {
|
if let Some(mmap_arc) = mmap_weak.upgrade() {
|
||||||
self.counters.hit += 1;
|
self.counters.hit += 1;
|
||||||
@@ -121,7 +134,7 @@ impl MmapCache {
|
|||||||
self.counters.miss += 1;
|
self.counters.miss += 1;
|
||||||
let mmap_opt = open_mmap(full_path)?;
|
let mmap_opt = open_mmap(full_path)?;
|
||||||
Ok(mmap_opt.map(|mmap| {
|
Ok(mmap_opt.map(|mmap| {
|
||||||
let mmap_arc: ArcBytes = Arc::new(mmap);
|
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
|
||||||
let mmap_weak = Arc::downgrade(&mmap_arc);
|
let mmap_weak = Arc::downgrade(&mmap_arc);
|
||||||
self.cache.insert(full_path.to_owned(), mmap_weak);
|
self.cache.insert(full_path.to_owned(), mmap_weak);
|
||||||
mmap_arc
|
mmap_arc
|
||||||
@@ -129,6 +142,63 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct WatcherWrapper {
|
||||||
|
_watcher: Mutex<notify::RecommendedWatcher>,
|
||||||
|
watcher_router: Arc<WatchCallbackList>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl WatcherWrapper {
|
||||||
|
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
|
||||||
|
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
|
||||||
|
// We need to initialize the
|
||||||
|
let watcher = notify::raw_watcher(tx)
|
||||||
|
.and_then(|mut watcher| {
|
||||||
|
watcher.watch(path, RecursiveMode::Recursive)?;
|
||||||
|
Ok(watcher)
|
||||||
|
})
|
||||||
|
.map_err(|err| match err {
|
||||||
|
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
|
||||||
|
_ => {
|
||||||
|
panic!("Unknown error while starting watching directory {:?}", path);
|
||||||
|
}
|
||||||
|
})?;
|
||||||
|
let watcher_router: Arc<WatchCallbackList> = Default::default();
|
||||||
|
let watcher_router_clone = watcher_router.clone();
|
||||||
|
thread::Builder::new()
|
||||||
|
.name("meta-file-watch-thread".to_string())
|
||||||
|
.spawn(move || {
|
||||||
|
loop {
|
||||||
|
match watcher_recv.recv().map(|evt| evt.path) {
|
||||||
|
Ok(Some(changed_path)) => {
|
||||||
|
// ... Actually subject to false positive.
|
||||||
|
// We might want to be more accurate than this at one point.
|
||||||
|
if let Some(filename) = changed_path.file_name() {
|
||||||
|
if filename == *META_FILEPATH {
|
||||||
|
let _ = watcher_router_clone.broadcast();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None) => {
|
||||||
|
// not an event we are interested in.
|
||||||
|
}
|
||||||
|
Err(_e) => {
|
||||||
|
// the watch send channel was dropped
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})?;
|
||||||
|
Ok(WatcherWrapper {
|
||||||
|
_watcher: Mutex::new(watcher),
|
||||||
|
watcher_router,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
|
||||||
|
self.watcher_router.subscribe(watch_callback)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
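The watcher thread above is essentially a filter-and-broadcast loop: receive raw paths, keep only events that touch the meta file, and fire every registered callback. A minimal std-only sketch of that loop, with a plain channel standing in for the notify watcher and a vector of boxed closures standing in for WatchCallbackList (names and structure here are illustrative, not tantivy APIs):

use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    // The channel plays the role of the notify event stream.
    let (tx, rx) = channel::<Option<PathBuf>>();
    let callbacks: Vec<Box<dyn Fn() + Send>> =
        vec![Box::new(|| println!("meta.json changed, reload searchers"))];
    let handle = thread::spawn(move || loop {
        match rx.recv() {
            // Only events touching the meta file are broadcast; anything else
            // is treated as a false positive and ignored.
            Ok(Some(path)) if path.ends_with("meta.json") => {
                for callback in &callbacks {
                    callback();
                }
            }
            Ok(_) => {}      // not an event we are interested in
            Err(_) => break, // sender dropped: shut the watch thread down
        }
    });
    tx.send(Some(PathBuf::from("index/meta.json"))).unwrap();
    drop(tx); // closing the channel ends the loop, like dropping the watcher
    handle.join().unwrap();
}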
/// Directory storing data in files, read via mmap.
|
/// Directory storing data in files, read via mmap.
|
||||||
///
|
///
|
||||||
/// The Mmap object are cached to limit the
|
/// The Mmap object are cached to limit the
|
||||||
@@ -150,21 +220,44 @@ struct MmapDirectoryInner {
|
|||||||
root_path: PathBuf,
|
root_path: PathBuf,
|
||||||
mmap_cache: RwLock<MmapCache>,
|
mmap_cache: RwLock<MmapCache>,
|
||||||
_temp_directory: Option<TempDir>,
|
_temp_directory: Option<TempDir>,
|
||||||
watcher: FileWatcher,
|
watcher: RwLock<Option<WatcherWrapper>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MmapDirectoryInner {
|
impl MmapDirectoryInner {
|
||||||
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
|
fn new(
|
||||||
MmapDirectoryInner {
|
root_path: PathBuf,
|
||||||
|
temp_directory: Option<TempDir>,
|
||||||
|
) -> Result<MmapDirectoryInner, OpenDirectoryError> {
|
||||||
|
let mmap_directory_inner = MmapDirectoryInner {
|
||||||
|
root_path,
|
||||||
mmap_cache: Default::default(),
|
mmap_cache: Default::default(),
|
||||||
_temp_directory: temp_directory,
|
_temp_directory: temp_directory,
|
||||||
watcher: FileWatcher::new(&root_path.join(*META_FILEPATH)),
|
watcher: RwLock::new(None),
|
||||||
root_path,
|
};
|
||||||
}
|
Ok(mmap_directory_inner)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn watch(&self, callback: WatchCallback) -> crate::Result<WatchHandle> {
|
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
|
||||||
Ok(self.watcher.watch(callback))
|
// a lot of juggling here, to ensure we don't do anything that panics
|
||||||
|
// while the rwlock is held. That way we ensure that the rwlock cannot
|
||||||
|
// be poisoned.
|
||||||
|
//
|
||||||
|
// The downside is that we might create a watch wrapper that is not useful.
|
||||||
|
let need_initialization = self.watcher.read().unwrap().is_none();
|
||||||
|
if need_initialization {
|
||||||
|
let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
|
||||||
|
let mut watch_wlock = self.watcher.write().unwrap();
|
||||||
|
// the watcher could have been initialized when we released the lock, and
|
||||||
|
// we do not want to lose the watched files that were set.
|
||||||
|
if watch_wlock.is_none() {
|
||||||
|
*watch_wlock = Some(watch_wrapper);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
|
||||||
|
Ok(watch_wrapper.watch(watch_callback))
|
||||||
|
} else {
|
||||||
|
unreachable!("At this point, watch wrapper is supposed to be initialized");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -175,11 +268,14 @@ impl fmt::Debug for MmapDirectory {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl MmapDirectory {
|
impl MmapDirectory {
|
||||||
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectory {
|
fn new(
|
||||||
let inner = MmapDirectoryInner::new(root_path, temp_directory);
|
root_path: PathBuf,
|
||||||
MmapDirectory {
|
temp_directory: Option<TempDir>,
|
||||||
|
) -> Result<MmapDirectory, OpenDirectoryError> {
|
||||||
|
let inner = MmapDirectoryInner::new(root_path, temp_directory)?;
|
||||||
|
Ok(MmapDirectory {
|
||||||
inner: Arc::new(inner),
|
inner: Arc::new(inner),
|
||||||
}
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new MmapDirectory in a temporary directory.
|
/// Creates a new MmapDirectory in a temporary directory.
|
||||||
@@ -187,11 +283,9 @@ impl MmapDirectory {
|
|||||||
/// This is mostly useful to test the MmapDirectory itself.
|
/// This is mostly useful to test the MmapDirectory itself.
|
||||||
/// For your unit tests, prefer the RAMDirectory.
|
/// For your unit tests, prefer the RAMDirectory.
|
||||||
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
||||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
|
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
|
||||||
Ok(MmapDirectory::new(
|
let tempdir_path = PathBuf::from(tempdir.path());
|
||||||
tempdir.path().to_path_buf(),
|
MmapDirectory::new(tempdir_path, Some(tempdir))
|
||||||
Some(tempdir),
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Opens a MmapDirectory in a directory.
|
/// Opens a MmapDirectory in a directory.
|
||||||
@@ -209,7 +303,7 @@ impl MmapDirectory {
|
|||||||
directory_path,
|
directory_path,
|
||||||
)))
|
)))
|
||||||
} else {
|
} else {
|
||||||
Ok(MmapDirectory::new(PathBuf::from(directory_path), None))
|
Ok(MmapDirectory::new(PathBuf::from(directory_path), None)?)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -313,38 +407,8 @@ impl TerminatingWrite for SafeFileWriter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct MmapArc(Arc<dyn Deref<Target = [u8]> + Send + Sync>);
|
|
||||||
|
|
||||||
impl Deref for MmapArc {
|
|
||||||
type Target = [u8];
|
|
||||||
|
|
||||||
fn deref(&self) -> &[u8] {
|
|
||||||
self.0.deref()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
unsafe impl StableDeref for MmapArc {}
|
|
||||||
|
|
||||||
/// Writes a file in an atomic manner.
|
|
||||||
pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
|
|
||||||
// We create the temporary file in the same directory as the target file.
|
|
||||||
// Indeed the canonical temp directory and the target file might sit in different
|
|
||||||
// filesystem, in which case the atomic write may actually not work.
|
|
||||||
let parent_path = path.parent().ok_or_else(|| {
|
|
||||||
io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
"Path {:?} does not have parent directory.",
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
|
|
||||||
tempfile.write_all(content)?;
|
|
||||||
tempfile.flush()?;
|
|
||||||
tempfile.into_temp_path().persist(path)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Directory for MmapDirectory {
|
impl Directory for MmapDirectory {
|
||||||
fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
|
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||||
debug!("Open Read {:?}", path);
|
debug!("Open Read {:?}", path);
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
|
|
||||||
@@ -354,19 +418,12 @@ impl Directory for MmapDirectory {
|
|||||||
on mmap cache while reading {:?}",
|
on mmap cache while reading {:?}",
|
||||||
path
|
path
|
||||||
);
|
);
|
||||||
let io_err = make_io_err(msg);
|
IOError::with_path(path.to_owned(), make_io_err(msg))
|
||||||
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
|
|
||||||
})?;
|
})?;
|
||||||
|
Ok(mmap_cache
|
||||||
let owned_bytes = mmap_cache
|
|
||||||
.get_mmap(&full_path)?
|
.get_mmap(&full_path)?
|
||||||
.map(|mmap_arc| {
|
.map(ReadOnlySource::from)
|
||||||
let mmap_arc_obj = MmapArc(mmap_arc);
|
.unwrap_or_else(ReadOnlySource::empty))
|
||||||
OwnedBytes::new(mmap_arc_obj)
|
|
||||||
})
|
|
||||||
.unwrap_or_else(OwnedBytes::empty);
|
|
||||||
|
|
||||||
Ok(Box::new(owned_bytes))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Any entry associated to the path in the mmap will be
|
/// Any entry associated to the path in the mmap will be
|
||||||
@@ -374,29 +431,25 @@ impl Directory for MmapDirectory {
|
|||||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
match fs::remove_file(&full_path) {
|
match fs::remove_file(&full_path) {
|
||||||
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError {
|
Ok(_) => self
|
||||||
io_error: e,
|
.sync_directory()
|
||||||
filepath: path.to_path_buf(),
|
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
|
||||||
}),
|
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
if e.kind() == io::ErrorKind::NotFound {
|
if e.kind() == io::ErrorKind::NotFound {
|
||||||
Err(DeleteError::FileDoesNotExist(path.to_owned()))
|
Err(DeleteError::FileDoesNotExist(path.to_owned()))
|
||||||
} else {
|
} else {
|
||||||
Err(DeleteError::IOError {
|
Err(IOError::with_path(path.to_owned(), e).into())
|
||||||
io_error: e,
|
|
||||||
filepath: path.to_path_buf(),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
|
fn exists(&self, path: &Path) -> bool {
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
Ok(full_path.exists())
|
full_path.exists()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||||
debug!("Open Write {:?}", path);
|
debug!("Open Write {:?}", path);
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
|
|
||||||
@@ -405,22 +458,22 @@ impl Directory for MmapDirectory {
|
|||||||
.create_new(true)
|
.create_new(true)
|
||||||
.open(full_path);
|
.open(full_path);
|
||||||
|
|
||||||
let mut file = open_res.map_err(|io_err| {
|
let mut file = open_res.map_err(|err| {
|
||||||
if io_err.kind() == io::ErrorKind::AlreadyExists {
|
if err.kind() == io::ErrorKind::AlreadyExists {
|
||||||
OpenWriteError::FileAlreadyExists(path.to_path_buf())
|
OpenWriteError::FileAlreadyExists(path.to_owned())
|
||||||
} else {
|
} else {
|
||||||
OpenWriteError::wrap_io_error(io_err, path.to_path_buf())
|
IOError::with_path(path.to_owned(), err).into()
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
// making sure the file is created.
|
// making sure the file is created.
|
||||||
file.flush()
|
file.flush()
|
||||||
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||||
|
|
||||||
// Apparetntly, on some filesystem syncing the parent
|
// Apparetntly, on some filesystem syncing the parent
|
||||||
// directory is required.
|
// directory is required.
|
||||||
self.sync_directory()
|
self.sync_directory()
|
||||||
.map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?;
|
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||||
|
|
||||||
let writer = SafeFileWriter::new(file);
|
let writer = SafeFileWriter::new(file);
|
||||||
Ok(BufWriter::new(Box::new(writer)))
|
Ok(BufWriter::new(Box::new(writer)))
|
||||||
@@ -431,26 +484,26 @@ impl Directory for MmapDirectory {
|
|||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
match File::open(&full_path) {
|
match File::open(&full_path) {
|
||||||
Ok(mut file) => {
|
Ok(mut file) => {
|
||||||
file.read_to_end(&mut buffer).map_err(|io_error| {
|
file.read_to_end(&mut buffer)
|
||||||
OpenReadError::wrap_io_error(io_error, path.to_path_buf())
|
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||||
})?;
|
|
||||||
Ok(buffer)
|
Ok(buffer)
|
||||||
}
|
}
|
||||||
Err(io_error) => {
|
Err(e) => {
|
||||||
if io_error.kind() == io::ErrorKind::NotFound {
|
if e.kind() == io::ErrorKind::NotFound {
|
||||||
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
|
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
|
||||||
} else {
|
} else {
|
||||||
Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf()))
|
Err(IOError::with_path(path.to_owned(), e).into())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> {
|
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||||
debug!("Atomic Write {:?}", path);
|
debug!("Atomic Write {:?}", path);
|
||||||
let full_path = self.resolve_path(path);
|
let full_path = self.resolve_path(path);
|
||||||
atomic_write(&full_path, content)?;
|
let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
|
||||||
self.sync_directory()
|
meta_file.write(|f| f.write_all(data))?;
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
|
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
|
||||||
@@ -485,10 +538,12 @@ mod tests {
|
|||||||
// The following tests are specific to the MmapDirectory
|
// The following tests are specific to the MmapDirectory
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use crate::indexer::LogMergePolicy;
|
||||||
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::ReloadPolicy;
|
use crate::ReloadPolicy;
|
||||||
use crate::{common::HasLen, indexer::LogMergePolicy};
|
use std::fs;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_open_non_existent_path() {
|
fn test_open_non_existent_path() {
|
||||||
@@ -501,7 +556,7 @@ mod tests {
|
|||||||
// cannot be mmapped.
|
// cannot be mmapped.
|
||||||
//
|
//
|
||||||
// In that case the directory returns a SharedVecSlice.
|
// In that case the directory returns a SharedVecSlice.
|
||||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||||
let path = PathBuf::from("test");
|
let path = PathBuf::from("test");
|
||||||
{
|
{
|
||||||
let mut w = mmap_directory.open_write(&path).unwrap();
|
let mut w = mmap_directory.open_write(&path).unwrap();
|
||||||
@@ -517,7 +572,7 @@ mod tests {
|
|||||||
|
|
||||||
// here we test if the cache releases
|
// here we test if the cache releases
|
||||||
// mmaps correctly.
|
// mmaps correctly.
|
||||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||||
let num_paths = 10;
|
let num_paths = 10;
|
||||||
let paths: Vec<PathBuf> = (0..num_paths)
|
let paths: Vec<PathBuf> = (0..num_paths)
|
||||||
.map(|i| PathBuf::from(&*format!("file_{}", i)))
|
.map(|i| PathBuf::from(&*format!("file_{}", i)))
|
||||||
@@ -577,6 +632,27 @@ mod tests {
|
|||||||
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
|
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_watch_wrapper() {
|
||||||
|
let counter: Arc<AtomicUsize> = Default::default();
|
||||||
|
let counter_clone = counter.clone();
|
||||||
|
let tmp_dir = tempfile::TempDir::new().unwrap();
|
||||||
|
let tmp_dirpath = tmp_dir.path().to_owned();
|
||||||
|
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
|
||||||
|
let tmp_file = tmp_dirpath.join(*META_FILEPATH);
|
||||||
|
let _handle = watch_wrapper.watch(Box::new(move || {
|
||||||
|
counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||||
|
}));
|
||||||
|
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||||
|
let _handle2 = watch_wrapper.watch(Box::new(move || {
|
||||||
|
let _ = sender.send(());
|
||||||
|
}));
|
||||||
|
assert_eq!(counter.load(Ordering::SeqCst), 0);
|
||||||
|
fs::write(&tmp_file, b"whateverwilldo").unwrap();
|
||||||
|
assert!(receiver.recv().is_ok());
|
||||||
|
assert!(counter.load(Ordering::SeqCst) >= 1);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_mmap_released() {
|
fn test_mmap_released() {
|
||||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||||
@@ -587,7 +663,7 @@ mod tests {
|
|||||||
{
|
{
|
||||||
let index = Index::create(mmap_directory.clone(), schema).unwrap();
|
let index = Index::create(mmap_directory.clone(), schema).unwrap();
|
||||||
|
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
let mut log_merge_policy = LogMergePolicy::default();
|
let mut log_merge_policy = LogMergePolicy::default();
|
||||||
log_merge_policy.set_min_merge_size(3);
|
log_merge_policy.set_min_merge_size(3);
|
||||||
index_writer.set_merge_policy(Box::new(log_merge_policy));
|
index_writer.set_merge_policy(Box::new(log_merge_policy));
|
||||||
|
|||||||
@@ -9,13 +9,14 @@ mod mmap_directory;
|
|||||||
|
|
||||||
mod directory;
|
mod directory;
|
||||||
mod directory_lock;
|
mod directory_lock;
|
||||||
mod file_slice;
|
|
||||||
mod file_watcher;
|
|
||||||
mod footer;
|
mod footer;
|
||||||
mod managed_directory;
|
mod managed_directory;
|
||||||
mod owned_bytes;
|
|
||||||
mod ram_directory;
|
mod ram_directory;
|
||||||
|
mod read_only_source;
|
||||||
|
mod spilling_writer;
|
||||||
mod watch_event_router;
|
mod watch_event_router;
|
||||||
|
mod persistor;
|
||||||
|
|
||||||
|
|
||||||
/// Errors specific to the directory module.
|
/// Errors specific to the directory module.
|
||||||
pub mod error;
|
pub mod error;
|
||||||
@@ -23,14 +24,12 @@ pub mod error;
|
|||||||
pub use self::directory::DirectoryLock;
|
pub use self::directory::DirectoryLock;
|
||||||
pub use self::directory::{Directory, DirectoryClone};
|
pub use self::directory::{Directory, DirectoryClone};
|
||||||
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
|
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
|
||||||
pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
|
|
||||||
pub use self::file_slice::{FileHandle, FileSlice};
|
|
||||||
pub use self::owned_bytes::OwnedBytes;
|
|
||||||
pub use self::ram_directory::RAMDirectory;
|
pub use self::ram_directory::RAMDirectory;
|
||||||
|
pub use self::read_only_source::ReadOnlySource;
|
||||||
|
pub(crate) use self::spilling_writer::{SpillingResult, SpillingWriter};
|
||||||
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
|
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
|
||||||
use std::io::{self, BufWriter, Write};
|
use std::io::{self, BufWriter, Write};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
/// Outcome of the Garbage collection
|
/// Outcome of the Garbage collection
|
||||||
pub struct GarbageCollectionResult {
|
pub struct GarbageCollectionResult {
|
||||||
/// List of files that were deleted in this cycle
|
/// List of files that were deleted in this cycle
|
||||||
@@ -84,10 +83,16 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl TerminatingWrite for Vec<u8> {
|
||||||
|
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
|
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
|
||||||
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
|
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
|
||||||
self.flush()
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
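The TerminatingWrite implementations above lean on the AntiCallToken idea: terminate_ref is only reachable through terminate(), because only the defining module can construct the token. A stand-alone sketch of that pattern with local stand-in types (not the tantivy definitions):

use std::io::{self, Write};

// The token has a private field, so code outside this module cannot build one
// and therefore cannot call terminate_ref directly.
pub struct AntiCallToken(());

pub trait TerminatingWrite: Write {
    /// Consumes the writer, guaranteeing terminate_ref runs exactly once.
    fn terminate(mut self) -> io::Result<()>
    where
        Self: Sized,
    {
        self.terminate_ref(AntiCallToken(()))
    }
    fn terminate_ref(&mut self, _token: AntiCallToken) -> io::Result<()>;
}

struct CountingWriter {
    bytes: usize,
}

impl Write for CountingWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.bytes += buf.len();
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl TerminatingWrite for CountingWriter {
    fn terminate_ref(&mut self, _token: AntiCallToken) -> io::Result<()> {
        // A real writer would append its footer here, like FooterProxy does.
        println!("terminated after {} bytes", self.bytes);
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let mut writer = CountingWriter { bytes: 0 };
    writer.write_all(b"hello")?;
    writer.terminate()
}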
@@ -1,290 +0,0 @@
|
|||||||
use crate::directory::FileHandle;
|
|
||||||
use stable_deref_trait::StableDeref;
|
|
||||||
use std::convert::TryInto;
|
|
||||||
use std::mem;
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::{fmt, io};
|
|
||||||
|
|
||||||
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
|
|
||||||
/// this data as a static slice.
|
|
||||||
///
|
|
||||||
/// The backing object is required to be `StableDeref`.
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct OwnedBytes {
|
|
||||||
data: &'static [u8],
|
|
||||||
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FileHandle for OwnedBytes {
|
|
||||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
|
||||||
Ok(self.slice(from, to))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl OwnedBytes {
|
|
||||||
/// Creates an empty `OwnedBytes`.
|
|
||||||
pub fn empty() -> OwnedBytes {
|
|
||||||
OwnedBytes::new(&[][..])
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates an `OwnedBytes` intance given a `StableDeref` object.
|
|
||||||
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
|
|
||||||
data_holder: T,
|
|
||||||
) -> OwnedBytes {
|
|
||||||
let box_stable_deref = Arc::new(data_holder);
|
|
||||||
let bytes: &[u8] = box_stable_deref.as_ref();
|
|
||||||
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
|
|
||||||
OwnedBytes {
|
|
||||||
box_stable_deref,
|
|
||||||
data,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// creates a fileslice that is just a view over a slice of the data.
|
|
||||||
pub fn slice(&self, from: usize, to: usize) -> Self {
|
|
||||||
OwnedBytes {
|
|
||||||
data: &self.data[from..to],
|
|
||||||
box_stable_deref: self.box_stable_deref.clone(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the underlying slice of data.
|
|
||||||
/// `Deref` and `AsRef` are also available.
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn as_slice(&self) -> &[u8] {
|
|
||||||
self.data
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the len of the slice.
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn len(&self) -> usize {
|
|
||||||
self.data.len()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
|
|
||||||
///
|
|
||||||
/// Left will hold `split_len` bytes.
|
|
||||||
///
|
|
||||||
/// This operation is cheap and does not require to copy any memory.
|
|
||||||
/// On the other hand, both `left` and `right` retain a handle over
|
|
||||||
/// the entire slice of memory. In other words, the memory will only
|
|
||||||
/// be released when both left and right are dropped.
|
|
||||||
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
|
|
||||||
let right_box_stable_deref = self.box_stable_deref.clone();
|
|
||||||
let left = OwnedBytes {
|
|
||||||
data: &self.data[..split_len],
|
|
||||||
box_stable_deref: self.box_stable_deref,
|
|
||||||
};
|
|
||||||
let right = OwnedBytes {
|
|
||||||
data: &self.data[split_len..],
|
|
||||||
box_stable_deref: right_box_stable_deref,
|
|
||||||
};
|
|
||||||
(left, right)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns true iff this `OwnedBytes` is empty.
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn is_empty(&self) -> bool {
|
|
||||||
self.as_slice().is_empty()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Drops the left most `advance_len` bytes.
|
|
||||||
///
|
|
||||||
/// See also [.clip(clip_len: usize))](#method.clip).
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn advance(&mut self, advance_len: usize) {
|
|
||||||
self.data = &self.data[advance_len..]
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reads an `u8` from the `OwnedBytes` and advance by one byte.
|
|
||||||
pub fn read_u8(&mut self) -> u8 {
|
|
||||||
assert!(!self.is_empty());
|
|
||||||
|
|
||||||
let byte = self.as_slice()[0];
|
|
||||||
self.advance(1);
|
|
||||||
byte
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reads an `u64` encoded as little-endian from the `OwnedBytes` and advance by 8 bytes.
|
|
||||||
pub fn read_u64(&mut self) -> u64 {
|
|
||||||
assert!(self.len() > 7);
|
|
||||||
|
|
||||||
let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
|
|
||||||
self.advance(8);
|
|
||||||
u64::from_le_bytes(octlet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for OwnedBytes {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
// We truncate the bytes in order to make sure the debug string
|
|
||||||
// is not too long.
|
|
||||||
let bytes_truncated: &[u8] = if self.len() > 8 {
|
|
||||||
&self.as_slice()[..10]
|
|
||||||
} else {
|
|
||||||
self.as_slice()
|
|
||||||
};
|
|
||||||
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deref for OwnedBytes {
|
|
||||||
type Target = [u8];
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
self.as_slice()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl io::Read for OwnedBytes {
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
|
||||||
let read_len = {
|
|
||||||
let data = self.as_slice();
|
|
||||||
if data.len() >= buf.len() {
|
|
||||||
let buf_len = buf.len();
|
|
||||||
buf.copy_from_slice(&data[..buf_len]);
|
|
||||||
buf.len()
|
|
||||||
} else {
|
|
||||||
let data_len = data.len();
|
|
||||||
buf[..data_len].copy_from_slice(data);
|
|
||||||
data_len
|
|
||||||
}
|
|
||||||
};
|
|
||||||
self.advance(read_len);
|
|
||||||
Ok(read_len)
|
|
||||||
}
|
|
||||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
|
||||||
let read_len = {
|
|
||||||
let data = self.as_slice();
|
|
||||||
buf.extend(data);
|
|
||||||
data.len()
|
|
||||||
};
|
|
||||||
self.advance(read_len);
|
|
||||||
Ok(read_len)
|
|
||||||
}
|
|
||||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
|
||||||
let read_len = self.read(buf)?;
|
|
||||||
if read_len != buf.len() {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::UnexpectedEof,
|
|
||||||
"failed to fill whole buffer",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsRef<[u8]> for OwnedBytes {
|
|
||||||
fn as_ref(&self) -> &[u8] {
|
|
||||||
self.as_slice()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use std::io::{self, Read};
|
|
||||||
|
|
||||||
use super::OwnedBytes;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_debug() {
|
|
||||||
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
|
|
||||||
assert_eq!(
|
|
||||||
format!("{:?}", short_bytes),
|
|
||||||
"OwnedBytes([97, 98, 99, 100], len=4)"
|
|
||||||
);
|
|
||||||
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
|
|
||||||
assert_eq!(
|
|
||||||
format!("{:?}", long_bytes),
|
|
||||||
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_read() -> io::Result<()> {
|
|
||||||
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
|
|
||||||
{
|
|
||||||
let mut buf = [0u8; 5];
|
|
||||||
bytes.read_exact(&mut buf[..]).unwrap();
|
|
||||||
assert_eq!(&buf, b"abcde");
|
|
||||||
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
|
|
||||||
}
|
|
||||||
{
|
|
||||||
let mut buf = [0u8; 2];
|
|
||||||
bytes.read_exact(&mut buf[..]).unwrap();
|
|
||||||
assert_eq!(&buf, b"fg");
|
|
||||||
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
|
|
||||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
|
||||||
let mut buf = [0u8; 5];
|
|
||||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
|
||||||
assert_eq!(&buf, b"abcde");
|
|
||||||
assert_eq!(bytes.as_slice(), b"");
|
|
||||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
|
||||||
assert_eq!(&buf, b"abcde");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
|
|
||||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
|
||||||
let mut buf = [0u8; 7];
|
|
||||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
|
||||||
assert_eq!(&buf[..5], b"abcde");
|
|
||||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_read_to_end() -> io::Result<()> {
|
|
||||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
|
||||||
let mut buf = Vec::new();
|
|
||||||
bytes.read_to_end(&mut buf)?;
|
|
||||||
assert_eq!(buf.as_slice(), b"abcde".as_ref());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_read_u8() -> io::Result<()> {
|
|
||||||
let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
|
|
||||||
assert_eq!(bytes.read_u8(), 255);
|
|
||||||
assert_eq!(bytes.len(), 0);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_read_u64() -> io::Result<()> {
|
|
||||||
let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
|
|
||||||
assert_eq!(bytes.read_u64(), u64::MAX - 255);
|
|
||||||
assert_eq!(bytes.len(), 0);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_split() {
|
|
||||||
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
|
||||||
let (left, right) = bytes.split(3);
|
|
||||||
assert_eq!(left.as_slice(), b"abc");
|
|
||||||
assert_eq!(right.as_slice(), b"defghi");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_owned_bytes_split_boundary() {
|
|
||||||
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
|
||||||
{
|
|
||||||
let (left, right) = bytes.clone().split(0);
|
|
||||||
assert_eq!(left.as_slice(), b"");
|
|
||||||
assert_eq!(right.as_slice(), b"abcdefghi");
|
|
||||||
}
|
|
||||||
{
|
|
||||||
let (left, right) = bytes.split(9);
|
|
||||||
assert_eq!(left.as_slice(), b"abcdefghi");
|
|
||||||
assert_eq!(right.as_slice(), b"");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
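For reference, a short usage sketch of the OwnedBytes API deleted above, exercising only methods visible in this diff; the `tantivy::directory::OwnedBytes` import assumes the re-export that this change removes from src/directory/mod.rs.

use tantivy::directory::OwnedBytes;

fn main() {
    // A &'static [u8] is StableDeref, so it can back an OwnedBytes,
    // exactly as in the unit tests of the removed file.
    let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
    assert_eq!(bytes.len(), 9);

    // split is cheap: both halves share the same backing allocation.
    let (left, right) = bytes.clone().split(3);
    assert_eq!(left.as_slice(), b"abc");
    assert_eq!(right.as_slice(), b"defghi");

    // slice gives a view; read_u8 advances past the first byte of that view.
    let mut window = bytes.slice(2, 5);
    assert_eq!(window.read_u8(), b'c');
    assert_eq!(window.as_slice(), b"de");
}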
src/directory/persistor.rs (new file, 38 lines)
@@ -0,0 +1,38 @@
+use crate::indexer::{SegmentManager, ResourceManager, MergeOperationInventory};
+use std::thread::JoinHandle;
+use crate::{IndexWriterConfig, SegmentId};
+use std::collections::HashSet;
+
+pub(crate) struct Persistor {
+    memory_manager: ResourceManager,
+    thread_handle: JoinHandle<()>,
+}
+
+impl Persistor {
+    pub(crate) fn create_and_start(segment_manager: SegmentManager,
+                                   memory_manager: ResourceManager,
+                                   config: IndexWriterConfig) -> crate::Result<Persistor> {
+        let memory_manager_clone = memory_manager.clone();
+        let thread_handle = std::thread::Builder::new()
+            .name("persistor-thread".to_string())
+            .spawn(move || {
+                while let Ok(_) = memory_manager_clone.wait_until_in_range(config.persist_low..) {
+                    segment_manager.largest_segment_not_in_merge();
+                }
+            }).map_err(|_err| crate::TantivyError::ErrorInThread("Failed to start persistor thread.".to_string()))?;
+        Ok(Persistor {
+            memory_manager,
+            thread_handle
+        })
+    }
+
+    /// Stop the persisting thread.
+    ///
+    /// The memory manager will be terminated, which will unlock the thread from any waiting
+    /// position.
+    /// This method blocks for a short amount of tim until the persistor thread has terminated.
+    pub fn stop(self) {
+        self.memory_manager.terminate();
+        let _ = self.thread_handle.join();
+    }
+}
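persistor.rs relies on a simple pattern: a named worker thread blocks until a shared resource counter crosses a threshold, does one unit of persisting work, and is stopped by terminating the manager and joining the thread. A self-contained sketch of that pattern using only standard-library primitives; Manager, wait_above and record are illustrative stand-ins for the ResourceManager API referenced in the diff.

use std::sync::{Arc, Condvar, Mutex};
use std::thread::{self, JoinHandle};

struct Manager {
    state: Mutex<(u64, bool)>, // (current usage, terminated)
    cond: Condvar,
}

impl Manager {
    // Blocks until usage reaches `low` or the manager is terminated.
    fn wait_above(&self, low: u64) -> Result<u64, ()> {
        let mut state = self.state.lock().unwrap();
        while state.0 < low && !state.1 {
            state = self.cond.wait(state).unwrap();
        }
        if state.1 { Err(()) } else { Ok(state.0) }
    }
    fn record(&self, usage: u64) {
        self.state.lock().unwrap().0 = usage;
        self.cond.notify_all();
    }
    fn terminate(&self) {
        self.state.lock().unwrap().1 = true;
        self.cond.notify_all();
    }
}

fn start(manager: Arc<Manager>, persist_low: u64) -> JoinHandle<()> {
    thread::Builder::new()
        .name("persistor-thread".to_string())
        .spawn(move || {
            // Same shape as the loop in persistor.rs: wait, persist, repeat,
            // and exit once the manager has been terminated.
            while let Ok(usage) = manager.wait_above(persist_low) {
                println!("usage {} above threshold: persist the largest segment", usage);
                manager.record(0); // pretend persisting released the memory
            }
        })
        .expect("failed to spawn persistor thread")
}

fn main() {
    let manager = Arc::new(Manager { state: Mutex::new((0, false)), cond: Condvar::new() });
    let handle = start(manager.clone(), 100);
    manager.record(150);  // crossing the threshold wakes the worker
    manager.terminate();  // unblocks the worker so it can exit, as Persistor::stop does
    handle.join().unwrap();
}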
@@ -1,9 +1,9 @@
+ use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
- use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle};
+ use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
- use crate::{common::HasLen, core::META_FILEPATH};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
@@ -11,8 +11,7 @@ use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::sync::{Arc, RwLock};
+ use crate::indexer::ResourceManager;
- use super::FileHandle;

/// Writer associated with the `RAMDirectory`
///
@@ -82,17 +81,18 @@ impl TerminatingWrite for VecWriter {

#[derive(Default)]
struct InnerDirectory {
- fs: HashMap<PathBuf, FileSlice>,
+ fs: HashMap<PathBuf, ReadOnlySource>,
watch_router: WatchCallbackList,
+ memory_manager: ResourceManager,
}

impl InnerDirectory {
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
- let data = FileSlice::from(data.to_vec());
+ let data = ReadOnlySource::new_with_allocation(Vec::from(data), &self.memory_manager);
self.fs.insert(path, data).is_some()
}

- fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
+ fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.fs
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
@@ -114,8 +114,8 @@ impl InnerDirectory {
self.watch_router.subscribe(watch_handle)
}

- fn total_mem_usage(&self) -> usize {
+ fn total_mem_usage(&self) -> u64 {
- self.fs.values().map(|f| f.len()).sum()
+ self.fs.values().map(|source| source.len() as u64).sum()
}
}

@@ -136,14 +136,30 @@ pub struct RAMDirectory {
}

impl RAMDirectory {
- /// Constructor
+ /// Creates a new RAMDirectory.
+ ///
+ /// Check `.create_with_memory_manager(..)` if you want to associate an external memory
+ /// manager to your RAMDirectory.
pub fn create() -> RAMDirectory {
- Self::default()
+ RAMDirectory::default()
+ }

+ /// Constructor
+ pub fn create_with_memory_manager(memory_manager: ResourceManager) -> RAMDirectory {
+ let inner_directory = InnerDirectory {
+ fs: Default::default(),
+ watch_router: Default::default(),
+ memory_manager
+ };
+ RAMDirectory {
+ fs: Arc::new(RwLock::new(inner_directory))
+ }
}

/// Returns the sum of the size of the different files
/// in the RAMDirectory.
- pub fn total_mem_usage(&self) -> usize {
+ pub fn total_mem_usage(&self) -> u64 {
self.fs.read().unwrap().total_mem_usage()
}

@@ -153,11 +169,11 @@ impl RAMDirectory {
/// written using the `atomic_write` api.
///
/// If an error is encounterred, files may be persisted partially.
- pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> {
+ pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
let wlock = self.fs.write().unwrap();
- for (path, file) in wlock.fs.iter() {
+ for (path, source) in wlock.fs.iter() {
let mut dest_wrt = dest.open_write(path)?;
- dest_wrt.write_all(file.read_bytes()?.as_slice())?;
+ dest_wrt.write_all(source.as_slice())?;
dest_wrt.terminate()?;
}
Ok(())
@@ -165,37 +181,24 @@ impl RAMDirectory {
}

impl Directory for RAMDirectory {
- fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
+ fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
- let file_slice = self.open_read(path)?;
- Ok(Box::new(file_slice))
- }

- fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}

fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| {
- Err(DeleteError::IOError {
+ use crate::directory::error::IOError;
- io_error: io::Error::from(io::ErrorKind::Other),
+ let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
- filepath: path.to_path_buf(),
+ Err(DeleteError::from(io_error))
- })
});
self.fs.write().unwrap().delete(path)
}

- fn exists(&self, path: &Path) -> Result<bool, OpenReadError> {
+ fn exists(&self, path: &Path) -> bool {
- Ok(self
+ self.fs.read().unwrap().exists(path)
- .fs
- .read()
- .map_err(|e| OpenReadError::IOError {
- io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
- filepath: path.to_path_buf(),
- })?
- .exists(path))
}

- fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
+ fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path);
let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
@@ -209,17 +212,10 @@ impl Directory for RAMDirectory {
}

fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
- let bytes =
+ Ok(self.open_read(path)?.as_slice().to_owned())
- self.open_read(path)?
- .read_bytes()
- .map_err(|io_error| OpenReadError::IOError {
- io_error,
- filepath: path.to_path_buf(),
- })?;
- Ok(bytes.as_slice().to_owned())
}

- fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
+ fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string())
@@ -249,6 +245,9 @@ mod tests {
use crate::Directory;
use std::io::Write;
use std::path::Path;
+ use crate::indexer::ResourceManager;
+ use crate::directory::TerminatingWrite;
+ use std::mem;

#[test]
fn test_persist() {
@@ -256,14 +255,69 @@ mod tests {
let msg_seq: &'static [u8] = b"sequential is the way";
let path_atomic: &'static Path = Path::new("atomic");
let path_seq: &'static Path = Path::new("seq");
- let directory = RAMDirectory::create();
+ let mut directory = RAMDirectory::create();
assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
let mut wrt = directory.open_write(path_seq).unwrap();
assert!(wrt.write_all(msg_seq).is_ok());
assert!(wrt.flush().is_ok());
- let directory_copy = RAMDirectory::create();
+ let mut directory_copy = RAMDirectory::create();
- assert!(directory.persist(&directory_copy).is_ok());
+ assert!(directory.persist(&mut directory_copy).is_ok());
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
}

+ #[test]
+ fn test_memory_manager_several_path() {
+ let memory_manager = ResourceManager::default();
+ let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+ assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcd").is_ok());
+ assert_eq!(memory_manager.total_amount(), 4u64);
+ assert!(ram_directory.atomic_write(Path::new("/toto"), b"abcde").is_ok());
+ assert_eq!(memory_manager.total_amount(), 9u64);
+ }

+ #[test]
+ fn test_memory_manager_override() {
+ let memory_manager = ResourceManager::default();
+ let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+ assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcde").is_ok());
+ assert_eq!(memory_manager.total_amount(), 5u64);
+ assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcdef").is_ok());
+ assert_eq!(memory_manager.total_amount(), 6u64);
+ }

+ #[test]
+ fn test_memory_manager_seq_wrt() {
+ let memory_manager = ResourceManager::default();
+ let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+ let mut wrt = ram_directory.open_write(Path::new("/titi")).unwrap();
+ assert!(wrt.write_all(b"abcde").is_ok());
+ assert!(wrt.terminate().is_ok());
+ assert_eq!(memory_manager.total_amount(), 5u64);
+ assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcdef").is_ok());
+ assert_eq!(memory_manager.total_amount(), 6u64);
+ }

+ #[test]
+ fn test_release_on_drop() {
+ let memory_manager = ResourceManager::default();
+ let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+ let mut wrt = ram_directory.open_write(Path::new("/titi")).unwrap();
+ assert!(wrt.write_all(b"abcde").is_ok());
+ assert!(wrt.terminate().is_ok());
+ assert_eq!(memory_manager.total_amount(), 5u64);
+ let mut wrt2 = ram_directory.open_write(Path::new("/toto")).unwrap();
+ assert!(wrt2.write_all(b"abcdefghijkl").is_ok());
+ assert!(wrt2.terminate().is_ok());
+ assert_eq!(memory_manager.total_amount(), 17u64);
+ let source = ram_directory.open_read(Path::new("/titi")).unwrap();
+ let source_clone = source.clone();
+ assert_eq!(memory_manager.total_amount(), 17u64);
+ mem::drop(ram_directory);
+ assert_eq!(memory_manager.total_amount(), 5u64);
+ mem::drop(source);
+ assert_eq!(memory_manager.total_amount(), 5u64);
+ mem::drop(source_clone);
+ assert_eq!(memory_manager.total_amount(), 0u64);
+ }
}
157 src/directory/read_only_source.rs Normal file
@@ -0,0 +1,157 @@
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;
use crate::indexer::{Allocation, ResourceManager};

pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

/// Read object that represents files in tantivy.
///
/// These read objects are only in charge to deliver
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// hold by this object should never be altered or destroyed.
pub struct ReadOnlySource {
    data: Arc<BoxedData>,
    start: usize,
    stop: usize,
    allocation: Option<Arc<Allocation>>
}

unsafe impl StableDeref for ReadOnlySource {}
unsafe impl CloneStableDeref for ReadOnlySource {}

impl Deref for ReadOnlySource {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl From<Arc<BoxedData>> for ReadOnlySource {
    fn from(data: Arc<BoxedData>) -> Self {
        let len = data.len();
        ReadOnlySource {
            data,
            start: 0,
            stop: len,
            allocation: None
        }
    }
}

impl ReadOnlySource {

    pub(crate) fn new<D>(data: D) -> ReadOnlySource
    where
        D: Deref<Target = [u8]> + Send + Sync + 'static,
    {
        let len = data.len();
        ReadOnlySource {
            data: Arc::new(Box::new(data)),
            start: 0,
            stop: len,
            allocation: None
        }
    }

    pub(crate) fn new_with_allocation<D>(data: D, memory_manager: &ResourceManager) -> ReadOnlySource
    where
        D: Deref<Target = [u8]> + Send + Sync + 'static,
    {
        let len = data.len();
        ReadOnlySource {
            data: Arc::new(Box::new(data)),
            start: 0,
            stop: len,
            allocation: Some(Arc::new(memory_manager.allocate(len as u64)))
        }
    }


    /// Creates an empty ReadOnlySource
    pub fn empty() -> ReadOnlySource {
        ReadOnlySource::new(&[][..])
    }

    /// Returns the data underlying the ReadOnlySource object.
    pub fn as_slice(&self) -> &[u8] {
        &self.data[self.start..self.stop]
    }

    /// Splits into 2 `ReadOnlySource`, at the offset given
    /// as an argument.
    pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left = self.slice(0, addr);
        let right = self.slice_from(addr);
        (left, right)
    }

    /// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
    pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left_len = self.len() - right_len;
        self.split(left_len)
    }

    /// Creates a ReadOnlySource that is just a
    /// view over a slice of the data.
    ///
    /// Keep in mind that any living slice extends
    /// the lifetime of the original ReadOnlySource,
    ///
    /// For instance, if `ReadOnlySource` wraps 500MB
    /// worth of data in anonymous memory, and only a
    /// 1KB slice is remaining, the whole `500MBs`
    /// are retained in memory.
    pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
        assert!(
            start <= stop,
            "Requested negative slice [{}..{}]",
            start,
            stop
        );
        assert!(stop <= self.len());
        ReadOnlySource {
            data: self.data.clone(),
            start: self.start + start,
            stop: self.start + stop,
            allocation: self.allocation.clone()
        }
    }

    /// Like `.slice(...)` but enforcing only the `from`
    /// boundary.
    ///
    /// Equivalent to `.slice(from_offset, self.len())`
    pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
        self.slice(from_offset, self.len())
    }

    /// Like `.slice(...)` but enforcing only the `to`
    /// boundary.
    ///
    /// Equivalent to `.slice(0, to_offset)`
    pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
        self.slice(0, to_offset)
    }
}

impl HasLen for ReadOnlySource {
    fn len(&self) -> usize {
        self.stop - self.start
    }
}

impl Clone for ReadOnlySource {
    fn clone(&self) -> Self {
        self.slice_from(0)
    }
}

impl From<Vec<u8>> for ReadOnlySource {
    fn from(data: Vec<u8>) -> ReadOnlySource {
        ReadOnlySource::new(data)
    }
}
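Worth noting about the API above: `slice` and `split` never copy bytes; they clone the `Arc` and adjust the `start`/`stop` offsets, so the whole underlying buffer (and its `Allocation`, if any) stays alive as long as any view does. The snippet below is a minimal sketch of how these calls compose, assuming the `ReadOnlySource` type introduced above is in scope on this branch (it is not part of released tantivy):

    // Sketch only: relies on the ReadOnlySource API added in this branch.
    fn read_only_source_views() {
        let source = ReadOnlySource::from(b"abcdefghi".to_vec());

        // Both halves are views over the same Arc'ed buffer; no copy happens.
        let (left, right) = source.clone().split(3);
        assert_eq!(left.as_slice(), b"abc");
        assert_eq!(right.as_slice(), b"defghi");

        // Slicing composes: offsets are relative to the view, not the full buffer.
        let middle = source.slice(2, 5);
        assert_eq!(middle.as_slice(), b"cde");
        assert_eq!(middle.slice_from(1).as_slice(), b"de");

        // len() reflects the view, not the underlying buffer.
        assert_eq!(right.len(), 6);
    }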
186 src/directory/spilling_writer.rs Normal file
@@ -0,0 +1,186 @@
use crate::common::MutableEnum;
use crate::directory::{TerminatingWrite, WritePtr};
use std::io::{self, Write};

/// Represents the state of the `SpillingWriter`.
enum SpillingState {
    Buffer {
        buffer: Vec<u8>,
        capacity: usize,
        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
    },
    Spilled(WritePtr),
}

impl SpillingState {
    fn new(
        limit: usize,
        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
    ) -> SpillingState {
        SpillingState::Buffer {
            buffer: Vec::with_capacity(limit),
            capacity: limit,
            write_factory,
        }
    }

    // Change the state in such a way that it is ready to accept
    // `extra_capacity` bytes.
    //
    fn reserve(self, extra_capacity: usize) -> io::Result<SpillingState> {
        match self {
            SpillingState::Buffer {
                buffer,
                capacity,
                write_factory,
            } => {
                if capacity >= extra_capacity {
                    Ok(SpillingState::Buffer {
                        buffer,
                        capacity: capacity - extra_capacity,
                        write_factory,
                    })
                } else {
                    let mut wrt = write_factory()?;
                    wrt.write_all(&buffer[..])?;
                    Ok(SpillingState::Spilled(wrt))
                }
            }
            SpillingState::Spilled(wrt) => Ok(SpillingState::Spilled(wrt)),
        }
    }
}

/// The `SpillingWriter` is a writer that start by writing in a
/// buffer.
///
/// Once a memory limit is reached, the spilling writer will
/// call a given `WritePtr` factory and start spilling into it.
///
/// Spilling here includes:
/// - writing all of the data that were written in the in-memory buffer so far
/// - writing subsequent data as well.
///
/// Once entering "spilling" mode, the `SpillingWriter` stays in this mode.
pub struct SpillingWriter {
    state: MutableEnum<SpillingState>,
}

impl SpillingWriter {
    //// Creates a new `Spilling Writer`.
    pub fn new(
        limit: usize,
        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
    ) -> SpillingWriter {
        let state = SpillingState::new(limit, write_factory);
        SpillingWriter {
            state: MutableEnum::wrap(state),
        }
    }

    /// Finalizes the `SpillingWriter`.
    ///
    /// The `SpillingResult` object is an enum specific
    /// to whether the `SpillingWriter` reached the spilling limit
    /// (In that case, the buffer is returned).
    ///
    /// If the writer reached the spilling mode, the underlying `WritePtr`
    /// is terminated and SpillingResult::Spilled is returned.
    pub fn finalize(self) -> io::Result<SpillingResult> {
        match self.state.into() {
            SpillingState::Spilled(wrt) => {
                wrt.terminate()?;
                Ok(SpillingResult::Spilled)
            }
            SpillingState::Buffer { buffer, .. } => Ok(SpillingResult::Buffer(buffer)),
        }
    }
}

/// enum used as the result of `.finalize()`.
pub enum SpillingResult {
    Spilled,
    Buffer(Vec<u8>),
}

impl io::Write for SpillingWriter {
    fn write(&mut self, payload: &[u8]) -> io::Result<usize> {
        self.write_all(payload)?;
        Ok(payload.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        if let SpillingState::Spilled(wrt) = &mut *self.state {
            wrt.flush()?;
        }
        Ok(())
    }

    fn write_all(&mut self, payload: &[u8]) -> io::Result<()> {
        self.state.map_mutate(|mut state| {
            state = state.reserve(payload.len())?;
            match &mut state {
                SpillingState::Buffer { buffer, .. } => {
                    buffer.extend_from_slice(payload);
                }
                SpillingState::Spilled(wrt) => {
                    wrt.write_all(payload)?;
                }
            }
            Ok(state)
        })
    }
}

#[cfg(test)]
mod tests {
    use super::SpillingWriter;
    use crate::directory::spilling_writer::SpillingResult;
    use crate::directory::RAMDirectory;
    use crate::Directory;
    use std::io::{self, Write};
    use std::path::Path;

    #[test]
    fn test_no_spilling() {
        let ram_directory = RAMDirectory::create();
        let mut ram_directory_clone = ram_directory.clone();
        let path = Path::new("test");
        let write_factory = Box::new(move || {
            ram_directory_clone
                .open_write(path)
                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
        });
        let mut spilling_wrt = SpillingWriter::new(10, write_factory);
        assert!(spilling_wrt.write_all(b"abcd").is_ok());
        if let SpillingResult::Buffer(buf) = spilling_wrt.finalize().unwrap() {
            assert_eq!(buf, b"abcd")
        } else {
            panic!("spill writer should not have spilled");
        }
        assert!(!ram_directory.exists(path));
    }

    #[test]
    fn test_spilling() {
        let ram_directory = RAMDirectory::create();
        let mut ram_directory_clone = ram_directory.clone();
        let path = Path::new("test");
        let write_factory = Box::new(move || {
            ram_directory_clone
                .open_write(path)
                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
        });
        let mut spilling_wrt = SpillingWriter::new(10, write_factory);
        assert!(spilling_wrt.write_all(b"abcd").is_ok());
        assert!(spilling_wrt.write_all(b"efghijklmnop").is_ok());
        if let SpillingResult::Spilled = spilling_wrt.finalize().unwrap() {
        } else {
            panic!("spill writer should have spilled");
        }
        assert_eq!(
            ram_directory.atomic_read(path).unwrap(),
            b"abcdefghijklmnop"
        );
    }
}
@@ -20,47 +20,45 @@ mod mmap_directory_tests {
}

#[test]
- fn test_simple() -> crate::Result<()> {
+ fn test_simple() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_simple(&directory)
+ super::test_simple(&mut directory);
}

#[test]
fn test_write_create_the_file() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_write_create_the_file(&directory);
+ super::test_write_create_the_file(&mut directory);
}

#[test]
- fn test_rewrite_forbidden() -> crate::Result<()> {
+ fn test_rewrite_forbidden() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_rewrite_forbidden(&directory)?;
+ super::test_rewrite_forbidden(&mut directory);
- Ok(())
}

#[test]
- fn test_directory_delete() -> crate::Result<()> {
+ fn test_directory_delete() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_directory_delete(&directory)?;
+ super::test_directory_delete(&mut directory);
- Ok(())
}

#[test]
fn test_lock_non_blocking() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_lock_non_blocking(&directory);
+ super::test_lock_non_blocking(&mut directory);
}

#[test]
fn test_lock_blocking() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_lock_blocking(&directory);
+ super::test_lock_blocking(&mut directory);
}

#[test]
fn test_watch() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_watch(&directory);
+ super::test_watch(&mut directory);
}
}

@@ -74,47 +72,45 @@ mod ram_directory_tests {
}

#[test]
- fn test_simple() -> crate::Result<()> {
+ fn test_simple() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_simple(&directory)
+ super::test_simple(&mut directory);
}

#[test]
fn test_write_create_the_file() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_write_create_the_file(&directory);
+ super::test_write_create_the_file(&mut directory);
}

#[test]
- fn test_rewrite_forbidden() -> crate::Result<()> {
+ fn test_rewrite_forbidden() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_rewrite_forbidden(&directory)?;
+ super::test_rewrite_forbidden(&mut directory);
- Ok(())
}

#[test]
- fn test_directory_delete() -> crate::Result<()> {
+ fn test_directory_delete() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_directory_delete(&directory)?;
+ super::test_directory_delete(&mut directory);
- Ok(())
}

#[test]
fn test_lock_non_blocking() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_lock_non_blocking(&directory);
+ super::test_lock_non_blocking(&mut directory);
}

#[test]
fn test_lock_blocking() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_lock_blocking(&directory);
+ super::test_lock_blocking(&mut directory);
}

#[test]
fn test_watch() {
- let directory = make_directory();
+ let mut directory = make_directory();
- super::test_watch(&directory);
+ super::test_watch(&mut directory);
}
}

@@ -122,61 +118,68 @@ mod ram_directory_tests {
#[should_panic]
fn ram_directory_panics_if_flush_forgotten() {
let test_path: &'static Path = Path::new("some_path_for_test");
- let ram_directory = RAMDirectory::create();
+ let mut ram_directory = RAMDirectory::create();
let mut write_file = ram_directory.open_write(test_path).unwrap();
assert!(write_file.write_all(&[4]).is_ok());
}

- fn test_simple(directory: &dyn Directory) -> crate::Result<()> {
+ fn test_simple(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
- let mut write_file = directory.open_write(test_path)?;
+ {
- assert!(directory.exists(test_path).unwrap());
+ let mut write_file = directory.open_write(test_path).unwrap();
- write_file.write_all(&[4])?;
+ assert!(directory.exists(test_path));
- write_file.write_all(&[3])?;
+ write_file.write_all(&[4]).unwrap();
- write_file.write_all(&[7, 3, 5])?;
+ write_file.write_all(&[3]).unwrap();
- write_file.flush()?;
+ write_file.write_all(&[7, 3, 5]).unwrap();
- let read_file = directory.open_read(test_path)?.read_bytes()?;
+ write_file.flush().unwrap();
- assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]);
+ }
- mem::drop(read_file);
+ {
+ let read_file = directory.open_read(test_path).unwrap();
+ let data: &[u8] = &*read_file;
+ assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
+ }
assert!(directory.delete(test_path).is_ok());
- assert!(!directory.exists(test_path).unwrap());
+ assert!(!directory.exists(test_path));
- Ok(())
}

- fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> {
+ fn test_rewrite_forbidden(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
- directory.open_write(test_path)?;
+ {
- assert!(directory.exists(test_path).unwrap());
+ directory.open_write(test_path).unwrap();
- assert!(directory.open_write(test_path).is_err());
+ assert!(directory.exists(test_path));
+ }
+ {
+ assert!(directory.open_write(test_path).is_err());
+ }
assert!(directory.delete(test_path).is_ok());
- Ok(())
}

- fn test_write_create_the_file(directory: &dyn Directory) {
+ fn test_write_create_the_file(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
{
assert!(directory.open_read(test_path).is_err());
let _w = directory.open_write(test_path).unwrap();
- assert!(directory.exists(test_path).unwrap());
+ assert!(directory.exists(test_path));
assert!(directory.open_read(test_path).is_ok());
assert!(directory.delete(test_path).is_ok());
}
}

- fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
+ fn test_directory_delete(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test");
assert!(directory.open_read(test_path).is_err());
- let mut write_file = directory.open_write(&test_path)?;
+ let mut write_file = directory.open_write(&test_path).unwrap();
- write_file.write_all(&[1, 2, 3, 4])?;
+ write_file.write_all(&[1, 2, 3, 4]).unwrap();
- write_file.flush()?;
+ write_file.flush().unwrap();
{
- let read_handle = directory.open_read(&test_path)?.read_bytes()?;
+ let read_handle = directory.open_read(&test_path).unwrap();
- assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
+ assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
// Mapped files can't be deleted on Windows
if !cfg!(windows) {
assert!(directory.delete(&test_path).is_ok());
- assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
+ assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
}

assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
}

@@ -186,40 +189,44 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {

assert!(directory.open_read(&test_path).is_err());
assert!(directory.delete(&test_path).is_err());
- Ok(())
}

- fn test_watch(directory: &dyn Directory) {
+ fn test_watch(directory: &mut dyn Directory) {
+ let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default();
- let (tx, rx) = crossbeam::channel::unbounded();
+ let counter_clone = counter.clone();
- let timeout = Duration::from_millis(500);
+ let (sender, receiver) = crossbeam::channel::unbounded();
+ let watch_callback = Box::new(move || {
- let handle = directory
+ counter_clone.fetch_add(1, SeqCst);
- .watch(WatchCallback::new(move || {
+ });
- let val = counter.fetch_add(1, SeqCst);
+ // This callback is used to synchronize watching in our unit test.
- tx.send(val + 1).unwrap();
+ // We bind it to a variable because the callback is removed when that
+ // handle is dropped.
+ let watch_handle = directory.watch(watch_callback).unwrap();
+ let _progress_listener = directory
+ .watch(Box::new(move || {
+ let val = num_progress.fetch_add(1, SeqCst);
+ let _ = sender.send(val);
}))
.unwrap();

+ for i in 0..10 {
+ assert_eq!(i, counter.load(SeqCst));
+ assert!(directory
+ .atomic_write(Path::new("meta.json"), b"random_test_data_2")
+ .is_ok());
+ assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
+ assert_eq!(i + 1, counter.load(SeqCst));
+ }
+ mem::drop(watch_handle);
assert!(directory
- .atomic_write(Path::new("meta.json"), b"foo")
+ .atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok());
- assert_eq!(rx.recv_timeout(timeout), Ok(1));
+ assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
+ assert_eq!(10, counter.load(SeqCst));
- assert!(directory
- .atomic_write(Path::new("meta.json"), b"bar")
- .is_ok());
- assert_eq!(rx.recv_timeout(timeout), Ok(2));

- mem::drop(handle);

- assert!(directory
- .atomic_write(Path::new("meta.json"), b"qux")
- .is_ok());
- assert!(rx.recv_timeout(timeout).is_err());
}

- fn test_lock_non_blocking(directory: &dyn Directory) {
+ fn test_lock_non_blocking(directory: &mut dyn Directory) {
{
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
@@ -244,7 +251,7 @@ fn test_lock_non_blocking(directory: &dyn Directory) {
assert!(lock_a_res.is_ok());
}

- fn test_lock_blocking(directory: &dyn Directory) {
+ fn test_lock_blocking(directory: &mut dyn Directory) {
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: true,
@@ -4,20 +4,8 @@ use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;

- /// Cloneable wrapper for callbacks registered when watching files of a `Directory`.
+ /// Type alias for callbacks registered when watching files of a `Directory`.
- #[derive(Clone)]
+ pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
- pub struct WatchCallback(Arc<dyn Fn() + Sync + Send>);

- impl WatchCallback {
- /// Wraps a `Fn()` to create a WatchCallback.
- pub fn new<F: Fn() + Sync + Send + 'static>(op: F) -> Self {
- WatchCallback(Arc::new(op))
- }

- fn call(&self) {
- self.0()
- }
- }

/// Helper struct to implement the watch method in `Directory` implementations.
///
@@ -41,17 +29,10 @@ impl WatchHandle {
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
WatchHandle(watch_callback)
}

- /// Returns an empty watch handle.
- ///
- /// This function is only useful when implementing a readonly directory.
- pub fn empty() -> WatchHandle {
- WatchHandle::new(Arc::new(WatchCallback::new(|| {})))
- }
}

impl WatchCallbackList {
- /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
+ /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
@@ -59,13 +40,13 @@ impl WatchCallbackList {
WatchHandle::new(watch_callback_arc)
}

- fn list_callback(&self) -> Vec<WatchCallback> {
+ fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
- let mut callbacks: Vec<WatchCallback> = vec![];
+ let mut callbacks = vec![];
let mut router_wlock = self.router.write().unwrap();
let mut i = 0;
while i < router_wlock.len() {
if let Some(watch) = router_wlock[i].upgrade() {
- callbacks.push(watch.as_ref().clone());
+ callbacks.push(watch);
i += 1;
} else {
router_wlock.swap_remove(i);
@@ -87,7 +68,7 @@ impl WatchCallbackList {
.name("watch-callbacks".to_string())
.spawn(move || {
for callback in callbacks {
- callback.call();
+ callback();
}
let _ = sender.send(());
});
@@ -103,7 +84,7 @@ impl WatchCallbackList {

#[cfg(test)]
mod tests {
- use crate::directory::{WatchCallback, WatchCallbackList};
+ use crate::directory::WatchCallbackList;
use futures::executor::block_on;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -114,7 +95,7 @@ mod tests {
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
- let inc_callback = WatchCallback::new(move || {
+ let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
block_on(watch_event_router.broadcast());
@@ -142,7 +123,7 @@ mod tests {
let counter: Arc<AtomicUsize> = Default::default();
let inc_callback = |inc: usize| {
let counter_clone = counter.clone();
- WatchCallback::new(move || {
+ Box::new(move || {
counter_clone.fetch_add(inc, Ordering::SeqCst);
})
};
@@ -170,7 +151,7 @@ mod tests {
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
- let inc_callback = WatchCallback::new(move || {
+ let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
let handle_a = watch_event_router.subscribe(inc_callback);
141 src/docset.rs
@@ -1,48 +1,58 @@
+ use crate::common::BitSet;
use crate::fastfield::DeleteBitSet;
use crate::DocId;
use std::borrow::Borrow;
use std::borrow::BorrowMut;
+ use std::cmp::Ordering;

- /// Sentinel value returned when a DocSet has been entirely consumed.
+ /// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
- ///
+ #[derive(PartialEq, Eq, Debug)]
- /// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
+ pub enum SkipResult {
- /// to compare [u32; 4].
+ /// target was in the docset
- pub const TERMINATED: DocId = std::i32::MAX as u32;
+ Reached,
+ /// target was not in the docset, skipping stopped as a greater element was found
+ OverStep,
+ /// the docset was entirely consumed without finding the target, nor any
+ /// element greater than the target.
+ End,
+ }

/// Represents an iterable set of sorted doc ids.
- pub trait DocSet: Send {
+ pub trait DocSet {
/// Goes to the next element.
- ///
+ /// `.advance(...)` needs to be called a first time to point to the correct
- /// The DocId of the next element is returned.
+ /// element.
- /// In other words we should always have :
+ fn advance(&mut self) -> bool;
- /// ```ignore
- /// let doc = docset.advance();
- /// assert_eq!(doc, docset.doc());
- /// ```
- ///
- /// If we reached the end of the DocSet, TERMINATED should be returned.
- ///
- /// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
- /// be returned.
- /// TODO Test existing docsets.
- fn advance(&mut self) -> DocId;

- /// Advances the DocSet forward until reaching the target, or going to the
+ /// After skipping, position the iterator in such a way that `.doc()`
- /// lowest DocId greater than the target.
+ /// will return a value greater than or equal to target.
///
- /// If the end of the DocSet is reached, TERMINATED is returned.
+ /// SkipResult expresses whether the `target value` was reached, overstepped,
+ /// or if the `DocSet` was entirely consumed without finding any value
+ /// greater or equal to the `target`.
///
- /// Calling `.seek(target)` on a terminated DocSet is legal. Implementation
+ /// WARNING: Calling skip always advances the docset.
- /// of DocSet should support it.
+ /// More specifically, if the docset is already positionned on the target
+ /// skipping will advance to the next position and return SkipResult::Overstep.
///
- /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
+ /// If `.skip_next()` oversteps, then the docset must be positionned correctly
- fn seek(&mut self, target: DocId) -> DocId {
+ /// on an existing document. In other words, `.doc()` should return the first document
- let mut doc = self.doc();
+ /// greater than `DocId`.
- debug_assert!(doc <= target);
+ fn skip_next(&mut self, target: DocId) -> SkipResult {
- while doc < target {
+ if !self.advance() {
- doc = self.advance();
+ return SkipResult::End;
+ }
+ loop {
+ match self.doc().cmp(&target) {
+ Ordering::Less => {
+ if !self.advance() {
+ return SkipResult::End;
+ }
+ }
+ Ordering::Equal => return SkipResult::Reached,
+ Ordering::Greater => return SkipResult::OverStep,
+ }
}
- doc
}

/// Fills a given mutable buffer with the next doc ids from the
@@ -61,38 +71,38 @@ pub trait DocSet: Send {
/// use case where batching. The normal way to
/// go through the `DocId`'s is to call `.advance()`.
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
- if self.doc() == TERMINATED {
- return 0;
- }
for (i, buffer_val) in buffer.iter_mut().enumerate() {
- *buffer_val = self.doc();
+ if self.advance() {
- if self.advance() == TERMINATED {
+ *buffer_val = self.doc();
- return i + 1;
+ } else {
+ return i;
}
}
buffer.len()
}

/// Returns the current document
- /// Right after creating a new DocSet, the docset points to the first document.
- ///
- /// If the DocSet is empty, .doc() should return `TERMINATED`.
fn doc(&self) -> DocId;

/// Returns a best-effort hint of the
/// length of the docset.
fn size_hint(&self) -> u32;

+ /// Appends all docs to a `bitset`.
+ fn append_to_bitset(&mut self, bitset: &mut BitSet) {
+ while self.advance() {
+ bitset.insert(self.doc());
+ }
+ }

/// Returns the number documents matching.
/// Calling this method consumes the `DocSet`.
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
let mut count = 0u32;
- let mut doc = self.doc();
+ while self.advance() {
- while doc != TERMINATED {
+ if !delete_bitset.is_deleted(self.doc()) {
- if !delete_bitset.is_deleted(doc) {
count += 1u32;
}
- doc = self.advance();
}
count
}
@@ -104,50 +114,22 @@ pub trait DocSet: Send {
/// given by `count()`.
fn count_including_deleted(&mut self) -> u32 {
let mut count = 0u32;
- let mut doc = self.doc();
+ while self.advance() {
- while doc != TERMINATED {
count += 1u32;
- doc = self.advance();
}
count
}
}

- impl<'a> DocSet for &'a mut dyn DocSet {
- fn advance(&mut self) -> u32 {
- (**self).advance()
- }

- fn seek(&mut self, target: DocId) -> DocId {
- (**self).seek(target)
- }

- fn doc(&self) -> u32 {
- (**self).doc()
- }

- fn size_hint(&self) -> u32 {
- (**self).size_hint()
- }

- fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
- (**self).count(delete_bitset)
- }

- fn count_including_deleted(&mut self) -> u32 {
- (**self).count_including_deleted()
- }
- }

impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
- fn advance(&mut self) -> DocId {
+ fn advance(&mut self) -> bool {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.advance()
}

- fn seek(&mut self, target: DocId) -> DocId {
+ fn skip_next(&mut self, target: DocId) -> SkipResult {
let unboxed: &mut TDocSet = self.borrow_mut();
- unboxed.seek(target)
+ unboxed.skip_next(target)
}

fn doc(&self) -> DocId {
@@ -169,4 +151,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.count_including_deleted()
}

+ fn append_to_bitset(&mut self, bitset: &mut BitSet) {
+ let unboxed: &mut TDocSet = self.borrow_mut();
+ unboxed.append_to_bitset(bitset);
+ }
}
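The `skip_next` contract documented above (always advance first, then compare until the target is reached or overstepped) can be exercised in isolation. The sketch below re-implements that logic over a sorted `Vec<u32>`; `VecDocSet` and its inherent methods are hypothetical stand-ins written for illustration, not the crate's own `DocSet` trait.

    use std::cmp::Ordering;

    #[derive(PartialEq, Eq, Debug)]
    enum SkipResult {
        Reached,
        OverStep,
        End,
    }

    // Minimal stand-in for the trait above: advance() moves the cursor,
    // doc() reads the current element, skip_next() follows the same contract.
    struct VecDocSet {
        docs: Vec<u32>,
        cursor: usize, // starts one position before the first element
    }

    impl VecDocSet {
        fn new(docs: Vec<u32>) -> VecDocSet {
            VecDocSet { docs, cursor: usize::MAX }
        }

        fn advance(&mut self) -> bool {
            self.cursor = self.cursor.wrapping_add(1);
            self.cursor < self.docs.len()
        }

        fn doc(&self) -> u32 {
            self.docs[self.cursor]
        }

        // Same logic as the default skip_next() implementation in the diff.
        fn skip_next(&mut self, target: u32) -> SkipResult {
            if !self.advance() {
                return SkipResult::End;
            }
            loop {
                match self.doc().cmp(&target) {
                    Ordering::Less => {
                        if !self.advance() {
                            return SkipResult::End;
                        }
                    }
                    Ordering::Equal => return SkipResult::Reached,
                    Ordering::Greater => return SkipResult::OverStep,
                }
            }
        }
    }

    fn main() {
        let mut docs = VecDocSet::new(vec![1, 3, 7, 12]);
        assert_eq!(docs.skip_next(3), SkipResult::Reached);  // lands exactly on 3
        assert_eq!(docs.skip_next(5), SkipResult::OverStep); // stops on 7, past the target
        assert_eq!(docs.doc(), 7);
        assert_eq!(docs.skip_next(100), SkipResult::End);    // consumed without a match
    }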
111 src/error.rs
@@ -2,27 +2,22 @@

use std::io;

+ use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
- use crate::{
+ use crate::schema;
- directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
+ use serde_json;
- schema,
- };
use std::fmt;
use std::path::PathBuf;
use std::sync::PoisonError;

- /// Represents a `DataCorruption` error.
- ///
- /// When facing data corruption, tantivy actually panic or return this error.
pub struct DataCorruption {
filepath: Option<PathBuf>,
comment: String,
}

impl DataCorruption {
- /// Creates a `DataCorruption` Error.
pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
DataCorruption {
filepath: Some(filepath),
@@ -30,11 +25,10 @@ impl DataCorruption {
}
}

- /// Creates a `DataCorruption` Error, when the filepath is irrelevant.
+ pub fn comment_only(comment: String) -> DataCorruption {
- pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
DataCorruption {
filepath: None,
- comment: comment.to_string(),
+ comment,
}
}
}
@@ -50,47 +44,44 @@ impl fmt::Debug for DataCorruption {
}
}

- /// The library's error enum
+ /// The library's failure based error enum
- #[derive(Debug, Error)]
+ #[derive(Debug, Fail)]
pub enum TantivyError {
- /// Failed to open the directory.
+ /// Path does not exist.
- #[error("Failed to open the directory: '{0:?}'")]
+ #[fail(display = "Path does not exist: '{:?}'", _0)]
- OpenDirectoryError(#[from] OpenDirectoryError),
+ PathDoesNotExist(PathBuf),
- /// Failed to open a file for read.
+ /// File already exists, this is a problem when we try to write into a new file.
- #[error("Failed to open file for read: '{0:?}'")]
+ #[fail(display = "File already exists: '{:?}'", _0)]
- OpenReadError(#[from] OpenReadError),
+ FileAlreadyExists(PathBuf),
- /// Failed to open a file for write.
- #[error("Failed to open file for write: '{0:?}'")]
- OpenWriteError(#[from] OpenWriteError),
/// Index already exists in this directory
- #[error("Index already exists")]
+ #[fail(display = "Index already exists")]
IndexAlreadyExists,
/// Failed to acquire file lock
- #[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
+ #[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
LockFailure(LockError, Option<String>),
/// IO Error.
- #[error("An IO error occurred: '{0}'")]
+ #[fail(display = "An IO error occurred: '{}'", _0)]
- IOError(#[from] io::Error),
+ IOError(#[cause] IOError),
/// Data corruption.
- #[error("Data corrupted: '{0:?}'")]
+ #[fail(display = "{:?}", _0)]
DataCorruption(DataCorruption),
/// A thread holding the locked panicked and poisoned the lock.
- #[error("A thread holding the locked panicked and poisoned the lock")]
+ #[fail(display = "A thread holding the locked panicked and poisoned the lock")]
Poisoned,
/// Invalid argument was passed by the user.
- #[error("An invalid argument was passed: '{0}'")]
+ #[fail(display = "An invalid argument was passed: '{}'", _0)]
InvalidArgument(String),
/// An Error happened in one of the thread.
- #[error("An error occurred in a thread: '{0}'")]
+ #[fail(display = "An error occurred in a thread: '{}'", _0)]
ErrorInThread(String),
/// An Error appeared related to the schema.
- #[error("Schema error: '{0}'")]
+ #[fail(display = "Schema error: '{}'", _0)]
SchemaError(String),
/// System error. (e.g.: We failed spawning a new thread)
- #[error("System error.'{0}'")]
+ #[fail(display = "System error.'{}'", _0)]
SystemError(String),
/// Index incompatible with current version of tantivy
- #[error("{0:?}")]
+ #[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility),
}

@@ -99,17 +90,31 @@ impl From<DataCorruption> for TantivyError {
TantivyError::DataCorruption(data_corruption)
}
}

impl From<FastFieldNotAvailableError> for TantivyError {
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
TantivyError::SchemaError(format!("{}", fastfield_error))
}
}

impl From<LockError> for TantivyError {
fn from(lock_error: LockError) -> TantivyError {
TantivyError::LockFailure(lock_error, None)
}
}

+ impl From<IOError> for TantivyError {
+ fn from(io_error: IOError) -> TantivyError {
+ TantivyError::IOError(io_error)
+ }
+ }

+ impl From<io::Error> for TantivyError {
+ fn from(io_error: io::Error) -> TantivyError {
+ TantivyError::IOError(io_error.into())
+ }
+ }

impl From<query::QueryParserError> for TantivyError {
fn from(parsing_error: query::QueryParserError) -> TantivyError {
|
||||||
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
|
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
|
||||||
@@ -122,9 +127,15 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<chrono::ParseError> for TantivyError {
|
impl From<OpenReadError> for TantivyError {
|
||||||
fn from(err: chrono::ParseError) -> TantivyError {
|
fn from(error: OpenReadError) -> TantivyError {
|
||||||
TantivyError::InvalidArgument(err.to_string())
|
match error {
|
||||||
|
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
|
||||||
|
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
|
||||||
|
OpenReadError::IncompatibleIndex(incompatibility) => {
|
||||||
|
TantivyError::IncompatibleIndex(incompatibility)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,9 +145,35 @@ impl From<schema::DocParsingError> for TantivyError {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<OpenWriteError> for TantivyError {
|
||||||
|
fn from(error: OpenWriteError) -> TantivyError {
|
||||||
|
match error {
|
||||||
|
OpenWriteError::FileAlreadyExists(filepath) => {
|
||||||
|
TantivyError::FileAlreadyExists(filepath)
|
||||||
|
}
|
||||||
|
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<OpenDirectoryError> for TantivyError {
|
||||||
|
fn from(error: OpenDirectoryError) -> TantivyError {
|
||||||
|
match error {
|
||||||
|
OpenDirectoryError::DoesNotExist(directory_path) => {
|
||||||
|
TantivyError::PathDoesNotExist(directory_path)
|
||||||
|
}
|
||||||
|
OpenDirectoryError::NotADirectory(directory_path) => {
|
||||||
|
TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
|
||||||
|
}
|
||||||
|
OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl From<serde_json::Error> for TantivyError {
|
impl From<serde_json::Error> for TantivyError {
|
||||||
fn from(error: serde_json::Error) -> TantivyError {
|
fn from(error: serde_json::Error) -> TantivyError {
|
||||||
TantivyError::IOError(error.into())
|
let io_err = io::Error::from(error);
|
||||||
|
TantivyError::IOError(io_err.into())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,114 +6,31 @@ pub use self::writer::BytesFastFieldWriter;
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value};
|
use crate::schema::Schema;
|
||||||
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED};
|
use crate::Index;
|
||||||
use crate::{DocAddress, DocSet, Index, Searcher, Term};
|
|
||||||
use std::ops::Deref;
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_bytes() -> crate::Result<()> {
|
fn test_bytes() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST);
|
let field = schema_builder.add_bytes_field("bytesfield");
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3]));
|
index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
|
||||||
index_writer.add_document(doc!(bytes_field=>vec![]));
|
index_writer.add_document(doc!(field=>vec![]));
|
||||||
index_writer.add_document(doc!(bytes_field=>vec![255u8]));
|
index_writer.add_document(doc!(field=>vec![255u8]));
|
||||||
index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9]));
|
index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
|
||||||
index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000]));
|
index_writer.add_document(doc!(field=>vec![0u8; 1000]));
|
||||||
index_writer.commit()?;
|
assert!(index_writer.commit().is_ok());
|
||||||
let searcher = index.reader()?.searcher();
|
let searcher = index.reader().unwrap().searcher();
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap();
|
let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
|
||||||
|
|
||||||
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
|
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
|
||||||
assert!(bytes_reader.get_bytes(1).is_empty());
|
assert!(bytes_reader.get_bytes(1).is_empty());
|
||||||
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
|
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
|
||||||
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
|
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
|
||||||
let long = vec![0u8; 1000];
|
let long = vec![0u8; 1000];
|
||||||
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
|
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn create_index_for_test<T: Into<BytesOptions>>(
|
|
||||||
byte_options: T,
|
|
||||||
) -> crate::Result<impl Deref<Target = Searcher>> {
|
|
||||||
let mut schema_builder = Schema::builder();
|
|
||||||
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(doc!(
|
|
||||||
field => b"tantivy".as_ref(),
|
|
||||||
field => b"lucene".as_ref()
|
|
||||||
));
|
|
||||||
index_writer.commit()?;
|
|
||||||
Ok(index.reader()?.searcher())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_stored_bytes() -> crate::Result<()> {
|
|
||||||
let searcher = create_index_for_test(STORED)?;
|
|
||||||
assert_eq!(searcher.num_docs(), 1);
|
|
||||||
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
|
||||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
|
||||||
let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
|
|
||||||
assert_eq!(values.len(), 2);
|
|
||||||
let values_bytes: Vec<&[u8]> = values
|
|
||||||
.into_iter()
|
|
||||||
.flat_map(|value| value.bytes_value())
|
|
||||||
.collect();
|
|
||||||
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_non_stored_bytes() -> crate::Result<()> {
|
|
||||||
let searcher = create_index_for_test(INDEXED)?;
|
|
||||||
assert_eq!(searcher.num_docs(), 1);
|
|
||||||
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
|
||||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
|
||||||
assert!(retrieved_doc.get_first(field).is_none());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_index_bytes() -> crate::Result<()> {
|
|
||||||
let searcher = create_index_for_test(INDEXED)?;
|
|
||||||
assert_eq!(searcher.num_docs(), 1);
|
|
||||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
|
||||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
|
||||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
|
||||||
let term_weight = term_query.specialized_weight(&searcher, true)?;
|
|
||||||
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
|
|
||||||
assert_eq!(term_scorer.doc(), 0u32);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_non_index_bytes() -> crate::Result<()> {
|
|
||||||
let searcher = create_index_for_test(STORED)?;
|
|
||||||
assert_eq!(searcher.num_docs(), 1);
|
|
||||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
|
||||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
|
||||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
|
||||||
let term_weight_err = term_query.specialized_weight(&searcher, false);
|
|
||||||
assert!(matches!(
|
|
||||||
term_weight_err,
|
|
||||||
Err(crate::TantivyError::SchemaError(_))
|
|
||||||
));
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
|
|
||||||
let searcher = create_index_for_test(FAST)?;
|
|
||||||
assert_eq!(searcher.num_docs(), 1);
|
|
||||||
let fast_fields = searcher.segment_reader(0u32).fast_fields();
|
|
||||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
|
||||||
let fast_field_reader = fast_fields.bytes(field).unwrap();
|
|
||||||
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
use crate::directory::FileSlice;
|
use owning_ref::OwningRef;
|
||||||
use crate::directory::OwnedBytes;
|
|
||||||
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::fastfield::FastFieldReader;
|
use crate::fastfield::FastFieldReader;
|
||||||
use crate::DocId;
|
use crate::DocId;
|
||||||
|
|
||||||
@@ -16,16 +17,16 @@ use crate::DocId;
|
|||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BytesFastFieldReader {
|
pub struct BytesFastFieldReader {
|
||||||
idx_reader: FastFieldReader<u64>,
|
idx_reader: FastFieldReader<u64>,
|
||||||
values: OwnedBytes,
|
values: OwningRef<ReadOnlySource, [u8]>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BytesFastFieldReader {
|
impl BytesFastFieldReader {
|
||||||
pub(crate) fn open(
|
pub(crate) fn open(
|
||||||
idx_reader: FastFieldReader<u64>,
|
idx_reader: FastFieldReader<u64>,
|
||||||
values_file: FileSlice,
|
values_source: ReadOnlySource,
|
||||||
) -> crate::Result<BytesFastFieldReader> {
|
) -> BytesFastFieldReader {
|
||||||
let values = values_file.read_bytes()?;
|
let values = OwningRef::new(values_source).map(|source| &source[..]);
|
||||||
Ok(BytesFastFieldReader { idx_reader, values })
|
BytesFastFieldReader { idx_reader, values }
|
||||||
}
|
}
|
||||||
|
|
||||||
fn range(&self, doc: DocId) -> (usize, usize) {
|
fn range(&self, doc: DocId) -> (usize, usize) {
|
||||||
@@ -37,7 +38,7 @@ impl BytesFastFieldReader {
|
|||||||
/// Returns the bytes associated to the given `doc`
|
/// Returns the bytes associated to the given `doc`
|
||||||
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
|
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
|
||||||
let (start, stop) = self.range(doc);
|
let (start, stop) = self.range(doc);
|
||||||
&self.values.as_slice()[start..stop]
|
&self.values[start..stop]
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the overall number of bytes in this bytes fast field.
|
/// Returns the overall number of bytes in this bytes fast field.
|
||||||
|
|||||||
@@ -49,10 +49,16 @@ impl BytesFastFieldWriter {
|
|||||||
/// matching field values present in the document.
|
/// matching field values present in the document.
|
||||||
pub fn add_document(&mut self, doc: &Document) {
|
pub fn add_document(&mut self, doc: &Document) {
|
||||||
self.next_doc();
|
self.next_doc();
|
||||||
for field_value in doc.get_all(self.field) {
|
for field_value in doc.field_values() {
|
||||||
if let Value::Bytes(ref bytes) = field_value {
|
if field_value.field() == self.field {
|
||||||
self.vals.extend_from_slice(bytes);
|
if let Value::Bytes(ref bytes) = *field_value.value() {
|
||||||
return;
|
self.vals.extend_from_slice(bytes);
|
||||||
|
} else {
|
||||||
|
panic!(
|
||||||
|
"Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
|
||||||
|
self.field, field_value
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -70,18 +76,21 @@ impl BytesFastFieldWriter {
|
|||||||
|
|
||||||
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
|
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
|
||||||
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
|
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
|
||||||
// writing the offset index
|
{
|
||||||
let mut doc_index_serializer =
|
// writing the offset index
|
||||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
let mut doc_index_serializer =
|
||||||
for &offset in &self.doc_index {
|
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||||
doc_index_serializer.add_val(offset)?;
|
for &offset in &self.doc_index {
|
||||||
|
doc_index_serializer.add_val(offset)?;
|
||||||
|
}
|
||||||
|
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
||||||
|
doc_index_serializer.close_field()?;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// writing the values themselves
|
||||||
|
let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
|
||||||
|
value_serializer.write_all(&self.vals)?;
|
||||||
}
|
}
|
||||||
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
|
||||||
doc_index_serializer.close_field()?;
|
|
||||||
// writing the values themselves
|
|
||||||
serializer
|
|
||||||
.new_bytes_fast_field_with_idx(self.field, 1)?
|
|
||||||
.write_all(&self.vals)?;
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
use crate::common::{BitSet, HasLen};
|
use crate::common::{BitSet, HasLen};
|
||||||
use crate::directory::FileSlice;
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::directory::OwnedBytes;
|
|
||||||
use crate::directory::WritePtr;
|
use crate::directory::WritePtr;
|
||||||
use crate::space_usage::ByteCount;
|
use crate::space_usage::ByteCount;
|
||||||
use crate::DocId;
|
use crate::DocId;
|
||||||
@@ -10,8 +9,6 @@ use std::io::Write;
|
|||||||
/// Write a delete `BitSet`
|
/// Write a delete `BitSet`
|
||||||
///
|
///
|
||||||
/// where `delete_bitset` is the set of deleted `DocId`.
|
/// where `delete_bitset` is the set of deleted `DocId`.
|
||||||
/// Warning: this function does not call terminate. The caller is in charge of
|
|
||||||
/// closing the writer properly.
|
|
||||||
pub fn write_delete_bitset(
|
pub fn write_delete_bitset(
|
||||||
delete_bitset: &BitSet,
|
delete_bitset: &BitSet,
|
||||||
max_doc: u32,
|
max_doc: u32,
|
||||||
@@ -40,41 +37,22 @@ pub fn write_delete_bitset(
|
|||||||
/// Set of deleted `DocId`s.
|
/// Set of deleted `DocId`s.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DeleteBitSet {
|
pub struct DeleteBitSet {
|
||||||
data: OwnedBytes,
|
data: ReadOnlySource,
|
||||||
len: usize,
|
len: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DeleteBitSet {
|
impl DeleteBitSet {
|
||||||
#[cfg(test)]
|
/// Opens a delete bitset given its data source.
|
||||||
pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
|
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
|
||||||
use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
|
let num_deleted: usize = data
|
||||||
use std::path::Path;
|
|
||||||
assert!(docs.iter().all(|&doc| doc < max_doc));
|
|
||||||
let mut bitset = BitSet::with_max_value(max_doc);
|
|
||||||
for &doc in docs {
|
|
||||||
bitset.insert(doc);
|
|
||||||
}
|
|
||||||
let directory = RAMDirectory::create();
|
|
||||||
let path = Path::new("dummydeletebitset");
|
|
||||||
let mut wrt = directory.open_write(path).unwrap();
|
|
||||||
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
|
|
||||||
wrt.terminate().unwrap();
|
|
||||||
let file = directory.open_read(path).unwrap();
|
|
||||||
Self::open(file).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Opens a delete bitset given its file.
|
|
||||||
pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> {
|
|
||||||
let bytes = file.read_bytes()?;
|
|
||||||
let num_deleted: usize = bytes
|
|
||||||
.as_slice()
|
.as_slice()
|
||||||
.iter()
|
.iter()
|
||||||
.map(|b| b.count_ones() as usize)
|
.map(|b| b.count_ones() as usize)
|
||||||
.sum();
|
.sum();
|
||||||
Ok(DeleteBitSet {
|
DeleteBitSet {
|
||||||
data: bytes,
|
data,
|
||||||
len: num_deleted,
|
len: num_deleted,
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns true iff the document is still "alive". In other words, if it has not been deleted.
|
/// Returns true iff the document is still "alive". In other words, if it has not been deleted.
|
||||||
@@ -86,7 +64,7 @@ impl DeleteBitSet {
|
|||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn is_deleted(&self, doc: DocId) -> bool {
|
pub fn is_deleted(&self, doc: DocId) -> bool {
|
||||||
let byte_offset = doc / 8u32;
|
let byte_offset = doc / 8u32;
|
||||||
let b: u8 = self.data.as_slice()[byte_offset as usize];
|
let b: u8 = (*self.data)[byte_offset as usize];
|
||||||
let shift = (doc & 7u32) as u8;
|
let shift = (doc & 7u32) as u8;
|
||||||
b & (1u8 << shift) != 0
|
b & (1u8 << shift) != 0
|
||||||
}
|
}
|
||||||
@@ -105,35 +83,42 @@ impl HasLen for DeleteBitSet {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::DeleteBitSet;
|
use super::*;
|
||||||
use crate::common::HasLen;
|
use crate::directory::*;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
#[test]
|
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
|
||||||
fn test_delete_bitset_empty() {
|
let test_path = PathBuf::from("test");
|
||||||
let delete_bitset = DeleteBitSet::for_test(&[], 10);
|
let mut directory = RAMDirectory::default();
|
||||||
for doc in 0..10 {
|
{
|
||||||
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
|
let mut writer = directory.open_write(&*test_path).unwrap();
|
||||||
|
write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
|
||||||
|
writer.terminate().unwrap();
|
||||||
}
|
}
|
||||||
assert_eq!(delete_bitset.len(), 0);
|
let source = directory.open_read(&test_path).unwrap();
|
||||||
|
let delete_bitset = DeleteBitSet::open(source);
|
||||||
|
for doc in 0..max_doc {
|
||||||
|
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
|
||||||
|
}
|
||||||
|
assert_eq!(delete_bitset.len(), bitset.len());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_delete_bitset() {
|
fn test_delete_bitset() {
|
||||||
let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
|
{
|
||||||
assert!(delete_bitset.is_alive(0));
|
let mut bitset = BitSet::with_max_value(10);
|
||||||
assert!(delete_bitset.is_deleted(1));
|
bitset.insert(1);
|
||||||
assert!(delete_bitset.is_alive(2));
|
bitset.insert(9);
|
||||||
assert!(delete_bitset.is_alive(3));
|
test_delete_bitset_helper(&bitset, 10);
|
||||||
assert!(delete_bitset.is_alive(4));
|
}
|
||||||
assert!(delete_bitset.is_alive(5));
|
{
|
||||||
assert!(delete_bitset.is_alive(6));
|
let mut bitset = BitSet::with_max_value(8);
|
||||||
assert!(delete_bitset.is_alive(6));
|
bitset.insert(1);
|
||||||
assert!(delete_bitset.is_alive(7));
|
bitset.insert(2);
|
||||||
assert!(delete_bitset.is_alive(8));
|
bitset.insert(3);
|
||||||
assert!(delete_bitset.is_deleted(9));
|
bitset.insert(5);
|
||||||
for doc in 0..10 {
|
bitset.insert(7);
|
||||||
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
|
test_delete_bitset_helper(&bitset, 8);
|
||||||
}
|
}
|
||||||
assert_eq!(delete_bitset.len(), 2);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ use std::result;
|
|||||||
/// `FastFieldNotAvailableError` is returned when the
|
/// `FastFieldNotAvailableError` is returned when the
|
||||||
/// user requested for a fast field reader, and the field was not
|
/// user requested for a fast field reader, and the field was not
|
||||||
/// defined in the schema as a fast field.
|
/// defined in the schema as a fast field.
|
||||||
#[derive(Debug, Error)]
|
#[derive(Debug, Fail)]
|
||||||
#[error("Fast field not available: '{field_name:?}'")]
|
#[fail(display = "Fast field not available: '{:?}'", field_name)]
|
||||||
pub struct FastFieldNotAvailableError {
|
pub struct FastFieldNotAvailableError {
|
||||||
field_name: String,
|
field_name: String,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
use super::MultiValueIntFastFieldReader;
|
use super::MultiValueIntFastFieldReader;
|
||||||
use crate::error::DataCorruption;
|
|
||||||
use crate::schema::Facet;
|
use crate::schema::Facet;
|
||||||
use crate::termdict::TermDictionary;
|
use crate::termdict::TermDictionary;
|
||||||
use crate::termdict::TermOrdinal;
|
use crate::termdict::TermOrdinal;
|
||||||
@@ -63,73 +62,18 @@ impl FacetReader {
|
|||||||
&mut self,
|
&mut self,
|
||||||
facet_ord: TermOrdinal,
|
facet_ord: TermOrdinal,
|
||||||
output: &mut Facet,
|
output: &mut Facet,
|
||||||
) -> crate::Result<()> {
|
) -> Result<(), str::Utf8Error> {
|
||||||
let found_term = self
|
let found_term = self
|
||||||
.term_dict
|
.term_dict
|
||||||
.ord_to_term(facet_ord as u64, &mut self.buffer)?;
|
.ord_to_term(facet_ord as u64, &mut self.buffer);
|
||||||
assert!(found_term, "Term ordinal {} no found.", facet_ord);
|
assert!(found_term, "Term ordinal {} no found.", facet_ord);
|
||||||
let facet_str = str::from_utf8(&self.buffer[..])
|
let facet_str = str::from_utf8(&self.buffer[..])?;
|
||||||
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
|
|
||||||
output.set_facet_str(facet_str);
|
output.set_facet_str(facet_str);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the list of facet ordinals associated to a document.
|
/// Return the list of facet ordinals associated to a document.
|
||||||
pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
|
pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
|
||||||
self.term_ords.get_vals(doc, output);
|
self.term_ords.get_vals(doc, output);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use crate::Index;
|
|
||||||
use crate::{
|
|
||||||
schema::{Facet, SchemaBuilder},
|
|
||||||
Document,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
|
|
||||||
let mut schema_builder = SchemaBuilder::default();
|
|
||||||
let facet_field = schema_builder.add_facet_field("facet");
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
|
||||||
index_writer.add_document(Document::default());
|
|
||||||
index_writer.commit()?;
|
|
||||||
let searcher = index.reader()?.searcher();
|
|
||||||
let facet_reader = searcher
|
|
||||||
.segment_reader(0u32)
|
|
||||||
.facet_reader(facet_field)
|
|
||||||
.unwrap();
|
|
||||||
let mut facet_ords = Vec::new();
|
|
||||||
facet_reader.facet_ords(0u32, &mut facet_ords);
|
|
||||||
assert_eq!(&facet_ords, &[2u64]);
|
|
||||||
facet_reader.facet_ords(1u32, &mut facet_ords);
|
|
||||||
assert!(facet_ords.is_empty());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
#[test]
|
|
||||||
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
|
|
||||||
let mut schema_builder = SchemaBuilder::default();
|
|
||||||
let facet_field = schema_builder.add_facet_field("facet");
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(Document::default());
|
|
||||||
index_writer.add_document(Document::default());
|
|
||||||
index_writer.commit()?;
|
|
||||||
let searcher = index.reader()?.searcher();
|
|
||||||
let facet_reader = searcher
|
|
||||||
.segment_reader(0u32)
|
|
||||||
.facet_reader(facet_field)
|
|
||||||
.unwrap();
|
|
||||||
let mut facet_ords = Vec::new();
|
|
||||||
facet_reader.facet_ords(0u32, &mut facet_ords);
|
|
||||||
assert!(facet_ords.is_empty());
|
|
||||||
facet_reader.facet_ords(1u32, &mut facet_ords);
|
|
||||||
assert!(facet_ords.is_empty());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -33,14 +33,11 @@ pub use self::reader::FastFieldReader;
|
|||||||
pub use self::readers::FastFieldReaders;
|
pub use self::readers::FastFieldReaders;
|
||||||
pub use self::serializer::FastFieldSerializer;
|
pub use self::serializer::FastFieldSerializer;
|
||||||
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
|
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
|
||||||
|
use crate::chrono::{NaiveDateTime, Utc};
|
||||||
use crate::common;
|
use crate::common;
|
||||||
use crate::schema::Cardinality;
|
use crate::schema::Cardinality;
|
||||||
use crate::schema::FieldType;
|
use crate::schema::FieldType;
|
||||||
use crate::schema::Value;
|
use crate::schema::Value;
|
||||||
use crate::{
|
|
||||||
chrono::{NaiveDateTime, Utc},
|
|
||||||
schema::Type,
|
|
||||||
};
|
|
||||||
|
|
||||||
mod bytes;
|
mod bytes;
|
||||||
mod delete;
|
mod delete;
|
||||||
@@ -79,9 +76,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
|
|||||||
fn make_zero() -> Self {
|
fn make_zero() -> Self {
|
||||||
Self::from_u64(0i64.to_u64())
|
Self::from_u64(0i64.to_u64())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the `schema::Type` for this FastValue.
|
|
||||||
fn to_type() -> Type;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for u64 {
|
impl FastValue for u64 {
|
||||||
@@ -104,10 +98,6 @@ impl FastValue for u64 {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
*self
|
*self
|
||||||
}
|
}
|
||||||
|
|
||||||
fn to_type() -> Type {
|
|
||||||
Type::U64
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for i64 {
|
impl FastValue for i64 {
|
||||||
@@ -129,10 +119,6 @@ impl FastValue for i64 {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
*self as u64
|
*self as u64
|
||||||
}
|
}
|
||||||
|
|
||||||
fn to_type() -> Type {
|
|
||||||
Type::I64
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for f64 {
|
impl FastValue for f64 {
|
||||||
@@ -154,10 +140,6 @@ impl FastValue for f64 {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
self.to_bits()
|
self.to_bits()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn to_type() -> Type {
|
|
||||||
Type::F64
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FastValue for crate::DateTime {
|
impl FastValue for crate::DateTime {
|
||||||
@@ -180,10 +162,6 @@ impl FastValue for crate::DateTime {
|
|||||||
fn as_u64(&self) -> u64 {
|
fn as_u64(&self) -> u64 {
|
||||||
self.timestamp().as_u64()
|
self.timestamp().as_u64()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn to_type() -> Type {
|
|
||||||
Type::Date
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn value_to_u64(value: &Value) -> u64 {
|
fn value_to_u64(value: &Value) -> u64 {
|
||||||
@@ -209,7 +187,6 @@ mod tests {
|
|||||||
use crate::schema::FAST;
|
use crate::schema::FAST;
|
||||||
use crate::schema::{Document, IntOptions};
|
use crate::schema::{Document, IntOptions};
|
||||||
use crate::{Index, SegmentId, SegmentReader};
|
use crate::{Index, SegmentId, SegmentReader};
|
||||||
use common::HasLen;
|
|
||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
use rand::prelude::SliceRandom;
|
use rand::prelude::SliceRandom;
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
@@ -240,9 +217,9 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_small() -> crate::Result<()> {
|
fn test_intfastfield_small() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
@@ -255,24 +232,27 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
assert_eq!(file.len(), 36 as usize);
|
{
|
||||||
let composite_file = CompositeFile::open(&file)?;
|
assert_eq!(source.len(), 36 as usize);
|
||||||
let file = composite_file.open_read(*FIELD).unwrap();
|
}
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(file)?;
|
{
|
||||||
assert_eq!(fast_field_reader.get(0), 13u64);
|
let composite_file = CompositeFile::open(&source).unwrap();
|
||||||
assert_eq!(fast_field_reader.get(1), 14u64);
|
let field_source = composite_file.open_read(*FIELD).unwrap();
|
||||||
assert_eq!(fast_field_reader.get(2), 2u64);
|
let fast_field_reader = FastFieldReader::<u64>::open(field_source);
|
||||||
Ok(())
|
assert_eq!(fast_field_reader.get(0), 13u64);
|
||||||
|
assert_eq!(fast_field_reader.get(1), 14u64);
|
||||||
|
assert_eq!(fast_field_reader.get(2), 2u64);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_large() -> crate::Result<()> {
|
fn test_intfastfield_large() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write)?;
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>4u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>4u64));
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
|
||||||
@@ -283,15 +263,19 @@ mod tests {
|
|||||||
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>215u64));
|
fast_field_writers.add_document(&doc!(*FIELD=>215u64));
|
||||||
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
fast_field_writers
|
||||||
serializer.close()?;
|
.serialize(&mut serializer, &HashMap::new())
|
||||||
|
.unwrap();
|
||||||
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path)?;
|
let source = directory.open_read(&path).unwrap();
|
||||||
assert_eq!(file.len(), 61 as usize);
|
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
assert_eq!(source.len(), 61 as usize);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||||
assert_eq!(fast_field_reader.get(0), 4u64);
|
assert_eq!(fast_field_reader.get(0), 4u64);
|
||||||
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
|
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
|
||||||
assert_eq!(fast_field_reader.get(2), 3_052u64);
|
assert_eq!(fast_field_reader.get(2), 3_052u64);
|
||||||
@@ -302,13 +286,12 @@ mod tests {
|
|||||||
assert_eq!(fast_field_reader.get(7), 1_501u64);
|
assert_eq!(fast_field_reader.get(7), 1_501u64);
|
||||||
assert_eq!(fast_field_reader.get(8), 215u64);
|
assert_eq!(fast_field_reader.get(8), 215u64);
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_null_amplitude() -> crate::Result<()> {
|
fn test_intfastfield_null_amplitude() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
|
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
@@ -322,23 +305,24 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
assert_eq!(file.len(), 34 as usize);
|
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
assert_eq!(source.len(), 34 as usize);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||||
for doc in 0..10_000 {
|
for doc in 0..10_000 {
|
||||||
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_large_numbers() -> crate::Result<()> {
|
fn test_intfastfield_large_numbers() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
|
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
@@ -354,12 +338,14 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
assert_eq!(file.len(), 80042 as usize);
|
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
assert_eq!(source.len(), 80042 as usize);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||||
assert_eq!(fast_field_reader.get(0), 0u64);
|
assert_eq!(fast_field_reader.get(0), 0u64);
|
||||||
for doc in 1..10_001 {
|
for doc in 1..10_001 {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -368,13 +354,12 @@ mod tests {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_signed_intfastfield() -> crate::Result<()> {
|
fn test_signed_intfastfield() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
|
|
||||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||||
@@ -393,12 +378,14 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
assert_eq!(file.len(), 17709 as usize);
|
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
assert_eq!(source.len(), 17709 as usize);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
||||||
|
|
||||||
assert_eq!(fast_field_reader.min_value(), -100i64);
|
assert_eq!(fast_field_reader.min_value(), -100i64);
|
||||||
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
||||||
@@ -411,13 +398,12 @@ mod tests {
|
|||||||
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
|
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_signed_intfastfield_default_val() -> crate::Result<()> {
|
fn test_signed_intfastfield_default_val() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
@@ -434,14 +420,13 @@ mod tests {
|
|||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
||||||
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warning: this generates the same permutation at each call
|
// Warning: this generates the same permutation at each call
|
||||||
@@ -452,26 +437,28 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_intfastfield_permutation() -> crate::Result<()> {
|
fn test_intfastfield_permutation() {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let permutation = generate_permutation();
|
let permutation = generate_permutation();
|
||||||
let n = permutation.len();
|
let n = permutation.len();
|
||||||
let directory = RAMDirectory::create();
|
let mut directory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write)?;
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||||
for &x in &permutation {
|
for &x in &permutation {
|
||||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||||
}
|
}
|
||||||
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
fast_field_writers
|
||||||
serializer.close()?;
|
.serialize(&mut serializer, &HashMap::new())
|
||||||
|
.unwrap();
|
||||||
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path)?;
|
let source = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||||
|
|
||||||
let mut a = 0u64;
|
let mut a = 0u64;
|
||||||
for _ in 0..n {
|
for _ in 0..n {
|
||||||
@@ -479,7 +466,6 @@ mod tests {
|
|||||||
a = fast_field_reader.get(a as u32);
|
a = fast_field_reader.get(a as u32);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -488,7 +474,7 @@ mod tests {
|
|||||||
let date_field = schema_builder.add_date_field("date", FAST);
|
let date_field = schema_builder.add_date_field("date", FAST);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||||
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
|
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit().unwrap();
|
||||||
@@ -525,7 +511,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||||
index_writer.add_document(doc!(
|
index_writer.add_document(doc!(
|
||||||
date_field => crate::DateTime::from_u64(1i64.to_u64()),
|
date_field => crate::DateTime::from_u64(1i64.to_u64()),
|
||||||
@@ -612,7 +598,7 @@ mod bench {
|
|||||||
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let permutation = generate_permutation();
|
let permutation = generate_permutation();
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
@@ -625,11 +611,11 @@ mod bench {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
|
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||||
|
|
||||||
b.iter(|| {
|
b.iter(|| {
|
||||||
let n = test::black_box(7000u32);
|
let n = test::black_box(7000u32);
|
||||||
@@ -646,7 +632,7 @@ mod bench {
|
|||||||
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
||||||
let path = Path::new("test");
|
let path = Path::new("test");
|
||||||
let permutation = generate_permutation();
|
let permutation = generate_permutation();
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||||
@@ -659,11 +645,11 @@ mod bench {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
let file = directory.open_read(&path).unwrap();
|
let source = directory.open_read(&path).unwrap();
|
||||||
{
|
{
|
||||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||||
let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
|
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||||
|
|
||||||
b.iter(|| {
|
b.iter(|| {
|
||||||
let n = test::black_box(1000u32);
|
let n = test::black_box(1000u32);
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
||||||
index_writer.add_document(doc!());
|
index_writer.add_document(doc!());
|
||||||
index_writer.add_document(doc!(field=>4u64));
|
index_writer.add_document(doc!(field=>4u64));
|
||||||
@@ -64,7 +64,7 @@ mod tests {
|
|||||||
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
let first_time_stamp = chrono::Utc::now();
|
let first_time_stamp = chrono::Utc::now();
|
||||||
index_writer.add_document(
|
index_writer.add_document(
|
||||||
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
|
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
|
||||||
@@ -100,7 +100,6 @@ mod tests {
|
|||||||
.get_first(date_field)
|
.get_first(date_field)
|
||||||
.expect("cannot find value")
|
.expect("cannot find value")
|
||||||
.date_value()
|
.date_value()
|
||||||
.unwrap()
|
|
||||||
.timestamp(),
|
.timestamp(),
|
||||||
first_time_stamp.timestamp()
|
first_time_stamp.timestamp()
|
||||||
);
|
);
|
||||||
@@ -109,7 +108,7 @@ mod tests {
|
|||||||
.get_first(time_i)
|
.get_first(time_i)
|
||||||
.expect("cannot find value")
|
.expect("cannot find value")
|
||||||
.i64_value(),
|
.i64_value(),
|
||||||
Some(1i64)
|
1i64
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -132,7 +131,6 @@ mod tests {
|
|||||||
.get_first(date_field)
|
.get_first(date_field)
|
||||||
.expect("cannot find value")
|
.expect("cannot find value")
|
||||||
.date_value()
|
.date_value()
|
||||||
.unwrap()
|
|
||||||
.timestamp(),
|
.timestamp(),
|
||||||
two_secs_ahead.timestamp()
|
two_secs_ahead.timestamp()
|
||||||
);
|
);
|
||||||
@@ -141,7 +139,7 @@ mod tests {
|
|||||||
.get_first(time_i)
|
.get_first(time_i)
|
||||||
.expect("cannot find value")
|
.expect("cannot find value")
|
||||||
.i64_value(),
|
.i64_value(),
|
||||||
Some(3i64)
|
3i64
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -188,7 +186,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(field=> 1i64, field => 3i64));
|
index_writer.add_document(doc!(field=> 1i64, field => 3i64));
|
||||||
index_writer.add_document(doc!());
|
index_writer.add_document(doc!());
|
||||||
index_writer.add_document(doc!(field=> -4i64));
|
index_writer.add_document(doc!(field=> -4i64));
|
||||||
@@ -199,14 +197,22 @@ mod tests {
|
|||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let mut vals = Vec::new();
|
let mut vals = Vec::new();
|
||||||
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
|
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
|
||||||
multi_value_reader.get_vals(2, &mut vals);
|
{
|
||||||
assert_eq!(&vals, &[-4i64]);
|
multi_value_reader.get_vals(2, &mut vals);
|
||||||
multi_value_reader.get_vals(0, &mut vals);
|
assert_eq!(&vals, &[-4i64]);
|
||||||
assert_eq!(&vals, &[1i64, 3i64]);
|
}
|
||||||
multi_value_reader.get_vals(1, &mut vals);
|
{
|
||||||
assert!(vals.is_empty());
|
multi_value_reader.get_vals(0, &mut vals);
|
||||||
multi_value_reader.get_vals(3, &mut vals);
|
assert_eq!(&vals, &[1i64, 3i64]);
|
||||||
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
|
}
|
||||||
|
{
|
||||||
|
multi_value_reader.get_vals(1, &mut vals);
|
||||||
|
assert!(vals.is_empty());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
multi_value_reader.get_vals(3, &mut vals);
|
||||||
|
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore]
|
#[ignore]
|
||||||
@@ -215,7 +221,7 @@ mod tests {
|
|||||||
let field = schema_builder.add_facet_field("facetfield");
|
let field = schema_builder.add_facet_field("facetfield");
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
for i in 0..100_000 {
|
for i in 0..100_000 {
|
||||||
index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
|
index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ mod tests {
|
|||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index
|
let mut index_writer = index
|
||||||
.writer_for_tests()
|
.writer_with_num_threads(1, 30_000_000)
|
||||||
.expect("Failed to create index writer.");
|
.expect("Failed to create index writer.");
|
||||||
index_writer.add_document(doc!(
|
index_writer.add_document(doc!(
|
||||||
facet_field => Facet::from("/category/cat2"),
|
facet_field => Facet::from("/category/cat2"),
|
||||||
|
|||||||
@@ -143,7 +143,7 @@ impl MultiValueIntFastFieldWriter {
|
|||||||
.iter()
|
.iter()
|
||||||
.map(|val| *mapping.get(val).expect("Missing term ordinal"));
|
.map(|val| *mapping.get(val).expect("Missing term ordinal"));
|
||||||
doc_vals.extend(remapped_vals);
|
doc_vals.extend(remapped_vals);
|
||||||
doc_vals.sort_unstable();
|
doc_vals.sort();
|
||||||
for &val in &doc_vals {
|
for &val in &doc_vals {
|
||||||
value_serializer.add_val(val)?;
|
value_serializer.add_val(val)?;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,12 +3,13 @@ use crate::common::bitpacker::BitUnpacker;
|
|||||||
use crate::common::compute_num_bits;
|
use crate::common::compute_num_bits;
|
||||||
use crate::common::BinarySerializable;
|
use crate::common::BinarySerializable;
|
||||||
use crate::common::CompositeFile;
|
use crate::common::CompositeFile;
|
||||||
use crate::directory::FileSlice;
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
||||||
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
|
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::schema::FAST;
|
use crate::schema::FAST;
|
||||||
use crate::DocId;
|
use crate::DocId;
|
||||||
|
use owning_ref::OwningRef;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
@@ -19,39 +20,37 @@ use std::path::Path;
|
|||||||
/// fast field is required.
|
/// fast field is required.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct FastFieldReader<Item: FastValue> {
|
pub struct FastFieldReader<Item: FastValue> {
|
||||||
bit_unpacker: BitUnpacker,
|
bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
|
||||||
min_value_u64: u64,
|
min_value_u64: u64,
|
||||||
max_value_u64: u64,
|
max_value_u64: u64,
|
||||||
_phantom: PhantomData<Item>,
|
_phantom: PhantomData<Item>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Item: FastValue> FastFieldReader<Item> {
|
impl<Item: FastValue> FastFieldReader<Item> {
|
||||||
/// Opens a fast field given a file.
|
/// Opens a fast field given a source.
|
||||||
pub fn open(file: FileSlice) -> crate::Result<Self> {
|
pub fn open(data: ReadOnlySource) -> Self {
|
||||||
let mut bytes = file.read_bytes()?;
|
let min_value: u64;
|
||||||
let min_value = u64::deserialize(&mut bytes)?;
|
let amplitude: u64;
|
||||||
let amplitude = u64::deserialize(&mut bytes)?;
|
{
|
||||||
|
let mut cursor = data.as_slice();
|
||||||
|
min_value =
|
||||||
|
u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
|
||||||
|
amplitude =
|
||||||
|
u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
|
||||||
|
}
|
||||||
let max_value = min_value + amplitude;
|
let max_value = min_value + amplitude;
|
||||||
let num_bits = compute_num_bits(amplitude);
|
let num_bits = compute_num_bits(amplitude);
|
||||||
let bit_unpacker = BitUnpacker::new(bytes, num_bits);
|
let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
|
||||||
Ok(FastFieldReader {
|
let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
|
||||||
|
FastFieldReader {
|
||||||
min_value_u64: min_value,
|
min_value_u64: min_value,
|
||||||
max_value_u64: max_value,
|
max_value_u64: max_value,
|
||||||
bit_unpacker,
|
bit_unpacker,
|
||||||
_phantom: PhantomData,
|
_phantom: PhantomData,
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
|
|
||||||
FastFieldReader {
|
|
||||||
bit_unpacker: self.bit_unpacker,
|
|
||||||
min_value_u64: self.min_value_u64,
|
|
||||||
max_value_u64: self.max_value_u64,
|
|
||||||
_phantom: PhantomData,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn cast<TFastValue: FastValue>(self) -> FastFieldReader<TFastValue> {
|
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
|
||||||
FastFieldReader {
|
FastFieldReader {
|
||||||
bit_unpacker: self.bit_unpacker,
|
bit_unpacker: self.bit_unpacker,
|
||||||
min_value_u64: self.min_value_u64,
|
min_value_u64: self.min_value_u64,
|
||||||
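
Both versions of `open` above imply the same on-disk layout for a single-value fast field: an 8-byte minimum value, an 8-byte amplitude, then one bit-packed `value - min_value` delta per document, each `compute_num_bits(amplitude)` bits wide. The sketch below decodes that layout directly; the little-endian header and the simplified bit extraction are assumptions of this illustration, not the crate's exact `BinarySerializable`/`BitUnpacker` code:

use std::convert::TryInto;

// Illustrative decoder for the layout [min_value: u64][amplitude: u64][bit-packed deltas].
fn decode_fast_field(bytes: &[u8], doc: usize) -> u64 {
    let min_value = u64::from_le_bytes(bytes[0..8].try_into().unwrap());
    let amplitude = u64::from_le_bytes(bytes[8..16].try_into().unwrap());
    let num_bits = (64 - amplitude.leading_zeros()) as usize;
    assert!(num_bits <= 56, "sketch only handles deltas that fit in 8 raw bytes");
    let data = &bytes[16..];
    let bit_pos = doc * num_bits;
    let (byte, shift) = (bit_pos / 8, bit_pos % 8);
    let mut buf = [0u8; 8];
    let end = (byte + 8).min(data.len());
    buf[..end - byte].copy_from_slice(&data[byte..end]);
    let mask = if num_bits == 0 { 0 } else { (1u64 << num_bits) - 1 };
    min_value + ((u64::from_le_bytes(buf) >> shift) & mask)
}
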
@@ -136,7 +135,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
|
|||||||
let field = schema_builder.add_u64_field("field", FAST);
|
let field = schema_builder.add_u64_field("field", FAST);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let path = Path::new("__dummy__");
|
let path = Path::new("__dummy__");
|
||||||
let directory: RAMDirectory = RAMDirectory::create();
|
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||||
{
|
{
|
||||||
let write: WritePtr = directory
|
let write: WritePtr = directory
|
||||||
.open_write(path)
|
.open_write(path)
|
||||||
@@ -158,11 +157,12 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
|
|||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
let file = directory.open_read(path).expect("Failed to open the file");
|
let source = directory.open_read(path).expect("Failed to open the file");
|
||||||
let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file");
|
let composite_file =
|
||||||
let field_file = composite_file
|
CompositeFile::open(&source).expect("Failed to read the composite file");
|
||||||
|
let field_source = composite_file
|
||||||
.open_read(field)
|
.open_read(field)
|
||||||
.expect("File component not found");
|
.expect("File component not found");
|
||||||
FastFieldReader::open(field_file).unwrap()
|
FastFieldReader::open(field_source)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use crate::common::CompositeFile;
|
use crate::common::CompositeFile;
|
||||||
|
use crate::fastfield::BytesFastFieldReader;
|
||||||
use crate::fastfield::MultiValueIntFastFieldReader;
|
use crate::fastfield::MultiValueIntFastFieldReader;
|
||||||
use crate::fastfield::{BytesFastFieldReader, FastValue};
|
|
||||||
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
|
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
|
||||||
use crate::schema::{Cardinality, Field, FieldType, Schema};
|
use crate::schema::{Cardinality, Field, FieldType, Schema};
|
||||||
use crate::space_usage::PerFieldSpaceUsage;
|
use crate::space_usage::PerFieldSpaceUsage;
|
||||||
@@ -68,52 +68,45 @@ impl FastFieldReaders {
|
|||||||
};
|
};
|
||||||
for (field, field_entry) in schema.fields() {
|
for (field, field_entry) in schema.fields() {
|
||||||
let field_type = field_entry.field_type();
|
let field_type = field_entry.field_type();
|
||||||
if let FieldType::Bytes(bytes_option) = field_type {
|
if field_type == &FieldType::Bytes {
|
||||||
if !bytes_option.is_fast() {
|
let idx_reader = fast_fields_composite
|
||||||
continue;
|
|
||||||
}
|
|
||||||
let fast_field_idx_file = fast_fields_composite
|
|
||||||
.open_read_with_idx(field, 0)
|
.open_read_with_idx(field, 0)
|
||||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
|
||||||
let idx_reader = FastFieldReader::open(fast_field_idx_file)?;
|
.map(FastFieldReader::open)?;
|
||||||
let data = fast_fields_composite
|
let data = fast_fields_composite
|
||||||
.open_read_with_idx(field, 1)
|
.open_read_with_idx(field, 1)
|
||||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
||||||
let bytes_fast_field_reader = BytesFastFieldReader::open(idx_reader, data)?;
|
|
||||||
fast_field_readers
|
fast_field_readers
|
||||||
.fast_bytes
|
.fast_bytes
|
||||||
.insert(field, bytes_fast_field_reader);
|
.insert(field, BytesFastFieldReader::open(idx_reader, data));
|
||||||
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
|
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
|
||||||
match cardinality {
|
match cardinality {
|
||||||
Cardinality::SingleValue => {
|
Cardinality::SingleValue => {
|
||||||
if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
|
if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
|
||||||
match fast_type {
|
match fast_type {
|
||||||
FastType::U64 => {
|
FastType::U64 => {
|
||||||
let fast_field_reader = FastFieldReader::open(fast_field_data)?;
|
let fast_field_reader = FastFieldReader::open(fast_field_data);
|
||||||
fast_field_readers
|
fast_field_readers
|
||||||
.fast_field_u64
|
.fast_field_u64
|
||||||
.insert(field, fast_field_reader);
|
.insert(field, fast_field_reader);
|
||||||
}
|
}
|
||||||
FastType::I64 => {
|
FastType::I64 => {
|
||||||
let fast_field_reader =
|
fast_field_readers.fast_field_i64.insert(
|
||||||
FastFieldReader::open(fast_field_data.clone())?;
|
field,
|
||||||
fast_field_readers
|
FastFieldReader::open(fast_field_data.clone()),
|
||||||
.fast_field_i64
|
);
|
||||||
.insert(field, fast_field_reader);
|
|
||||||
}
|
}
|
||||||
FastType::F64 => {
|
FastType::F64 => {
|
||||||
let fast_field_reader =
|
fast_field_readers.fast_field_f64.insert(
|
||||||
FastFieldReader::open(fast_field_data.clone())?;
|
field,
|
||||||
fast_field_readers
|
FastFieldReader::open(fast_field_data.clone()),
|
||||||
.fast_field_f64
|
);
|
||||||
.insert(field, fast_field_reader);
|
|
||||||
}
|
}
|
||||||
FastType::Date => {
|
FastType::Date => {
|
||||||
let fast_field_reader =
|
fast_field_readers.fast_field_date.insert(
|
||||||
FastFieldReader::open(fast_field_data.clone())?;
|
field,
|
||||||
fast_field_readers
|
FastFieldReader::open(fast_field_data.clone()),
|
||||||
.fast_field_date
|
);
|
||||||
.insert(field, fast_field_reader);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -124,10 +117,10 @@ impl FastFieldReaders {
|
|||||||
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
|
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
|
||||||
let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
|
let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
|
||||||
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
|
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
|
||||||
let idx_reader = FastFieldReader::open(fast_field_idx)?;
|
let idx_reader = FastFieldReader::open(fast_field_idx);
|
||||||
match fast_type {
|
match fast_type {
|
||||||
FastType::I64 => {
|
FastType::I64 => {
|
||||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||||
let multivalued_int_fast_field =
|
let multivalued_int_fast_field =
|
||||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||||
fast_field_readers
|
fast_field_readers
|
||||||
@@ -135,7 +128,7 @@ impl FastFieldReaders {
|
|||||||
.insert(field, multivalued_int_fast_field);
|
.insert(field, multivalued_int_fast_field);
|
||||||
}
|
}
|
||||||
FastType::U64 => {
|
FastType::U64 => {
|
||||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||||
let multivalued_int_fast_field =
|
let multivalued_int_fast_field =
|
||||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||||
fast_field_readers
|
fast_field_readers
|
||||||
@@ -143,7 +136,7 @@ impl FastFieldReaders {
|
|||||||
.insert(field, multivalued_int_fast_field);
|
.insert(field, multivalued_int_fast_field);
|
||||||
}
|
}
|
||||||
FastType::F64 => {
|
FastType::F64 => {
|
||||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||||
let multivalued_int_fast_field =
|
let multivalued_int_fast_field =
|
||||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||||
fast_field_readers
|
fast_field_readers
|
||||||
@@ -151,7 +144,7 @@ impl FastFieldReaders {
|
|||||||
.insert(field, multivalued_int_fast_field);
|
.insert(field, multivalued_int_fast_field);
|
||||||
}
|
}
|
||||||
FastType::Date => {
|
FastType::Date => {
|
||||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||||
let multivalued_int_fast_field =
|
let multivalued_int_fast_field =
|
||||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||||
fast_field_readers
|
fast_field_readers
|
||||||
@@ -201,14 +194,6 @@ impl FastFieldReaders {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
|
|
||||||
&self,
|
|
||||||
field: Field,
|
|
||||||
) -> Option<FastFieldReader<TFastValue>> {
|
|
||||||
self.u64_lenient(field)
|
|
||||||
.map(|fast_field_reader| fast_field_reader.cast())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the `i64` fast field reader associated to `field`.
|
/// Returns the `i64` fast field reader associated to `field`.
|
||||||
///
|
///
|
||||||
/// If `field` is not a i64 fast field, this method returns `None`.
|
/// If `field` is not a i64 fast field, this method returns `None`.
|
||||||
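
The `cast`/`u64_lenient` plumbing above works because every numeric fast field is stored as a `u64` under an order-preserving mapping, so the typed readers only differ in how they convert back. A sketch of the usual sign-bit-flip mapping for `i64`; the crate ships its own conversion helpers, this version is only an illustration:

// Order-preserving i64 <-> u64 mapping: flipping the sign bit sends
// i64::MIN..=i64::MAX onto 0..=u64::MAX monotonically.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    assert_eq!(i64_to_u64(i64::MIN), 0);
    assert!(i64_to_u64(-1) < i64_to_u64(0));
    assert_eq!(u64_to_i64(i64_to_u64(-42)), -42);
}
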
|
|||||||
@@ -33,7 +33,7 @@ impl FastFieldsWriter {
|
|||||||
let mut bytes_value_writers = Vec::new();
|
let mut bytes_value_writers = Vec::new();
|
||||||
|
|
||||||
for (field, field_entry) in schema.fields() {
|
for (field, field_entry) in schema.fields() {
|
||||||
match field_entry.field_type() {
|
match *field_entry.field_type() {
|
||||||
FieldType::I64(ref int_options)
|
FieldType::I64(ref int_options)
|
||||||
| FieldType::U64(ref int_options)
|
| FieldType::U64(ref int_options)
|
||||||
| FieldType::F64(ref int_options)
|
| FieldType::F64(ref int_options)
|
||||||
@@ -56,11 +56,9 @@ impl FastFieldsWriter {
|
|||||||
let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
|
let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
|
||||||
multi_values_writers.push(fast_field_writer);
|
multi_values_writers.push(fast_field_writer);
|
||||||
}
|
}
|
||||||
FieldType::Bytes(bytes_option) => {
|
FieldType::Bytes => {
|
||||||
if bytes_option.is_fast() {
|
let fast_field_writer = BytesFastFieldWriter::new(field);
|
||||||
let fast_field_writer = BytesFastFieldWriter::new(field);
|
bytes_value_writers.push(fast_field_writer);
|
||||||
bytes_value_writers.push(fast_field_writer);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
_ => {}
|
_ => {}
|
||||||
}
|
}
|
||||||
@@ -128,7 +126,6 @@ impl FastFieldsWriter {
|
|||||||
for field_writer in &self.single_value_writers {
|
for field_writer in &self.single_value_writers {
|
||||||
field_writer.serialize(serializer)?;
|
field_writer.serialize(serializer)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
for field_writer in &self.multi_values_writers {
|
for field_writer in &self.multi_values_writers {
|
||||||
let field = field_writer.field();
|
let field = field_writer.field();
|
||||||
field_writer.serialize(serializer, mapping.get(&field))?;
|
field_writer.serialize(serializer, mapping.get(&field))?;
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ mod reader;
|
|||||||
mod serializer;
|
mod serializer;
|
||||||
mod writer;
|
mod writer;
|
||||||
|
|
||||||
pub use self::reader::{FieldNormReader, FieldNormReaders};
|
pub use self::reader::FieldNormReader;
|
||||||
pub use self::serializer::FieldNormsSerializer;
|
pub use self::serializer::FieldNormsSerializer;
|
||||||
pub use self::writer::FieldNormsWriter;
|
pub use self::writer::FieldNormsWriter;
|
||||||
|
|
||||||
|
|||||||
@@ -1,47 +1,6 @@
|
|||||||
use super::{fieldnorm_to_id, id_to_fieldnorm};
|
use super::{fieldnorm_to_id, id_to_fieldnorm};
|
||||||
use crate::common::CompositeFile;
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::directory::FileSlice;
|
|
||||||
use crate::directory::OwnedBytes;
|
|
||||||
use crate::schema::Field;
|
|
||||||
use crate::space_usage::PerFieldSpaceUsage;
|
|
||||||
use crate::DocId;
|
use crate::DocId;
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
|
|
||||||
/// field) of all indexed fields in the index.
|
|
||||||
///
|
|
||||||
/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
|
|
||||||
/// `fieldnorm_id`.
|
|
||||||
/// The mapping from `fieldnorm` to `fieldnorm_id` is monotonic.
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct FieldNormReaders {
|
|
||||||
data: Arc<CompositeFile>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FieldNormReaders {
|
|
||||||
/// Creates a field norm reader.
|
|
||||||
pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> {
|
|
||||||
let data = CompositeFile::open(&file)?;
|
|
||||||
Ok(FieldNormReaders {
|
|
||||||
data: Arc::new(data),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the FieldNormReader for a specific field.
|
|
||||||
pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> {
|
|
||||||
if let Some(file) = self.data.open_read(field) {
|
|
||||||
let fieldnorm_reader = FieldNormReader::open(file)?;
|
|
||||||
Ok(Some(fieldnorm_reader))
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return a break down of the space usage per field.
|
|
||||||
pub fn space_usage(&self) -> PerFieldSpaceUsage {
|
|
||||||
self.data.space_usage()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reads the fieldnorm associated to a document.
|
/// Reads the fieldnorm associated to a document.
|
||||||
/// The fieldnorm represents the length associated to
|
/// The fieldnorm represents the length associated to
|
||||||
@@ -60,57 +19,14 @@ impl FieldNormReaders {
|
|||||||
/// Apart from compression, this scale also makes it possible to
|
/// Apart from compression, this scale also makes it possible to
|
||||||
/// precompute computationally expensive functions of the fieldnorm
|
/// precompute computationally expensive functions of the fieldnorm
|
||||||
/// in a very short array.
|
/// in a very short array.
|
||||||
#[derive(Clone)]
|
pub struct FieldNormReader {
|
||||||
pub struct FieldNormReader(ReaderImplEnum);
|
data: ReadOnlySource,
|
||||||
|
|
||||||
impl From<ReaderImplEnum> for FieldNormReader {
|
|
||||||
fn from(reader_enum: ReaderImplEnum) -> FieldNormReader {
|
|
||||||
FieldNormReader(reader_enum)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
enum ReaderImplEnum {
|
|
||||||
FromData(OwnedBytes),
|
|
||||||
Const {
|
|
||||||
num_docs: u32,
|
|
||||||
fieldnorm_id: u8,
|
|
||||||
fieldnorm: u32,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FieldNormReader {
|
impl FieldNormReader {
|
||||||
/// Creates a `FieldNormReader` with a constant fieldnorm.
|
/// Opens a field norm reader given its data source.
|
||||||
///
|
pub fn open(data: ReadOnlySource) -> Self {
|
||||||
/// The fieldnorm will be subjected to compression as if it was coming
|
FieldNormReader { data }
|
||||||
/// from an array-backed fieldnorm reader.
|
|
||||||
pub fn constant(num_docs: u32, fieldnorm: u32) -> FieldNormReader {
|
|
||||||
let fieldnorm_id = fieldnorm_to_id(fieldnorm);
|
|
||||||
let fieldnorm = id_to_fieldnorm(fieldnorm_id);
|
|
||||||
ReaderImplEnum::Const {
|
|
||||||
num_docs,
|
|
||||||
fieldnorm_id,
|
|
||||||
fieldnorm,
|
|
||||||
}
|
|
||||||
.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Opens a field norm reader given its file.
|
|
||||||
pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
|
|
||||||
let data = fieldnorm_file.read_bytes()?;
|
|
||||||
Ok(FieldNormReader::new(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new(data: OwnedBytes) -> Self {
|
|
||||||
ReaderImplEnum::FromData(data).into()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of documents in this segment.
|
|
||||||
pub fn num_docs(&self) -> u32 {
|
|
||||||
match &self.0 {
|
|
||||||
ReaderImplEnum::FromData(data) => data.len() as u32,
|
|
||||||
ReaderImplEnum::Const { num_docs, .. } => *num_docs,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the `fieldnorm` associated to a doc id.
|
/// Returns the `fieldnorm` associated to a doc id.
|
||||||
@@ -123,25 +39,15 @@ impl FieldNormReader {
|
|||||||
/// The fieldnorm is effectively decoded from the
|
/// The fieldnorm is effectively decoded from the
|
||||||
/// `fieldnorm_id` by doing a simple table lookup.
|
/// `fieldnorm_id` by doing a simple table lookup.
|
||||||
pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
|
pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
|
||||||
match &self.0 {
|
let fieldnorm_id = self.fieldnorm_id(doc_id);
|
||||||
ReaderImplEnum::FromData(data) => {
|
id_to_fieldnorm(fieldnorm_id)
|
||||||
let fieldnorm_id = data.as_slice()[doc_id as usize];
|
|
||||||
id_to_fieldnorm(fieldnorm_id)
|
|
||||||
}
|
|
||||||
ReaderImplEnum::Const { fieldnorm, .. } => *fieldnorm,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the `fieldnorm_id` associated to a document.
|
/// Returns the `fieldnorm_id` associated to a document.
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
|
pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
|
||||||
match &self.0 {
|
let fielnorms_data = self.data.as_slice();
|
||||||
ReaderImplEnum::FromData(data) => {
|
fielnorms_data[doc_id as usize]
|
||||||
let fieldnorm_id = data.as_slice()[doc_id as usize];
|
|
||||||
fieldnorm_id
|
|
||||||
}
|
|
||||||
ReaderImplEnum::Const { fieldnorm_id, .. } => *fieldnorm_id,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Converts a `fieldnorm_id` into a fieldnorm.
|
/// Converts a `fieldnorm_id` into a fieldnorm.
|
||||||
@@ -156,48 +62,18 @@ impl FieldNormReader {
|
|||||||
pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
|
pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
|
||||||
fieldnorm_to_id(fieldnorm)
|
fieldnorm_to_id(fieldnorm)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
|
|
||||||
let field_norms_id = field_norms
|
|
||||||
.iter()
|
|
||||||
.cloned()
|
|
||||||
.map(FieldNormReader::fieldnorm_to_id)
|
|
||||||
.collect::<Vec<u8>>();
|
|
||||||
let field_norms_data = OwnedBytes::new(field_norms_id);
|
|
||||||
FieldNormReader::new(field_norms_data)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
impl From<Vec<u32>> for FieldNormReader {
|
||||||
use crate::fieldnorm::FieldNormReader;
|
fn from(field_norms: Vec<u32>) -> FieldNormReader {
|
||||||
|
let field_norms_id = field_norms
|
||||||
#[test]
|
.into_iter()
|
||||||
fn test_from_fieldnorms_array() {
|
.map(FieldNormReader::fieldnorm_to_id)
|
||||||
let fieldnorms = &[1, 2, 3, 4, 1_000_000];
|
.collect::<Vec<u8>>();
|
||||||
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
|
let field_norms_data = ReadOnlySource::from(field_norms_id);
|
||||||
assert_eq!(fieldnorm_reader.num_docs(), 5);
|
FieldNormReader {
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
|
data: field_norms_data,
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
|
}
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_const_fieldnorm_reader_small_fieldnorm_id() {
|
|
||||||
let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 10u32);
|
|
||||||
assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(0u32), 10u32);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 10u8);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_const_fieldnorm_reader_large_fieldnorm_id() {
|
|
||||||
let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 300u32);
|
|
||||||
assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(0u32), 280u32);
|
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 72u8);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
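
The doc comments in this hunk carry the main design point: a fieldnorm is squeezed into one byte (`fieldnorm_id`) by a monotonic mapping, so any expensive per-document function of the fieldnorm can be precomputed into a 256-entry table and scoring becomes a single lookup. A hedged sketch of that pattern; the weight formula is only an example, not the crate's scoring code:

// Precompute an expensive function of the fieldnorm for every possible
// fieldnorm_id. `id_to_fieldnorm` is assumed to be the monotonic decoding
// function referenced above.
fn precompute_weight_cache(id_to_fieldnorm: impl Fn(u8) -> u32, avg_fieldnorm: f32) -> [f32; 256] {
    let mut cache = [0.0f32; 256];
    for id in 0u8..=255u8 {
        let fieldnorm = id_to_fieldnorm(id) as f32;
        // Example: a BM25-style length normalization with b = 0.75, k1 = 1.2.
        cache[id as usize] = 1.2 * (1.0 - 0.75 + 0.75 * fieldnorm / avg_fieldnorm);
    }
    cache
}

// At scoring time: weight = cache[fieldnorm_reader.fieldnorm_id(doc) as usize];
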
|
|||||||
@@ -4,7 +4,7 @@ use super::fieldnorm_to_id;
|
|||||||
use super::FieldNormsSerializer;
|
use super::FieldNormsSerializer;
|
||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use std::{io, iter};
|
use std::io;
|
||||||
|
|
||||||
/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
|
/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
|
||||||
/// of each document for each field with field norms.
|
/// of each document for each field with field norms.
|
||||||
@@ -44,9 +44,7 @@ impl FieldNormsWriter {
|
|||||||
.unwrap_or(0);
|
.unwrap_or(0);
|
||||||
FieldNormsWriter {
|
FieldNormsWriter {
|
||||||
fields,
|
fields,
|
||||||
fieldnorms_buffer: iter::repeat_with(Vec::new)
|
fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
|
||||||
.take(max_field)
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -80,12 +78,11 @@ impl FieldNormsWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Serialize the seen fieldnorm values to the serializer for all fields.
|
/// Serialize the seen fieldnorm values to the serializer for all fields.
|
||||||
pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
|
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
|
||||||
for &field in self.fields.iter() {
|
for &field in self.fields.iter() {
|
||||||
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
|
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
|
||||||
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
|
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
|
||||||
}
|
}
|
||||||
fieldnorms_serializer.close()?;
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,94 +1,45 @@
|
|||||||
use crate::Index;
|
|
||||||
use crate::Searcher;
|
|
||||||
use crate::{doc, schema::*};
|
|
||||||
use rand::thread_rng;
|
use rand::thread_rng;
|
||||||
use rand::Rng;
|
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
|
||||||
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
|
use crate::schema::*;
|
||||||
|
use crate::Index;
|
||||||
|
use crate::Searcher;
|
||||||
|
use rand::Rng;
|
||||||
|
|
||||||
|
fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
|
||||||
assert!(searcher.segment_readers().len() < 20);
|
assert!(searcher.segment_readers().len() < 20);
|
||||||
assert_eq!(searcher.num_docs() as usize, vals.len());
|
assert_eq!(searcher.num_docs() as usize, vals.len());
|
||||||
for segment_reader in searcher.segment_readers() {
|
|
||||||
let store_reader = segment_reader.get_store_reader()?;
|
|
||||||
for doc_id in 0..segment_reader.max_doc() {
|
|
||||||
let _doc = store_reader.get(doc_id)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore]
|
#[ignore]
|
||||||
fn test_functional_store() -> crate::Result<()> {
|
fn test_indexing() {
|
||||||
env_logger::init();
|
|
||||||
let mut schema_builder = Schema::builder();
|
|
||||||
|
|
||||||
let id_field = schema_builder.add_u64_field("id", INDEXED | STORED);
|
|
||||||
let schema = schema_builder.build();
|
|
||||||
|
|
||||||
let index = Index::create_in_ram(schema);
|
|
||||||
let reader = index.reader()?;
|
|
||||||
|
|
||||||
let mut rng = thread_rng();
|
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(3, 12_000_000)?;
|
|
||||||
|
|
||||||
let mut doc_set: Vec<u64> = Vec::new();
|
|
||||||
|
|
||||||
let mut doc_id = 0u64;
|
|
||||||
for iteration in 0.. {
|
|
||||||
let num_docs: usize = rng.gen_range(0..4);
|
|
||||||
if doc_set.len() >= 1 {
|
|
||||||
let doc_to_remove_id = rng.gen_range(0..doc_set.len());
|
|
||||||
let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
|
|
||||||
index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
|
|
||||||
}
|
|
||||||
for _ in 0..num_docs {
|
|
||||||
doc_set.push(doc_id);
|
|
||||||
index_writer.add_document(doc!(id_field=>doc_id));
|
|
||||||
doc_id += 1;
|
|
||||||
}
|
|
||||||
index_writer.commit()?;
|
|
||||||
reader.reload()?;
|
|
||||||
let searcher = reader.searcher();
|
|
||||||
println!("#{} - {}", iteration, searcher.segment_readers().len());
|
|
||||||
check_index_content(&searcher, &doc_set)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[ignore]
|
|
||||||
fn test_functional_indexing() -> crate::Result<()> {
|
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
|
|
||||||
let id_field = schema_builder.add_u64_field("id", INDEXED);
|
let id_field = schema_builder.add_u64_field("id", INDEXED);
|
||||||
let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
|
let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
let index = Index::create_from_tempdir(schema)?;
|
let index = Index::create_from_tempdir(schema).unwrap();
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
|
|
||||||
let mut rng = thread_rng();
|
let mut rng = thread_rng();
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(3, 120_000_000)?;
|
let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
|
||||||
|
|
||||||
let mut committed_docs: HashSet<u64> = HashSet::new();
|
let mut committed_docs: HashSet<u64> = HashSet::new();
|
||||||
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
|
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
|
||||||
|
|
||||||
for _ in 0..200 {
|
for _ in 0..200 {
|
||||||
let random_val = rng.gen_range(0..20);
|
let random_val = rng.gen_range(0, 20);
|
||||||
if random_val == 0 {
|
if random_val == 0 {
|
||||||
index_writer.commit()?;
|
index_writer.commit().expect("Commit failed");
|
||||||
committed_docs.extend(&uncommitted_docs);
|
committed_docs.extend(&uncommitted_docs);
|
||||||
uncommitted_docs.clear();
|
uncommitted_docs.clear();
|
||||||
reader.reload()?;
|
reader.reload().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
// check that everything is correct.
|
// check that everything is correct.
|
||||||
check_index_content(
|
check_index_content(&searcher, &committed_docs);
|
||||||
&searcher,
|
|
||||||
&committed_docs.iter().cloned().collect::<Vec<u64>>(),
|
|
||||||
)?;
|
|
||||||
} else {
|
} else {
|
||||||
if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
|
if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
|
||||||
let doc_id_term = Term::from_field_u64(id_field, random_val);
|
let doc_id_term = Term::from_field_u64(id_field, random_val);
|
||||||
@@ -104,5 +55,4 @@ fn test_functional_indexing() -> crate::Result<()> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
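
The functional test above depends on the usual visibility rule: `add_document` and `delete_term` only become observable after a `commit`, and a manually reloaded reader additionally needs `reload`. A condensed sketch of one iteration of that cycle, reusing the names from the test and assuming it runs inside a function returning `crate::Result<()>`:

let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
index_writer.add_document(doc!(id_field => 1u64));
index_writer.add_document(doc!(id_field => 2u64));
index_writer.delete_term(Term::from_field_u64(id_field, 1u64));
index_writer.commit()?;
reader.reload()?;
// Only the surviving document is visible after commit + reload.
assert_eq!(reader.searcher().num_docs(), 1);
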
|
|||||||
@@ -53,7 +53,7 @@ impl DeleteQueue {
|
|||||||
return block;
|
return block;
|
||||||
}
|
}
|
||||||
let block = Arc::new(Block {
|
let block = Arc::new(Block {
|
||||||
operations: Arc::new([]),
|
operations: Arc::default(),
|
||||||
next: NextBlock::from(self.clone()),
|
next: NextBlock::from(self.clone()),
|
||||||
});
|
});
|
||||||
wlock.last_block = Arc::downgrade(&block);
|
wlock.last_block = Arc::downgrade(&block);
|
||||||
@@ -108,7 +108,7 @@ impl DeleteQueue {
|
|||||||
let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
|
let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
|
||||||
|
|
||||||
let new_block = Arc::new(Block {
|
let new_block = Arc::new(Block {
|
||||||
operations: Arc::from(delete_operations.into_boxed_slice()),
|
operations: Arc::new(delete_operations.into_boxed_slice()),
|
||||||
next: NextBlock::from(self.clone()),
|
next: NextBlock::from(self.clone()),
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -167,7 +167,7 @@ impl NextBlock {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct Block {
|
struct Block {
|
||||||
operations: Arc<[DeleteOperation]>,
|
operations: Arc<Box<[DeleteOperation]>>,
|
||||||
next: NextBlock,
|
next: NextBlock,
|
||||||
}
|
}
|
||||||
|
|
||||||
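
The `DeleteQueue` change above replaces `Arc<Box<[DeleteOperation]>>` with `Arc<[DeleteOperation]>`, dropping one level of indirection per block. A small standalone sketch of how such a slice `Arc` is built, generic rather than tied to `DeleteOperation`:

use std::sync::Arc;

// Moves the elements behind a single reference-counted allocation;
// cloning the Arc afterwards is just a refcount bump.
fn freeze<T>(ops: Vec<T>) -> Arc<[T]> {
    Arc::from(ops.into_boxed_slice())
}

fn main() {
    let block: Arc<[u64]> = freeze(vec![1, 2, 3]);
    // `Arc::new([])` coerces from Arc<[T; 0]>, matching the empty block above.
    let empty: Arc<[u64]> = Arc::new([]);
    assert_eq!(block.len(), 3);
    assert!(empty.is_empty());
}
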
|
|||||||
@@ -8,22 +8,26 @@ use crate::core::SegmentComponent;
|
|||||||
use crate::core::SegmentId;
|
use crate::core::SegmentId;
|
||||||
use crate::core::SegmentMeta;
|
use crate::core::SegmentMeta;
|
||||||
use crate::core::SegmentReader;
|
use crate::core::SegmentReader;
|
||||||
use crate::directory::TerminatingWrite;
|
|
||||||
use crate::directory::{DirectoryLock, GarbageCollectionResult};
|
use crate::directory::{DirectoryLock, GarbageCollectionResult};
|
||||||
use crate::docset::{DocSet, TERMINATED};
|
use crate::directory::{TerminatingWrite, WatchCallbackList};
|
||||||
|
use crate::docset::DocSet;
|
||||||
use crate::error::TantivyError;
|
use crate::error::TantivyError;
|
||||||
use crate::fastfield::write_delete_bitset;
|
use crate::fastfield::write_delete_bitset;
|
||||||
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
|
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
|
||||||
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
|
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
|
||||||
use crate::indexer::operation::DeleteOperation;
|
use crate::indexer::operation::DeleteOperation;
|
||||||
|
use crate::indexer::segment_manager::SegmentRegisters;
|
||||||
|
use crate::indexer::segment_register::SegmentRegister;
|
||||||
use crate::indexer::stamper::Stamper;
|
use crate::indexer::stamper::Stamper;
|
||||||
use crate::indexer::MergePolicy;
|
use crate::indexer::{SegmentEntry, ResourceManager};
|
||||||
use crate::indexer::SegmentEntry;
|
|
||||||
use crate::indexer::SegmentWriter;
|
use crate::indexer::SegmentWriter;
|
||||||
|
use crate::indexer::{IndexWriterConfig, MergePolicy};
|
||||||
|
use crate::reader::NRTReader;
|
||||||
use crate::schema::Document;
|
use crate::schema::Document;
|
||||||
use crate::schema::IndexRecordOption;
|
use crate::schema::IndexRecordOption;
|
||||||
use crate::schema::Term;
|
use crate::schema::Term;
|
||||||
use crate::Opstamp;
|
use crate::tokenizer::TokenizerManager;
|
||||||
|
use crate::{IndexReader, Opstamp};
|
||||||
use crossbeam::channel;
|
use crossbeam::channel;
|
||||||
use futures::executor::block_on;
|
use futures::executor::block_on;
|
||||||
use futures::future::Future;
|
use futures::future::Future;
|
||||||
@@ -31,18 +35,10 @@ use smallvec::smallvec;
|
|||||||
use smallvec::SmallVec;
|
use smallvec::SmallVec;
|
||||||
use std::mem;
|
use std::mem;
|
||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
use std::sync::Arc;
|
use std::sync::{Arc, RwLock};
|
||||||
use std::thread;
|
use std::thread;
|
||||||
use std::thread::JoinHandle;
|
use std::thread::JoinHandle;
|
||||||
|
|
||||||
// Size of the margin for the heap. A segment is closed when the remaining memory
|
|
||||||
// in the heap goes below MARGIN_IN_BYTES.
|
|
||||||
pub const MARGIN_IN_BYTES: usize = 1_000_000;
|
|
||||||
|
|
||||||
// We impose the memory per thread to be at least 3 MB.
|
|
||||||
pub const HEAP_SIZE_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
|
|
||||||
pub const HEAP_SIZE_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
|
|
||||||
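
To make those constants concrete: the per-thread budget must be at least 3 MB, which is exactly what the tests in this diff pass as `writer_with_num_threads(1, 3_000_000)`, and a segment is flushed once the writer's memory usage climbs within `MARGIN_IN_BYTES` of the budget (see the `mem_usage >= memory_budget - MARGIN_IN_BYTES` check further down). A small check of the arithmetic:

const MARGIN_IN_BYTES: usize = 1_000_000;
const HEAP_SIZE_MIN: usize = MARGIN_IN_BYTES * 3; // 3_000_000 bytes, roughly 3 MB
const HEAP_SIZE_MAX: usize = u32::MAX as usize - MARGIN_IN_BYTES;

fn main() {
    assert_eq!(HEAP_SIZE_MIN, 3_000_000);
    // With the minimal budget, 2 MB are usable before the margin forces a flush.
    assert_eq!(HEAP_SIZE_MIN - MARGIN_IN_BYTES, 2_000_000);
    assert_eq!(HEAP_SIZE_MAX, 4_293_967_295);
}
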
|
|
||||||
// Add document will block if the number of docs waiting in the queue to be indexed
|
// Add document will block if the number of docs waiting in the queue to be indexed
|
||||||
// reaches `PIPELINE_MAX_SIZE_IN_DOCS`
|
// reaches `PIPELINE_MAX_SIZE_IN_DOCS`
|
||||||
const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
|
const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
|
||||||
@@ -69,8 +65,9 @@ pub struct IndexWriter {
|
|||||||
_directory_lock: Option<DirectoryLock>,
|
_directory_lock: Option<DirectoryLock>,
|
||||||
|
|
||||||
index: Index,
|
index: Index,
|
||||||
|
config: IndexWriterConfig,
|
||||||
|
|
||||||
heap_size_in_bytes_per_thread: usize,
|
segment_registers: Arc<RwLock<SegmentRegisters>>,
|
||||||
|
|
||||||
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
|
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
|
||||||
|
|
||||||
@@ -80,13 +77,14 @@ pub struct IndexWriter {
|
|||||||
segment_updater: SegmentUpdater,
|
segment_updater: SegmentUpdater,
|
||||||
|
|
||||||
worker_id: usize,
|
worker_id: usize,
|
||||||
|
|
||||||
num_threads: usize,
|
|
||||||
|
|
||||||
delete_queue: DeleteQueue,
|
delete_queue: DeleteQueue,
|
||||||
|
|
||||||
stamper: Stamper,
|
stamper: Stamper,
|
||||||
committed_opstamp: Opstamp,
|
committed_opstamp: Opstamp,
|
||||||
|
|
||||||
|
on_commit: WatchCallbackList,
|
||||||
|
|
||||||
|
memory_manager: ResourceManager,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compute_deleted_bitset(
|
fn compute_deleted_bitset(
|
||||||
@@ -108,19 +106,19 @@ fn compute_deleted_bitset(
|
|||||||
// Limit doc helps identify the first document
|
// Limit doc helps identify the first document
|
||||||
// that may be affected by the delete operation.
|
// that may be affected by the delete operation.
|
||||||
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
|
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
|
||||||
let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
|
let inverted_index = segment_reader.inverted_index(delete_op.term.field());
|
||||||
if let Some(mut docset) =
|
if let Some(mut docset) =
|
||||||
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
|
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
|
||||||
{
|
{
|
||||||
let mut deleted_doc = docset.doc();
|
while docset.advance() {
|
||||||
while deleted_doc != TERMINATED {
|
let deleted_doc = docset.doc();
|
||||||
if deleted_doc < limit_doc {
|
if deleted_doc < limit_doc {
|
||||||
delete_bitset.insert(deleted_doc);
|
delete_bitset.insert(deleted_doc);
|
||||||
might_have_changed = true;
|
might_have_changed = true;
|
||||||
}
|
}
|
||||||
deleted_doc = docset.advance();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
delete_cursor.advance();
|
delete_cursor.advance();
|
||||||
}
|
}
|
||||||
Ok(might_have_changed)
|
Ok(might_have_changed)
|
||||||
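
The two sides of this hunk follow different `DocSet` contracts: the older API calls `advance()` first and then reads `doc()`, while the newer one starts positioned on the first document and reports exhaustion through the `TERMINATED` sentinel. A minimal sketch of the sentinel-style loop, with the trait reduced to the two methods the loop needs (local definitions, not the crate's):

const TERMINATED: u32 = u32::MAX;

trait DocSet {
    /// Current document, or TERMINATED once the set is exhausted.
    fn doc(&self) -> u32;
    /// Advances to the next document and returns it (or TERMINATED).
    fn advance(&mut self) -> u32;
}

fn collect_docs(docset: &mut dyn DocSet) -> Vec<u32> {
    let mut docs = Vec::new();
    let mut doc = docset.doc(); // already positioned on the first document
    while doc != TERMINATED {
        docs.push(doc);
        doc = docset.advance();
    }
    docs
}
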
@@ -133,7 +131,6 @@ fn compute_deleted_bitset(
|
|||||||
/// For instance, there was no delete operation between the state of the `segment_entry` and
|
/// For instance, there was no delete operation between the state of the `segment_entry` and
|
||||||
/// the `target_opstamp`, `segment_entry` is not updated.
|
/// the `target_opstamp`, `segment_entry` is not updated.
|
||||||
pub(crate) fn advance_deletes(
|
pub(crate) fn advance_deletes(
|
||||||
mut segment: Segment,
|
|
||||||
segment_entry: &mut SegmentEntry,
|
segment_entry: &mut SegmentEntry,
|
||||||
target_opstamp: Opstamp,
|
target_opstamp: Opstamp,
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<()> {
|
||||||
@@ -142,25 +139,33 @@ pub(crate) fn advance_deletes(
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
|
let delete_bitset_opt = segment_entry.take_delete_bitset();
|
||||||
|
|
||||||
|
// We avoid directly advancing the `SegmentEntry` delete cursor, because
|
||||||
|
// we do not want to end up in an invalid state if the delete bitset
|
||||||
|
// serialization fails.
|
||||||
|
let mut delete_cursor = segment_entry.delete_cursor();
|
||||||
|
|
||||||
|
if delete_bitset_opt.is_none() && segment_entry.delete_cursor().get().is_none() {
|
||||||
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
|
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We open our current serialized segment to compute the new deleted bitset.
|
||||||
|
let segment = segment_entry.segment().clone();
|
||||||
let segment_reader = SegmentReader::open(&segment)?;
|
let segment_reader = SegmentReader::open(&segment)?;
|
||||||
|
|
||||||
let max_doc = segment_reader.max_doc();
|
let max_doc = segment_reader.max_doc();
|
||||||
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
|
|
||||||
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
|
let mut delete_bitset: BitSet =
|
||||||
None => BitSet::with_max_value(max_doc),
|
delete_bitset_opt.unwrap_or_else(|| BitSet::with_max_value(max_doc));
|
||||||
};
|
|
||||||
|
|
||||||
let num_deleted_docs_before = segment.meta().num_deleted_docs();
|
let num_deleted_docs_before = segment.meta().num_deleted_docs();
|
||||||
|
|
||||||
compute_deleted_bitset(
|
compute_deleted_bitset(
|
||||||
&mut delete_bitset,
|
&mut delete_bitset,
|
||||||
&segment_reader,
|
&segment_reader,
|
||||||
segment_entry.delete_cursor(),
|
&mut delete_cursor,
|
||||||
&DocToOpstampMapping::None,
|
&DocToOpstampMapping::None,
|
||||||
target_opstamp,
|
target_opstamp,
|
||||||
)?;
|
)?;
|
||||||
@@ -179,32 +184,40 @@ pub(crate) fn advance_deletes(
|
|||||||
let num_deleted_docs: u32 = delete_bitset.len() as u32;
|
let num_deleted_docs: u32 = delete_bitset.len() as u32;
|
||||||
if num_deleted_docs > num_deleted_docs_before {
|
if num_deleted_docs > num_deleted_docs_before {
|
||||||
// There are new deletes. We need to write a new delete file.
|
// There are new deletes. We need to write a new delete file.
|
||||||
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
|
let mut delete_file = segment
|
||||||
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
|
.with_delete_meta(num_deleted_docs as u32, target_opstamp)
|
||||||
|
.open_write(SegmentComponent::DELETE)?;
|
||||||
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
|
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
|
||||||
delete_file.terminate()?;
|
delete_file.terminate()?;
|
||||||
|
segment_entry.reset_delete_meta(num_deleted_docs as u32, target_opstamp);
|
||||||
}
|
}
|
||||||
|
|
||||||
segment_entry.set_meta(segment.meta().clone());
|
// Regardless of whether we did end up having to write a new file or not
|
||||||
|
// we advance the `delete_cursor`. This is an optimisation. We want to ensure we do not
|
||||||
|
// check whether a given deleted term matches any of our docs more than once.
|
||||||
|
segment_entry.set_delete_cursor(delete_cursor);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn index_documents(
|
fn index_documents(
|
||||||
memory_budget: usize,
|
config: IndexWriterConfig,
|
||||||
segment: Segment,
|
segment: Segment,
|
||||||
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
|
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
|
||||||
segment_updater: &mut SegmentUpdater,
|
segment_updater: &mut SegmentUpdater,
|
||||||
|
tokenizers: &TokenizerManager,
|
||||||
mut delete_cursor: DeleteCursor,
|
mut delete_cursor: DeleteCursor,
|
||||||
|
memory_manager: ResourceManager
|
||||||
) -> crate::Result<bool> {
|
) -> crate::Result<bool> {
|
||||||
let schema = segment.schema();
|
let schema = segment.schema();
|
||||||
|
|
||||||
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
|
let mut segment_writer = SegmentWriter::for_segment(
|
||||||
|
&config, segment, &schema, tokenizers, memory_manager)?;
|
||||||
for document_group in grouped_document_iterator {
|
for document_group in grouped_document_iterator {
|
||||||
for doc in document_group {
|
for doc in document_group {
|
||||||
segment_writer.add_document(doc, &schema)?;
|
segment_writer.add_document(doc, &schema)?;
|
||||||
}
|
}
|
||||||
let mem_usage = segment_writer.mem_usage();
|
let mem_usage = segment_writer.mem_usage();
|
||||||
if mem_usage >= memory_budget - MARGIN_IN_BYTES {
|
if mem_usage >= config.heap_size_before_flushing() {
|
||||||
info!(
|
info!(
|
||||||
"Buffer limit reached, flushing segment with maxdoc={}.",
|
"Buffer limit reached, flushing segment with maxdoc={}.",
|
||||||
segment_writer.max_doc()
|
segment_writer.max_doc()
|
||||||
@@ -223,24 +236,14 @@ fn index_documents(
|
|||||||
// the worker thread.
|
// the worker thread.
|
||||||
assert!(max_doc > 0);
|
assert!(max_doc > 0);
|
||||||
|
|
||||||
let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
|
let (segment, doc_opstamps): (Segment, Vec<Opstamp>) = segment_writer.finalize()?;
|
||||||
|
|
||||||
let segment_with_max_doc = segment.with_max_doc(max_doc);
|
|
||||||
|
|
||||||
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
|
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
|
||||||
|
|
||||||
let delete_bitset_opt = apply_deletes(
|
let delete_bitset_opt =
|
||||||
&segment_with_max_doc,
|
apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
|
||||||
&mut delete_cursor,
|
|
||||||
&doc_opstamps,
|
|
||||||
last_docstamp,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let segment_entry = SegmentEntry::new(
|
let segment_entry = SegmentEntry::new(segment, delete_cursor, delete_bitset_opt);
|
||||||
segment_with_max_doc.meta().clone(),
|
|
||||||
delete_cursor,
|
|
||||||
delete_bitset_opt,
|
|
||||||
);
|
|
||||||
block_on(segment_updater.schedule_add_segment(segment_entry))?;
|
block_on(segment_updater.schedule_add_segment(segment_entry))?;
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
@@ -292,21 +295,10 @@ impl IndexWriter {
|
|||||||
/// If the heap size per thread is too small, panics.
|
/// If the heap size per thread is too small, panics.
|
||||||
pub(crate) fn new(
|
pub(crate) fn new(
|
||||||
index: &Index,
|
index: &Index,
|
||||||
num_threads: usize,
|
mut config: IndexWriterConfig,
|
||||||
heap_size_in_bytes_per_thread: usize,
|
|
||||||
directory_lock: DirectoryLock,
|
directory_lock: DirectoryLock,
|
||||||
) -> crate::Result<IndexWriter> {
|
) -> crate::Result<IndexWriter> {
|
||||||
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
|
config.validate()?;
|
||||||
let err_msg = format!(
|
|
||||||
"The heap size per thread needs to be at least {}.",
|
|
||||||
HEAP_SIZE_MIN
|
|
||||||
);
|
|
||||||
return Err(TantivyError::InvalidArgument(err_msg));
|
|
||||||
}
|
|
||||||
if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
|
|
||||||
let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
|
|
||||||
return Err(TantivyError::InvalidArgument(err_msg));
|
|
||||||
}
|
|
||||||
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
||||||
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
||||||
|
|
||||||
@@ -314,16 +306,26 @@ impl IndexWriter {
|
|||||||
|
|
||||||
let current_opstamp = index.load_metas()?.opstamp;
|
let current_opstamp = index.load_metas()?.opstamp;
|
||||||
|
|
||||||
|
let meta = index.load_metas()?;
|
||||||
|
|
||||||
let stamper = Stamper::new(current_opstamp);
|
let stamper = Stamper::new(current_opstamp);
|
||||||
|
|
||||||
|
let commited_segments = SegmentRegister::new(
|
||||||
|
index.directory(),
|
||||||
|
&index.schema(),
|
||||||
|
meta.segments,
|
||||||
|
&delete_queue.cursor(),
|
||||||
|
);
|
||||||
|
let segment_registers = Arc::new(RwLock::new(SegmentRegisters::new(commited_segments)));
|
||||||
|
|
||||||
let segment_updater =
|
let segment_updater =
|
||||||
SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
|
SegmentUpdater::create(segment_registers.clone(), index.clone(), stamper.clone())?;
|
||||||
|
|
||||||
let mut index_writer = IndexWriter {
|
let mut index_writer = IndexWriter {
|
||||||
_directory_lock: Some(directory_lock),
|
_directory_lock: Some(directory_lock),
|
||||||
|
|
||||||
heap_size_in_bytes_per_thread,
|
|
||||||
index: index.clone(),
|
index: index.clone(),
|
||||||
|
config,
|
||||||
|
|
||||||
operation_receiver: document_receiver,
|
operation_receiver: document_receiver,
|
||||||
operation_sender: document_sender,
|
operation_sender: document_sender,
|
||||||
@@ -331,7 +333,6 @@ impl IndexWriter {
|
|||||||
segment_updater,
|
segment_updater,
|
||||||
|
|
||||||
workers_join_handle: vec![],
|
workers_join_handle: vec![],
|
||||||
num_threads,
|
|
||||||
|
|
||||||
delete_queue,
|
delete_queue,
|
||||||
|
|
||||||
@@ -339,6 +340,10 @@ impl IndexWriter {
|
|||||||
stamper,
|
stamper,
|
||||||
|
|
||||||
worker_id: 0,
|
worker_id: 0,
|
||||||
|
segment_registers,
|
||||||
|
on_commit: Default::default(),
|
||||||
|
|
||||||
|
memory_manager: Default::default()
|
||||||
};
|
};
|
||||||
index_writer.start_workers()?;
|
index_writer.start_workers()?;
|
||||||
Ok(index_writer)
|
Ok(index_writer)
|
||||||
@@ -346,7 +351,7 @@ impl IndexWriter {
|
|||||||
|
|
||||||
fn drop_sender(&mut self) {
|
fn drop_sender(&mut self) {
|
||||||
let (sender, _receiver) = channel::bounded(1);
|
let (sender, _receiver) = channel::bounded(1);
|
||||||
self.operation_sender = sender;
|
mem::replace(&mut self.operation_sender, sender);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// If there are some merging threads, blocks until they all finish their work and
|
/// If there are some merging threads, blocks until they all finish their work and
|
||||||
@@ -366,23 +371,11 @@ impl IndexWriter {
|
|||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let result = self
|
self
|
||||||
.segment_updater
|
.segment_updater
|
||||||
.wait_merging_thread()
|
.wait_merging_thread();
|
||||||
.map_err(|_| TantivyError::ErrorInThread("Failed to join merging thread.".into()));
|
|
||||||
|
|
||||||
if let Err(ref e) = result {
|
Ok(())
|
||||||
error!("Some merging thread failed {:?}", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
result
|
|
||||||
}
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
|
|
||||||
let delete_cursor = self.delete_queue.cursor();
|
|
||||||
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
|
|
||||||
block_on(self.segment_updater.schedule_add_segment(segment_entry))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new segment.
|
/// Creates a new segment.
|
||||||
@@ -405,8 +398,9 @@ impl IndexWriter {
|
|||||||
|
|
||||||
let mut delete_cursor = self.delete_queue.cursor();
|
let mut delete_cursor = self.delete_queue.cursor();
|
||||||
|
|
||||||
let mem_budget = self.heap_size_in_bytes_per_thread;
|
|
||||||
let index = self.index.clone();
|
let index = self.index.clone();
|
||||||
|
let config = self.config.clone();
|
||||||
|
let memory_manager = self.memory_manager.clone();
|
||||||
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
|
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
|
||||||
.name(format!("thrd-tantivy-index{}", self.worker_id))
|
.name(format!("thrd-tantivy-index{}", self.worker_id))
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
@@ -435,11 +429,13 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
let segment = index.new_segment();
|
let segment = index.new_segment();
|
||||||
index_documents(
|
index_documents(
|
||||||
mem_budget,
|
config.clone(),
|
||||||
segment,
|
segment,
|
||||||
&mut document_iterator,
|
&mut document_iterator,
|
||||||
&mut segment_updater,
|
&mut segment_updater,
|
||||||
|
index.tokenizers(),
|
||||||
delete_cursor.clone(),
|
delete_cursor.clone(),
|
||||||
|
memory_manager.clone()
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
})?;
|
})?;
|
||||||
@@ -449,7 +445,7 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to the merge policy.
|
/// Accessor to the merge policy.
|
||||||
pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
|
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
|
||||||
self.segment_updater.get_merge_policy()
|
self.segment_updater.get_merge_policy()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -459,7 +455,7 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn start_workers(&mut self) -> crate::Result<()> {
|
fn start_workers(&mut self) -> crate::Result<()> {
|
||||||
for _ in 0..self.num_threads {
|
for _ in 0..self.config.max_indexing_threads {
|
||||||
self.add_indexing_worker()?;
|
self.add_indexing_worker()?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -536,7 +532,6 @@ impl IndexWriter {
|
|||||||
/// when no documents are remaining.
|
/// when no documents are remaining.
|
||||||
///
|
///
|
||||||
/// Returns the former segment_ready channel.
|
/// Returns the former segment_ready channel.
|
||||||
#[allow(unused_must_use)]
|
|
||||||
fn recreate_document_channel(&mut self) -> OperationReceiver {
|
fn recreate_document_channel(&mut self) -> OperationReceiver {
|
||||||
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
||||||
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
||||||
@@ -565,18 +560,14 @@ impl IndexWriter {
|
|||||||
.take()
|
.take()
|
||||||
.expect("The IndexWriter does not have any lock. This is a bug, please report.");
|
.expect("The IndexWriter does not have any lock. This is a bug, please report.");
|
||||||
|
|
||||||
let new_index_writer: IndexWriter = IndexWriter::new(
|
let new_index_writer: IndexWriter =
|
||||||
&self.index,
|
IndexWriter::new(&self.index, self.config.clone(), directory_lock)?;
|
||||||
self.num_threads,
|
|
||||||
self.heap_size_in_bytes_per_thread,
|
|
||||||
directory_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// the current `self` is dropped right away because of this call.
|
// the current `self` is dropped right away because of this call.
|
||||||
//
|
//
|
||||||
// This will drop the document queue, and the thread
|
// This will drop the document queue, and the thread
|
||||||
// should terminate.
|
// should terminate.
|
||||||
*self = new_index_writer;
|
mem::replace(self, new_index_writer);
|
||||||
|
|
||||||
// Drains the document receiver pipeline :
|
// Drains the document receiver pipeline :
|
||||||
// Workers don't need to index the pending documents.
|
// Workers don't need to index the pending documents.
|
||||||
@@ -609,7 +600,7 @@ impl IndexWriter {
|
|||||||
/// It is also possible to add a payload to the `commit`
|
/// It is also possible to add a payload to the `commit`
|
||||||
/// using this API.
|
/// using this API.
|
||||||
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
|
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
|
||||||
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
|
pub fn prepare_commit(&mut self, soft_commit: bool) -> crate::Result<PreparedCommit> {
|
||||||
// Here, because we join all of the worker threads,
|
// Here, because we join all of the worker threads,
|
||||||
// all of the segment update for this commit have been
|
// all of the segment update for this commit have been
|
||||||
// sent.
|
// sent.
|
||||||
@@ -637,7 +628,7 @@ impl IndexWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let commit_opstamp = self.stamper.stamp();
|
let commit_opstamp = self.stamper.stamp();
|
||||||
let prepared_commit = PreparedCommit::new(self, commit_opstamp);
|
let prepared_commit = PreparedCommit::new(self, commit_opstamp, soft_commit);
|
||||||
info!("Prepared commit {}", commit_opstamp);
|
info!("Prepared commit {}", commit_opstamp);
|
||||||
Ok(prepared_commit)
|
Ok(prepared_commit)
|
||||||
}
|
}
|
||||||
@@ -657,7 +648,25 @@ impl IndexWriter {
|
|||||||
/// that made it in the commit.
|
/// that made it in the commit.
|
||||||
///
|
///
|
||||||
pub fn commit(&mut self) -> crate::Result<Opstamp> {
|
pub fn commit(&mut self) -> crate::Result<Opstamp> {
|
||||||
self.prepare_commit()?.commit()
|
self.prepare_commit(false)?.commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn soft_commit(&mut self) -> crate::Result<Opstamp> {
|
||||||
|
self.prepare_commit(true)?.commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn trigger_commit(&self) -> impl Future<Output = ()> {
|
||||||
|
self.on_commit.broadcast()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reader(&self, num_searchers: usize) -> crate::Result<IndexReader> {
|
||||||
|
let nrt_reader = NRTReader::create(
|
||||||
|
num_searchers,
|
||||||
|
self.index.clone(),
|
||||||
|
self.segment_registers.clone(),
|
||||||
|
&self.on_commit,
|
||||||
|
)?;
|
||||||
|
Ok(IndexReader::NRT(nrt_reader))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
|
pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
|
||||||
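
This hunk is where the nrt branch diverges most visibly from master: `prepare_commit` gains a `soft_commit` flag, `soft_commit()` is shorthand for `prepare_commit(true)?.commit()`, and the writer can hand out an NRT `IndexReader` directly. A hedged usage sketch based only on the signatures shown here (the precise durability and visibility guarantees of a soft commit are not spelled out in this diff); assumes `index` and `text_field` exist and the code runs inside a function returning `crate::Result<()>`:

let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
// NRT reader obtained from the writer itself, with one pooled searcher.
let reader = index_writer.reader(1)?;

index_writer.add_document(doc!(text_field => "hello"));
// Cheaper commit path; presumably makes the document reachable through the
// NRT reader without paying the full durable-commit cost.
index_writer.soft_commit()?;

// Regular durable commit, equivalent to prepare_commit(false)?.commit().
index_writer.commit()?;
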
@@ -800,7 +809,7 @@ mod tests {
         let mut schema_builder = schema::Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let index_writer = index.writer_for_tests().unwrap();
+        let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let operations = vec![
             UserOperation::Add(doc!(text_field=>"a")),
             UserOperation::Add(doc!(text_field=>"b")),
@@ -815,7 +824,7 @@ mod tests {
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());

-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(text_field => "hello1"));
         index_writer.add_document(doc!(text_field => "hello2"));
         assert!(index_writer.commit().is_ok());
@@ -864,7 +873,7 @@ mod tests {
             .reload_policy(ReloadPolicy::Manual)
             .try_into()
             .unwrap();
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let a_term = Term::from_field_text(text_field, "a");
         let b_term = Term::from_field_text(text_field, "b");
         let operations = vec![
@@ -926,8 +935,8 @@ mod tests {
     fn test_lockfile_already_exists_error_msg() {
         let schema_builder = schema::Schema::builder();
         let index = Index::create_in_ram(schema_builder.build());
-        let _index_writer = index.writer_for_tests().unwrap();
-        match index.writer_for_tests() {
+        let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        match index.writer_with_num_threads(1, 3_000_000) {
             Err(err) => {
                 let err_msg = err.to_string();
                 assert!(err_msg.contains("already an `IndexWriter`"));
@@ -979,7 +988,7 @@ mod tests {
         let num_docs_containing = |s: &str| {
             let searcher = reader.searcher();
             let term = Term::from_field_text(text_field, s);
-            searcher.doc_freq(&term).unwrap()
+            searcher.doc_freq(&term)
         };

         {
@@ -1015,7 +1024,7 @@ mod tests {
             .unwrap();
         let num_docs_containing = |s: &str| {
             let term_a = Term::from_field_text(text_field, s);
-            reader.searcher().doc_freq(&term_a).unwrap()
+            reader.searcher().doc_freq(&term_a)
         };
         {
             // writing the segment
@@ -1055,7 +1064,8 @@ mod tests {
             index_writer.add_document(doc!(text_field => "a"));
         }
         {
-            let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
+            let mut prepared_commit =
+                index_writer.prepare_commit(false).expect("commit failed");
             prepared_commit.set_payload("first commit");
             prepared_commit.commit().expect("commit failed");
         }
@@ -1088,7 +1098,8 @@ mod tests {
             index_writer.add_document(doc!(text_field => "a"));
         }
         {
-            let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
+            let mut prepared_commit =
+                index_writer.prepare_commit(false).expect("commit failed");
             prepared_commit.set_payload("first commit");
             prepared_commit.abort().expect("commit failed");
         }
@@ -1110,7 +1121,6 @@ mod tests {
                 .unwrap()
                 .searcher()
                 .doc_freq(&term_a)
-                .unwrap()
         };
         assert_eq!(num_docs_containing("a"), 0);
         assert_eq!(num_docs_containing("b"), 100);
@@ -1130,7 +1140,7 @@ mod tests {
             reader.reload().unwrap();
             let searcher = reader.searcher();
             let term = Term::from_field_text(text_field, s);
-            searcher.doc_freq(&term).unwrap()
+            searcher.doc_freq(&term)
         };
         let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();

@@ -1181,15 +1191,7 @@ mod tests {

         // working with an empty index == no documents
         let term_b = Term::from_field_text(text_field, "b");
-        assert_eq!(
-            index
-                .reader()
-                .unwrap()
-                .searcher()
-                .doc_freq(&term_b)
-                .unwrap(),
-            0
-        );
+        assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
     }

     #[test]
@@ -1209,15 +1211,7 @@ mod tests {

         let term_a = Term::from_field_text(text_field, "a");
         // expect the document with that term to be in the index
-        assert_eq!(
-            index
-                .reader()
-                .unwrap()
-                .searcher()
-                .doc_freq(&term_a)
-                .unwrap(),
-            1
-        );
+        assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
     }

     #[test]
@@ -1243,15 +1237,7 @@ mod tests {
         // Find original docs in the index
         let term_a = Term::from_field_text(text_field, "a");
         // expect the document with that term to be in the index
-        assert_eq!(
-            index
-                .reader()
-                .unwrap()
-                .searcher()
-                .doc_freq(&term_a)
-                .unwrap(),
-            1
-        );
+        assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
     }

     #[test]
@@ -1286,9 +1272,46 @@ mod tests {
         let idfield = schema_builder.add_text_field("id", STRING);
         schema_builder.add_text_field("optfield", STRING);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(idfield=>"myid"));
         let commit = index_writer.commit();
         assert!(commit.is_ok());
     }
+
+    #[test]
+    fn test_index_writer_reader() {
+        let mut schema_builder = schema::Schema::builder();
+        let idfield = schema_builder.add_text_field("id", STRING);
+        schema_builder.add_text_field("optfield", STRING);
+        let index = Index::create_in_ram(schema_builder.build());
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        index_writer.add_document(doc!(idfield=>"myid"));
+        assert!(index_writer.commit().is_ok());
+        let reader = index_writer.reader(2).unwrap();
+        let searcher = reader.searcher();
+        assert_eq!(searcher.num_docs(), 1u64);
+        index_writer.add_document(doc!(idfield=>"myid"));
+        assert!(index_writer.commit().is_ok());
+        assert_eq!(reader.searcher().num_docs(), 2u64);
+        assert_eq!(searcher.num_docs(), 1u64);
+    }
+
+    #[test]
+    fn test_index_writer_reader_soft_commit() {
+        let mut schema_builder = schema::Schema::builder();
+        let idfield = schema_builder.add_text_field("id", STRING);
+        schema_builder.add_text_field("optfield", STRING);
+        let index = Index::create_in_ram(schema_builder.build());
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        index_writer.add_document(doc!(idfield=>"myid"));
+        assert!(index_writer.soft_commit().is_ok());
+        let nrt_reader = index_writer.reader(2).unwrap();
+        let normal_reader = index.reader_builder().try_into().unwrap();
+        assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
+        assert_eq!(normal_reader.searcher().num_docs(), 0u64);
+        assert!(index_writer.commit().is_ok());
+        assert!(normal_reader.reload().is_ok());
+        assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
+        assert_eq!(normal_reader.searcher().num_docs(), 1u64);
+    }
 }
src/indexer/index_writer_config.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
+use serde::{Deserialize, Serialize};
+
+// Size of the margin for the heap. A segment is closed when the remaining memory
+// in the heap goes below MARGIN_IN_BYTES.
+const MARGIN_IN_BYTES: u64 = 1_000_000;
+
+// We impose the memory per thread to be at least 3 MB.
+const HEAP_SIZE_MIN: u64 = MARGIN_IN_BYTES * 3u64;
+const HEAP_SIZE_MAX: u64 = u32::max_value() as u64 - MARGIN_IN_BYTES;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct IndexWriterConfig {
+    pub max_indexing_threads: usize,
+    pub max_merging_threads: usize,
+    pub memory_budget: u64,
+    pub store_flush_num_bytes: u64,
+    pub persist_low: u64,
+    pub persist_high: u64,
+}
+
+impl Default for IndexWriterConfig {
+    fn default() -> Self {
+        IndexWriterConfig {
+            max_indexing_threads: 1,
+            max_merging_threads: 3,
+            memory_budget: 50_000_000u64,
+            store_flush_num_bytes: 10_000_000u64,
+            persist_low: 10_000_000u64,
+            persist_high: 50_000_000u64,
+        }
+    }
+}
+
+impl IndexWriterConfig {
+    #[cfg(test)]
+    pub fn for_test() -> IndexWriterConfig {
+        IndexWriterConfig {
+            max_indexing_threads: 1,
+            max_merging_threads: 5,
+            memory_budget: 4_000_000u64,
+            store_flush_num_bytes: 500_000u64,
+            persist_low: 2_000_000u64,
+            persist_high: 3_000_000u64,
+        }
+    }
+
+    // Ensures the `IndexWriterConfig` is correct.
+    //
+    // This method checks that the values in the `IndexWriterConfig`
+    // are valid. If it is not, it may mutate some of the values (like `max_num_threads`) to
+    // fit the contracts or return an error with an explicit error message.
+    //
+    // If called twice, the config is guaranteed to not be updated the second time.
+    pub fn validate(&mut self) -> crate::Result<()> {
+        if self.memory_budget < HEAP_SIZE_MIN {
+            let err_msg = format!(
+                "The heap size per thread needs to be at least {}.",
+                HEAP_SIZE_MIN
+            );
+            return Err(crate::TantivyError::InvalidArgument(err_msg));
+        }
+        let heap_size_in_bytes_per_thread = self.heap_size_in_byte_per_thread();
+        if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
+            let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
+            return Err(crate::TantivyError::InvalidArgument(err_msg));
+        }
+        if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
+            self.max_indexing_threads = (self.memory_budget / HEAP_SIZE_MIN) as usize;
+        }
+        Ok(())
+    }
+
+    pub fn heap_size_in_byte_per_thread(&self) -> u64 {
+        self.memory_budget / self.max_indexing_threads as u64
+    }
+
+    pub fn heap_size_before_flushing(&self) -> u64 {
+        self.heap_size_in_byte_per_thread() - MARGIN_IN_BYTES
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::IndexWriterConfig;
+
+    #[test]
+    fn test_index_writer_config_simple() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 3,
+            memory_budget: super::HEAP_SIZE_MIN * 3,
+            ..Default::default()
+        };
+        assert!(index.validate().is_ok());
+        assert_eq!(index.max_indexing_threads, 3);
+        assert_eq!(index.heap_size_in_byte_per_thread(), super::HEAP_SIZE_MIN);
+    }
+
+    #[test]
+    fn test_index_writer_config_reduce_num_threads() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 3,
+            memory_budget: super::HEAP_SIZE_MIN,
+            ..Default::default()
+        };
+        assert!(index.validate().is_ok());
+        assert_eq!(index.max_indexing_threads, 1);
+        assert_eq!(index.heap_size_in_byte_per_thread(), super::HEAP_SIZE_MIN);
+    }
+
+    #[test]
+    fn test_index_writer_config_not_enough_memory() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 1,
+            memory_budget: super::HEAP_SIZE_MIN - 1,
+            ..Default::default()
+        };
+        assert!(
+            matches!(index.validate(), Err(crate::TantivyError::InvalidArgument(msg)) if msg.contains("The heap size per thread needs to be at least"))
+        );
+    }
+
+    #[test]
+    fn test_index_writer_config_too_much_memory() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 1,
+            memory_budget: (u32::max_value() as u64) + 1,
+            ..Default::default()
+        };
+        assert!(
+            matches!(index.validate(), Err(crate::TantivyError::InvalidArgument(msg)) if msg.contains("The heap size per thread cannot exceed"))
+        );
+    }
+}
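The `validate()` contract above is easy to misread: a total budget below `HEAP_SIZE_MIN` is rejected outright, while a budget that is merely too small for the requested thread count silently lowers `max_indexing_threads`. A small sketch of that arithmetic (the crate-root re-export of `IndexWriterConfig` is taken from the `pub use` added in `mod.rs` below; everything else is plain arithmetic on the fields shown above):

```rust
use tantivy::IndexWriterConfig;

fn configure(threads: usize, budget: u64) -> tantivy::Result<IndexWriterConfig> {
    let mut config = IndexWriterConfig {
        max_indexing_threads: threads,
        memory_budget: budget,
        ..Default::default()
    };
    // Example: threads = 4, budget = 9 MB. The per-thread heap would be 2.25 MB,
    // below the 3 MB minimum, so validate() lowers max_indexing_threads to
    // 9_000_000 / 3_000_000 = 3 instead of returning an error.
    config.validate()?;
    Ok(config)
}
```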
@@ -8,7 +8,7 @@ const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
 const DEFAULT_MIN_MERGE_SIZE: usize = 8;
 const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;

-/// `LogMergePolicy` tries to merge segments that have a similar number of
+/// `LogMergePolicy` tries tries to merge segments that have a similar number of
 /// documents.
 #[derive(Debug, Clone)]
 pub struct LogMergePolicy {
@@ -54,6 +54,10 @@ impl LogMergePolicy {

 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
+        if segments.is_empty() {
+            return Vec::new();
+        }
+
         let mut size_sorted_tuples = segments
             .iter()
             .map(SegmentMeta::num_docs)
@@ -63,35 +67,27 @@ impl MergePolicy for LogMergePolicy {

         size_sorted_tuples.sort_by(|x, y| y.1.cmp(&(x.1)));

-        if size_sorted_tuples.len() <= 1 {
-            return Vec::new();
-        }
-
         let size_sorted_log_tuples: Vec<_> = size_sorted_tuples
             .into_iter()
             .map(|(ind, num_docs)| (ind, f64::from(self.clip_min_size(num_docs)).log2()))
             .collect();

-        if let Some(&(first_ind, first_score)) = size_sorted_log_tuples.first() {
-            let mut current_max_log_size = first_score;
-            let mut levels = vec![vec![first_ind]];
-            for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
-                if score < (current_max_log_size - self.level_log_size) {
-                    current_max_log_size = score;
-                    levels.push(Vec::new());
-                }
-                levels.last_mut().unwrap().push(ind);
-            }
-            levels
-                .iter()
-                .filter(|level| level.len() >= self.min_merge_size)
-                .map(|ind_vec| {
-                    MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect())
-                })
-                .collect()
-        } else {
-            return vec![];
-        }
+        let (first_ind, first_score) = size_sorted_log_tuples[0];
+        let mut current_max_log_size = first_score;
+        let mut levels = vec![vec![first_ind]];
+        for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
+            if score < (current_max_log_size - self.level_log_size) {
+                current_max_log_size = score;
+                levels.push(Vec::new());
+            }
+            levels.last_mut().unwrap().push(ind);
+        }
+
+        levels
+            .iter()
+            .filter(|level| level.len() >= self.min_merge_size)
+            .map(|ind_vec| MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect()))
+            .collect()
     }
 }

@@ -183,7 +179,6 @@ mod tests {
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
         assert_eq!(result_list.len(), 2);
     }
-
     #[test]
     fn test_log_merge_policy_small_segments() {
         // segments under min_layer_size are merged together
@@ -199,17 +194,6 @@ mod tests {
         assert_eq!(result_list.len(), 1);
     }

-    #[test]
-    fn test_log_merge_policy_all_segments_too_large_to_merge() {
-        let eight_large_segments: Vec<SegmentMeta> =
-            std::iter::repeat_with(|| create_random_segment_meta(100_001))
-                .take(8)
-                .collect();
-        assert!(test_merge_policy()
-            .compute_merge_candidates(&eight_large_segments)
-            .is_empty());
-    }
-
     #[test]
     fn test_large_merge_segments() {
         let test_input = vec![
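For readers unfamiliar with the policy, the level computation above bucketizes segments by `log2(num_docs)`: segments are sorted by decreasing size, a new level starts whenever a segment's log-size falls more than `level_log_size` below the largest log-size of its level, and only levels with at least `min_merge_size` segments become merge candidates. A standalone illustration of that bucketing (plain numbers, no tantivy types; names are chosen to mirror the fields above):

```rust
// Standalone sketch of the log-bucketing used by compute_merge_candidates.
fn bucket_by_log_size(mut num_docs: Vec<u32>, level_log_size: f64, min_size: u32) -> Vec<Vec<u32>> {
    num_docs.sort_by(|a, b| b.cmp(a)); // largest segment first
    let mut levels: Vec<Vec<u32>> = Vec::new();
    let mut current_max_log = f64::NEG_INFINITY;
    for docs in num_docs {
        let score = f64::from(docs.max(min_size)).log2(); // clip_min_size equivalent
        if levels.is_empty() || score < current_max_log - level_log_size {
            current_max_log = score;
            levels.push(Vec::new());
        }
        levels.last_mut().unwrap().push(docs);
    }
    levels
}

// bucket_by_log_size(vec![100_000, 90_000, 1_000, 900, 10], 0.75, 100)
// yields [[100_000, 90_000], [1_000, 900], [10]]; only levels that reach
// min_merge_size would then be turned into MergeCandidates.
```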
@@ -1,17 +1,22 @@
+use crate::indexer::resource_manager::{Allocation, ResourceManager};
 use crate::Opstamp;
 use crate::SegmentId;
 use census::{Inventory, TrackedObject};
 use std::collections::HashSet;
+use std::fmt;
 use std::ops::Deref;

-#[derive(Default)]
-pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+#[derive(Default, Clone)]
+pub(crate) struct MergeOperationInventory {
+    inventory: Inventory<InnerMergeOperation>,
+    num_merge_watcher: ResourceManager,
+}

 impl Deref for MergeOperationInventory {
     type Target = Inventory<InnerMergeOperation>;

     fn deref(&self) -> &Self::Target {
-        &self.0
+        &self.inventory
     }
 }

@@ -25,6 +30,10 @@ impl MergeOperationInventory {
         }
         segment_in_merge
     }
+
+    pub fn wait_until_empty(&self) {
+        let _ = self.num_merge_watcher.wait_until_in_range(0..1);
+    }
 }

 /// A `MergeOperation` has two roles.
@@ -47,6 +56,17 @@ pub struct MergeOperation {
 pub(crate) struct InnerMergeOperation {
     target_opstamp: Opstamp,
     segment_ids: Vec<SegmentId>,
+    _allocation: Allocation,
+}
+
+impl fmt::Debug for InnerMergeOperation {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "MergeOp(target_opstamp={:?}, segment_ids={:?})",
+            self.target_opstamp, self.segment_ids
+        )
+    }
 }

 impl MergeOperation {
@@ -55,9 +75,11 @@ impl MergeOperation {
         target_opstamp: Opstamp,
         segment_ids: Vec<SegmentId>,
     ) -> MergeOperation {
+        let allocation = inventory.num_merge_watcher.allocate(1);
         let inner_merge_operation = InnerMergeOperation {
             target_opstamp,
             segment_ids,
+            _allocation: allocation,
         };
         MergeOperation {
             inner: inventory.track(inner_merge_operation),
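The `_allocation` field is what backs `wait_until_empty()`: every live `MergeOperation` holds an allocation of 1 in the inventory's `num_merge_watcher`, and dropping the operation releases it, so the counter reaches zero exactly when no merge is in flight. A minimal sketch of that RAII counting, using only the `ResourceManager`/`Allocation` API introduced later in this diff (the type is crate-internal, so this is illustrative rather than user-facing code):

```rust
// Illustrative only: ResourceManager and Allocation are pub(crate) in this branch.
fn count_running_merges(merge_watcher: &ResourceManager) {
    let first_merge = merge_watcher.allocate(1);  // a MergeOperation is created
    let second_merge = merge_watcher.allocate(1); // a concurrent one
    assert_eq!(merge_watcher.total_amount(), 2);

    drop(first_merge);
    drop(second_merge); // Allocation::drop subtracts the amount again

    // wait_until_empty() is just wait_until_in_range(0..1) on this counter.
    assert_eq!(merge_watcher.wait_until_in_range(0..1), Ok(0));
}
```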
(File diff suppressed because it is too large.)
@@ -2,12 +2,14 @@ pub mod delete_queue;

 mod doc_opstamp_mapping;
 pub mod index_writer;
+mod index_writer_config;
 mod log_merge_policy;
 mod merge_operation;
 pub mod merge_policy;
 pub mod merger;
 pub mod operation;
 mod prepared_commit;
+mod resource_manager;
 mod segment_entry;
 mod segment_manager;
 mod segment_register;
@@ -16,22 +18,25 @@ pub mod segment_updater;
 mod segment_writer;
 mod stamper;

+pub(crate) use self::resource_manager::{Allocation, ResourceManager};
+pub(crate) use self::merge_operation::MergeOperationInventory;
 pub use self::index_writer::IndexWriter;
+pub use self::index_writer_config::IndexWriterConfig;
 pub use self::log_merge_policy::LogMergePolicy;
 pub use self::merge_operation::MergeOperation;
 pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
 pub use self::prepared_commit::PreparedCommit;
 pub use self::segment_entry::SegmentEntry;
 pub use self::segment_manager::SegmentManager;
+pub(crate) use self::segment_manager::SegmentRegisters;
 pub use self::segment_serializer::SegmentSerializer;
 pub use self::segment_writer::SegmentWriter;

 /// Alias for the default merge policy, which is the `LogMergePolicy`.
 pub type DefaultMergePolicy = LogMergePolicy;

-#[cfg(feature = "mmap")]
 #[cfg(test)]
-mod tests_mmap {
+mod tests {
     use crate::schema::{self, Schema};
     use crate::{Index, Term};

@@ -40,7 +45,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         // there must be one deleted document in the segment
         index_writer.add_document(doc!(text_field=>"b"));
         index_writer.delete_term(Term::from_field_text(text_field, "b"));
@@ -9,15 +9,6 @@ pub struct DeleteOperation {
     pub term: Term,
 }

-impl Default for DeleteOperation {
-    fn default() -> Self {
-        DeleteOperation {
-            opstamp: 0u64,
-            term: Term::new(),
-        }
-    }
-}
-
 /// Timestamped Add operation.
 #[derive(Eq, PartialEq, Debug)]
 pub struct AddOperation {
@@ -7,14 +7,20 @@ pub struct PreparedCommit<'a> {
     index_writer: &'a mut IndexWriter,
     payload: Option<String>,
     opstamp: Opstamp,
+    soft_commit: bool,
 }

 impl<'a> PreparedCommit<'a> {
-    pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
+    pub(crate) fn new(
+        index_writer: &'a mut IndexWriter,
+        opstamp: Opstamp,
+        soft_commit: bool,
+    ) -> PreparedCommit<'_> {
         PreparedCommit {
             index_writer,
             payload: None,
             opstamp,
+            soft_commit,
         }
     }

@@ -32,11 +38,12 @@ impl<'a> PreparedCommit<'a> {

     pub fn commit(self) -> crate::Result<Opstamp> {
         info!("committing {}", self.opstamp);
-        let _ = block_on(
-            self.index_writer
-                .segment_updater()
-                .schedule_commit(self.opstamp, self.payload),
-        );
+        block_on(self.index_writer.segment_updater().schedule_commit(
+            self.opstamp,
+            self.payload,
+            self.soft_commit,
+        ))?;
+        block_on(self.index_writer.trigger_commit());
         Ok(self.opstamp)
     }
 }
src/indexer/resource_manager.rs (new file, 213 lines)
@@ -0,0 +1,213 @@
+use std::ops::RangeBounds;
+use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock};
+
+struct LockedData {
+    count: u64,
+    enabled: bool,
+}
+
+impl Default for LockedData {
+    fn default() -> Self {
+        LockedData {
+            count: 0u64,
+            enabled: true,
+        }
+    }
+}
+
+#[derive(Default)]
+struct Inner {
+    resource_level: Mutex<LockedData>,
+    convdvar: Condvar,
+}
+
+/// The resource manager makes it possible to track the level of a given resource.
+/// There is no magic here: it is up to the user to declare how much
+/// of the resource is being held.
+///
+/// Allocation of a resource is bound to the lifetime of an `Allocation` instance.
+///
+/// ```rust
+/// let resource_manager = ResourceManager::default();
+///
+/// ```
+///
+/// In tantivy, this is used to track the number of merging threads and the amount of memory
+/// used by the volatile segments.
+#[derive(Clone, Default)]
+pub struct ResourceManager {
+    inner: Arc<Inner>,
+}
+
+impl ResourceManager {
+    /// Returns the total amount of resource allocated.
+    pub fn total_amount(&self) -> u64 {
+        self.lock().count
+    }
+
+    fn lock(&self) -> MutexGuard<LockedData> {
+        self.inner
+            .resource_level
+            .lock()
+            .expect("Failed to obtain lock for ReservedMemory. This should never happen.")
+    }
+
+    fn record_delta(&self, delta: i64) {
+        if delta == 0i64 {
+            return;
+        }
+        let mut lock = self.lock();
+        let new_val = lock.count as i64 + delta;
+        lock.count = new_val as u64;
+        self.inner.convdvar.notify_all();
+    }
+
+    /// Records a new allocation.
+    ///
+    /// The returned `Allocation` object is used to automatically release the allocated resource
+    /// on drop.
+    pub fn allocate(&self, amount: u64) -> Allocation {
+        self.record_delta(amount as i64);
+        Allocation {
+            resource_manager: self.clone(),
+            amount: RwLock::new(amount),
+        }
+    }
+
+    /// Stops the resource manager.
+    ///
+    /// If any thread is waiting via `.wait_until_in_range(...)`, the method will stop
+    /// blocking and will return an error.
+    pub fn terminate(&self) {
+        self.lock().enabled = false;
+        self.inner.convdvar.notify_all();
+    }
+
+    /// Blocks the current thread until the resource level reaches the given range,
+    /// in a cpu-efficient way.
+    ///
+    /// This method does not necessarily wake up the current thread at every transition
+    /// into the targeted range, but any durable entry in the range will be detected.
+    pub fn wait_until_in_range<R: RangeBounds<u64>>(&self, range: R) -> Result<u64, u64> {
+        let mut levels = self.lock();
+        if !levels.enabled {
+            return Err(levels.count);
+        }
+        while !range.contains(&levels.count) {
+            levels = self.inner.convdvar.wait(levels).unwrap();
+            if !levels.enabled {
+                return Err(levels.count);
+            }
+        }
+        Ok(levels.count)
+    }
+}
+
+pub struct Allocation {
+    resource_manager: ResourceManager,
+    amount: RwLock<u64>,
+}
+
+impl Allocation {
+    pub fn amount(&self) -> u64 {
+        *self.amount.read().unwrap()
+    }
+
+    pub fn modify(&self, new_amount: u64) {
+        let mut wlock = self.amount.write().unwrap();
+        let delta = new_amount as i64 - *wlock as i64;
+        *wlock = new_amount;
+        self.resource_manager.record_delta(delta);
+    }
+}
+
+impl Drop for Allocation {
+    fn drop(&mut self) {
+        let amount = self.amount();
+        self.resource_manager.record_delta(-(amount as i64))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::ResourceManager;
+    use futures::channel::oneshot;
+    use futures::executor::block_on;
+    use std::{mem, thread};
+
+    #[test]
+    fn test_simple_allocation() {
+        let memory = ResourceManager::default();
+        assert_eq!(memory.total_amount(), 0u64);
+        let _allocation = memory.allocate(10u64);
+        assert_eq!(memory.total_amount(), 10u64);
+    }
+
+    #[test]
+    fn test_multiple_allocation() {
+        let memory = ResourceManager::default();
+        assert_eq!(memory.total_amount(), 0u64);
+        let _allocation = memory.allocate(10u64);
+        let _allocation_2 = memory.allocate(11u64);
+        assert_eq!(memory.total_amount(), 21u64);
+    }
+
+    #[test]
+    fn test_release_on_drop() {
+        let memory = ResourceManager::default();
+        assert_eq!(memory.total_amount(), 0u64);
+        let allocation = memory.allocate(10u64);
+        let allocation_2 = memory.allocate(11u64);
+        assert_eq!(memory.total_amount(), 21u64);
+        mem::drop(allocation);
+        assert_eq!(memory.total_amount(), 11u64);
+        mem::drop(allocation_2);
+        assert_eq!(memory.total_amount(), 0u64);
+    }
+
+    #[test]
+    fn test_wait_until() {
+        let memory = ResourceManager::default();
+        let (send, recv) = oneshot::channel::<()>();
+        let memory_clone = memory.clone();
+        thread::spawn(move || {
+            let _allocation1 = memory_clone.allocate(2u64);
+            let _allocation2 = memory_clone.allocate(3u64);
+            let _allocation3 = memory_clone.allocate(4u64);
+            std::mem::drop(_allocation3);
+            assert!(block_on(recv).is_ok());
+        });
+        assert_eq!(memory.wait_until_in_range(5u64..8u64), Ok(5u64));
+        assert!(send.send(()).is_ok());
+    }
+
+    #[test]
+    fn test_modify_amount() {
+        let memory = ResourceManager::default();
+        let alloc = memory.allocate(2u64);
+        assert_eq!(memory.total_amount(), 2u64);
+        assert_eq!(alloc.amount(), 2u64);
+        let alloc2 = memory.allocate(3u64);
+        assert_eq!(memory.total_amount(), 2u64 + 3u64);
+        assert_eq!(alloc2.amount(), 3u64);
+        alloc.modify(14u64);
+        assert_eq!(alloc.amount(), 14u64);
+        assert_eq!(memory.total_amount(), 14u64 + 3u64)
+    }
+
+    #[test]
+    fn test_stop_resource_manager() {
+        let resource_manager = ResourceManager::default();
+        let resource_manager_clone = resource_manager.clone();
+        let (sender, recv) = oneshot::channel();
+        let join_handle = thread::spawn(move || {
+            assert!(sender.send(()).is_ok());
+            resource_manager_clone.wait_until_in_range(10..20)
+        });
+        let _ = block_on(recv);
+        resource_manager.terminate();
+        assert_eq!(join_handle.join().unwrap(), Err(0u64));
+    }
+}
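As a usage summary of this new file: allocations raise the counter, drops lower it, `wait_until_in_range` blocks until the counter enters the range, and `terminate()` unblocks any waiter with an `Err`. A small sketch, essentially a condensed version of the tests above (the type is crate-internal, so this is illustrative code, not a public API):

```rust
// Condensed from the tests above; ResourceManager is crate-internal in this branch.
use std::thread;

fn resource_manager_sketch(manager: ResourceManager) {
    let manager_clone = manager.clone();
    let waiter = thread::spawn(move || {
        // Blocks until the tracked level falls inside 5..8, or Err(level) after terminate().
        manager_clone.wait_until_in_range(5u64..8u64)
    });

    let _a = manager.allocate(2); // level = 2
    let _b = manager.allocate(3); // level = 5, so the waiter can wake up with Ok(5)
    assert_eq!(waiter.join().unwrap(), Ok(5u64));

    // terminate() wakes any remaining waiters with Err(current_level).
    manager.terminate();
    assert_eq!(manager.wait_until_in_range(100..200), Err(5u64));
}
```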
@@ -1,7 +1,9 @@
 use crate::common::BitSet;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
+use crate::directory::ManagedDirectory;
 use crate::indexer::delete_queue::DeleteCursor;
+use crate::{Opstamp, Segment};
 use std::fmt;

 /// A segment entry describes the state of
@@ -19,7 +21,7 @@ use std::fmt;
 /// in the .del file or in the `delete_bitset`.
 #[derive(Clone)]
 pub struct SegmentEntry {
-    meta: SegmentMeta,
+    segment: Segment,
     delete_bitset: Option<BitSet>,
     delete_cursor: DeleteCursor,
 }
@@ -27,47 +29,67 @@ pub struct SegmentEntry {
 impl SegmentEntry {
     /// Create a new `SegmentEntry`
     pub fn new(
-        segment_meta: SegmentMeta,
+        segment: Segment,
         delete_cursor: DeleteCursor,
         delete_bitset: Option<BitSet>,
     ) -> SegmentEntry {
         SegmentEntry {
-            meta: segment_meta,
+            segment,
             delete_bitset,
             delete_cursor,
         }
     }

-    /// Return a reference to the segment entry deleted bitset.
-    ///
-    /// `DocId` in this bitset are flagged as deleted.
-    pub fn delete_bitset(&self) -> Option<&BitSet> {
-        self.delete_bitset.as_ref()
-    }
-
-    /// Set the `SegmentMeta` for this segment.
-    pub fn set_meta(&mut self, segment_meta: SegmentMeta) {
-        self.meta = segment_meta;
-    }
+    pub fn persist(&mut self, dest_directory: ManagedDirectory) -> crate::Result<()> {
+        // TODO take in account delete bitset?
+        self.segment.persist(dest_directory)?;
+        Ok(())
+    }
+
+    pub fn set_delete_cursor(&mut self, delete_cursor: DeleteCursor) {
+        self.delete_cursor = delete_cursor;
+    }
+
+    /// `Takes` (as in Option::take) the delete bitset of a segment entry.
+    /// `DocId` in this bitset are flagged as deleted.
+    pub fn take_delete_bitset(&mut self) -> Option<BitSet> {
+        self.delete_bitset.take()
+    }
+
+    /// Reset the delete information in this segment.
+    ///
+    /// The `SegmentEntry` segment's `SegmentMeta` gets updated, and
+    /// any delete bitset is dropped and set to None.
+    pub fn reset_delete_meta(&mut self, num_deleted_docs: u32, target_opstamp: Opstamp) {
+        self.segment = self
+            .segment
+            .clone()
+            .with_delete_meta(num_deleted_docs, target_opstamp);
+        self.delete_bitset = None;
+    }

     /// Return a reference to the segment_entry's delete cursor
-    pub fn delete_cursor(&mut self) -> &mut DeleteCursor {
-        &mut self.delete_cursor
+    pub fn delete_cursor(&mut self) -> DeleteCursor {
+        self.delete_cursor.clone()
     }

     /// Returns the segment id.
     pub fn segment_id(&self) -> SegmentId {
-        self.meta.id()
+        self.meta().id()
     }

+    /// Returns the `segment` associated to the `SegmentEntry`.
+    pub fn segment(&self) -> &Segment {
+        &self.segment
+    }
+
     /// Accessor to the `SegmentMeta`
     pub fn meta(&self) -> &SegmentMeta {
-        &self.meta
+        self.segment.meta()
     }
 }

 impl fmt::Debug for SegmentEntry {
     fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(formatter, "SegmentEntry({:?})", self.meta)
+        write!(formatter, "SegmentEntry({:?})", self.meta())
     }
 }
@@ -2,15 +2,15 @@ use super::segment_register::SegmentRegister;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::error::TantivyError;
-use crate::indexer::delete_queue::DeleteCursor;
-use crate::indexer::SegmentEntry;
+use crate::indexer::{SegmentEntry, MergeOperationInventory, MergeCandidate, MergeOperation};
+use crate::{Segment, Opstamp};
 use std::collections::hash_set::HashSet;
 use std::fmt::{self, Debug, Formatter};
-use std::sync::RwLock;
+use std::sync::{Arc, RwLock};
 use std::sync::{RwLockReadGuard, RwLockWriteGuard};

 #[derive(Default)]
-struct SegmentRegisters {
+pub(crate) struct SegmentRegisters {
     uncommitted: SegmentRegister,
     committed: SegmentRegister,
 }
@@ -22,6 +22,17 @@ pub(crate) enum SegmentsStatus {
 }

 impl SegmentRegisters {
+    pub fn new(committed: SegmentRegister) -> SegmentRegisters {
+        SegmentRegisters {
+            uncommitted: Default::default(),
+            committed,
+        }
+    }
+
+    pub fn committed_segment(&self) -> Vec<Segment> {
+        self.committed.segments()
+    }
+
     /// Check if all the segments are committed or uncommited.
     ///
     /// If some segment is missing or segments are in a different state (this should not happen
@@ -44,7 +55,8 @@ impl SegmentRegisters {
 /// changes (merges especially)
 #[derive(Default)]
 pub struct SegmentManager {
-    registers: RwLock<SegmentRegisters>,
+    registers: Arc<RwLock<SegmentRegisters>>,
+    merge_operations: MergeOperationInventory,
 }

 impl Debug for SegmentManager {
@@ -58,34 +70,28 @@ impl Debug for SegmentManager {
     }
 }

-pub fn get_mergeable_segments(
-    in_merge_segment_ids: &HashSet<SegmentId>,
-    segment_manager: &SegmentManager,
-) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
-    let registers_lock = segment_manager.read();
-    (
-        registers_lock
-            .committed
-            .get_mergeable_segments(in_merge_segment_ids),
-        registers_lock
-            .uncommitted
-            .get_mergeable_segments(in_merge_segment_ids),
-    )
-}
-
 impl SegmentManager {
-    pub fn from_segments(
-        segment_metas: Vec<SegmentMeta>,
-        delete_cursor: &DeleteCursor,
-    ) -> SegmentManager {
+    pub(crate) fn new(registers: Arc<RwLock<SegmentRegisters>>) -> SegmentManager {
         SegmentManager {
-            registers: RwLock::new(SegmentRegisters {
-                uncommitted: SegmentRegister::default(),
-                committed: SegmentRegister::new(segment_metas, delete_cursor),
-            }),
+            registers,
+            merge_operations: Default::default(),
         }
     }

+    pub fn new_merge_operation(&self, opstamp: Opstamp, merge_candidate: MergeCandidate) -> MergeOperation {
+        MergeOperation::new(
+            &self.merge_operations,
+            opstamp,
+            merge_candidate.0
+        )
+    }
+
+    pub fn wait_merging_thread(&self) {
+        self.merge_operations.wait_until_empty()
+    }
+
     /// Returns all of the segment entries (committed or uncommitted)
     pub fn segment_entries(&self) -> Vec<SegmentEntry> {
         let registers_lock = self.read();
@@ -94,6 +100,34 @@ impl SegmentManager {
         segment_entries
     }

+    /// Returns the segments that are currently not in merge.
+    ///
+    /// They are split over two `Vec`: the committed segments on one hand
+    /// and the uncommitted ones on the other hand.
+    ///
+    /// This method is useful when searching for merge candidates or for segments
+    /// to persist.
+    pub fn segments_not_in_merge(&self) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
+        let in_merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
+        let registers_lock = self.read();
+        (
+            registers_lock
+                .committed
+                .get_mergeable_segments(&in_merge_segment_ids),
+            registers_lock
+                .uncommitted
+                .get_mergeable_segments(&in_merge_segment_ids),
+        )
+    }
+
+    pub fn largest_segment_not_in_merge(&self) -> Option<SegmentMeta> {
+        let (committed, uncommitted) = self.segments_not_in_merge();
+        let mut segments = vec![];
+        segments.extend(committed);
+        segments.extend(uncommitted);
+        None
+    }
+
     // Lock poisoning should never happen :
     // The lock is acquired and released within this class,
     // and the operations cannot panic.
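Note that `largest_segment_not_in_merge` as added here collects the candidate metas but then returns `None` unconditionally, so it reads like a placeholder on this branch. A hedged sketch of what the selection presumably intends (picking the meta with the most documents is an assumption, not something this diff states):

```rust
// Hypothetical completion of the stub above: pick the largest candidate by doc count.
pub fn largest_segment_not_in_merge(&self) -> Option<SegmentMeta> {
    let (committed, uncommitted) = self.segments_not_in_merge();
    committed
        .into_iter()
        .chain(uncommitted)
        .max_by_key(|meta| meta.num_docs())
}
```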
@@ -1,7 +1,10 @@
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
+use crate::directory::ManagedDirectory;
 use crate::indexer::delete_queue::DeleteCursor;
 use crate::indexer::segment_entry::SegmentEntry;
+use crate::schema::Schema;
+use crate::Segment;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::fmt::{self, Debug, Formatter};
@@ -46,6 +49,13 @@ impl SegmentRegister {
             .collect()
     }

+    pub fn segments(&self) -> Vec<Segment> {
+        self.segment_states
+            .values()
+            .map(|segment_entry| segment_entry.segment().clone())
+            .collect()
+    }
+
     pub fn segment_entries(&self) -> Vec<SegmentEntry> {
         self.segment_states.values().cloned().collect()
     }
@@ -79,11 +89,17 @@ impl SegmentRegister {
         self.segment_states.get(segment_id).cloned()
     }

-    pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister {
+    pub fn new(
+        directory: &ManagedDirectory,
+        schema: &Schema,
+        segment_metas: Vec<SegmentMeta>,
+        delete_cursor: &DeleteCursor,
+    ) -> SegmentRegister {
         let mut segment_states = HashMap::new();
         for segment_meta in segment_metas {
             let segment_id = segment_meta.id();
-            let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None);
+            let segment = Segment::new_persisted(segment_meta, directory.clone(), schema.clone());
+            let segment_entry = SegmentEntry::new(segment, delete_cursor.clone(), None);
             segment_states.insert(segment_id, segment_entry);
         }
         SegmentRegister { segment_states }
@@ -95,6 +111,7 @@ mod tests {
     use super::*;
     use crate::core::{SegmentId, SegmentMetaInventory};
    use crate::indexer::delete_queue::*;
+    use crate::indexer::ResourceManager;

     fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
         segment_register
@@ -108,28 +125,34 @@ mod tests {
     fn test_segment_register() {
         let inventory = SegmentMetaInventory::default();
         let delete_queue = DeleteQueue::new();
+        let schema = Schema::builder().build();
+
         let mut segment_register = SegmentRegister::default();
         let segment_id_a = SegmentId::generate_random();
         let segment_id_b = SegmentId::generate_random();
         let segment_id_merged = SegmentId::generate_random();

+        let memory_manager = ResourceManager::default();
+
         {
-            let segment_meta = inventory.new_segment_meta(segment_id_a, 0u32);
-            let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
+            let meta = inventory.new_segment_meta(segment_id_a, 0u32);
+            let segment = Segment::new_volatile(meta, schema.clone(), memory_manager.clone());
+            let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
             segment_register.add_segment_entry(segment_entry);
         }
         assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
         {
-            let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32);
-            let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
+            let meta = inventory.new_segment_meta(segment_id_b, 0u32);
+            let segment = Segment::new_volatile(meta, schema.clone(), memory_manager.clone());
+            let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
             segment_register.add_segment_entry(segment_entry);
         }
         segment_register.remove_segment(&segment_id_a);
         segment_register.remove_segment(&segment_id_b);
         {
             let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32);
-            let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None);
+            let segment_merged = Segment::new_volatile(segment_meta_merged, schema.clone(), memory_manager.clone());
+            let segment_entry = SegmentEntry::new(segment_merged, delete_queue.cursor(), None);
             segment_register.add_segment_entry(segment_entry);
         }
         assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
@@ -3,43 +3,32 @@ use crate::core::SegmentComponent;
 use crate::fastfield::FastFieldSerializer;
 use crate::fieldnorm::FieldNormsSerializer;
 use crate::postings::InvertedIndexSerializer;
-use crate::store::StoreWriter;

 /// Segment serializer is in charge of laying out on disk
 /// the data accumulated and sorted by the `SegmentWriter`.
 pub struct SegmentSerializer {
-    segment: Segment,
-    store_writer: StoreWriter,
     fast_field_serializer: FastFieldSerializer,
-    fieldnorms_serializer: Option<FieldNormsSerializer>,
+    fieldnorms_serializer: FieldNormsSerializer,
     postings_serializer: InvertedIndexSerializer,
 }

 impl SegmentSerializer {
     /// Creates a new `SegmentSerializer`.
-    pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
-        let store_write = segment.open_write(SegmentComponent::STORE)?;
-
+    pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
         let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
         let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;

         let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
         let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;

-        let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
+        let postings_serializer = InvertedIndexSerializer::open(segment)?;
         Ok(SegmentSerializer {
-            segment,
-            store_writer: StoreWriter::new(store_write),
             fast_field_serializer,
-            fieldnorms_serializer: Some(fieldnorms_serializer),
+            fieldnorms_serializer,
             postings_serializer,
         })
     }

-    pub fn segment(&self) -> &Segment {
-        &self.segment
-    }
-
     /// Accessor to the `PostingsSerializer`.
     pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
         &mut self.postings_serializer
@@ -50,26 +39,16 @@ impl SegmentSerializer {
         &mut self.fast_field_serializer
     }

-    /// Extract the field norm serializer.
-    ///
-    /// Note the fieldnorms serializer can only be extracted once.
-    pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
-        self.fieldnorms_serializer.take()
-    }
-
-    /// Accessor to the `StoreWriter`.
-    pub fn get_store_writer(&mut self) -> &mut StoreWriter {
-        &mut self.store_writer
+    /// Accessor to the field norm serializer.
+    pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer {
+        &mut self.fieldnorms_serializer
     }

     /// Finalize the segment serialization.
-    pub fn close(mut self) -> crate::Result<()> {
-        if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
-            fieldnorms_serializer.close()?;
-        }
+    pub fn close(self) -> crate::Result<()> {
         self.fast_field_serializer.close()?;
         self.postings_serializer.close()?;
-        self.store_writer.close()?;
+        self.fieldnorms_serializer.close()?;
         Ok(())
     }
 }
@@ -1,4 +1,4 @@
|
|||||||
use super::segment_manager::{get_mergeable_segments, SegmentManager};
|
use super::segment_manager::SegmentManager;
|
||||||
use crate::core::Index;
|
use crate::core::Index;
|
||||||
use crate::core::IndexMeta;
|
use crate::core::IndexMeta;
|
||||||
use crate::core::Segment;
|
use crate::core::Segment;
|
||||||
@@ -7,22 +7,21 @@ use crate::core::SegmentMeta;
|
|||||||
use crate::core::SerializableSegment;
|
use crate::core::SerializableSegment;
|
||||||
use crate::core::META_FILEPATH;
|
use crate::core::META_FILEPATH;
|
||||||
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
|
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
|
||||||
use crate::indexer::delete_queue::DeleteCursor;
|
|
||||||
use crate::indexer::index_writer::advance_deletes;
|
use crate::indexer::index_writer::advance_deletes;
|
||||||
use crate::indexer::merge_operation::MergeOperationInventory;
|
|
||||||
use crate::indexer::merger::IndexMerger;
|
use crate::indexer::merger::IndexMerger;
|
||||||
use crate::indexer::segment_manager::SegmentsStatus;
|
use crate::indexer::segment_manager::{SegmentRegisters, SegmentsStatus};
|
||||||
use crate::indexer::stamper::Stamper;
|
use crate::indexer::stamper::Stamper;
|
||||||
use crate::indexer::SegmentEntry;
|
use crate::indexer::SegmentEntry;
|
||||||
use crate::indexer::SegmentSerializer;
|
use crate::indexer::SegmentSerializer;
|
||||||
use crate::indexer::{DefaultMergePolicy, MergePolicy};
|
use crate::indexer::{DefaultMergePolicy, MergePolicy};
|
||||||
use crate::indexer::{MergeCandidate, MergeOperation};
|
use crate::indexer::{MergeCandidate, MergeOperation};
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::Opstamp;
|
use crate::{Opstamp, SegmentComponent};
|
||||||
use futures::channel::oneshot;
|
use futures::channel::oneshot;
|
||||||
use futures::executor::{ThreadPool, ThreadPoolBuilder};
|
use futures::executor::{ThreadPool, ThreadPoolBuilder};
|
||||||
use futures::future::Future;
|
use futures::future::Future;
|
||||||
use futures::future::TryFutureExt;
|
use futures::future::TryFutureExt;
|
||||||
|
use serde_json;
|
||||||
use std::borrow::BorrowMut;
|
use std::borrow::BorrowMut;
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
@@ -43,7 +42,7 @@ const NUM_MERGE_THREADS: usize = 4;
|
|||||||
/// and flushed.
|
/// and flushed.
|
||||||
///
|
///
|
||||||
/// This method is not part of tantivy's public API
|
/// This method is not part of tantivy's public API
|
||||||
pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> {
|
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
|
||||||
save_metas(
|
save_metas(
|
||||||
&IndexMeta {
|
&IndexMeta {
|
||||||
segments: Vec::new(),
|
segments: Vec::new(),
|
||||||
@@ -64,7 +63,7 @@ pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Resul
|
|||||||
/// and flushed.
|
/// and flushed.
|
||||||
///
|
///
|
||||||
/// This method is not part of tantivy's public API
|
/// This method is not part of tantivy's public API
|
||||||
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
|
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
|
||||||
info!("save metas");
|
info!("save metas");
|
||||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||||
// Just adding a new line at the end of the buffer.
|
// Just adding a new line at the end of the buffer.
|
||||||
@@ -112,33 +111,36 @@ fn merge(
|
|||||||
target_opstamp: Opstamp,
|
target_opstamp: Opstamp,
|
||||||
) -> crate::Result<SegmentEntry> {
|
) -> crate::Result<SegmentEntry> {
|
||||||
// first we need to apply deletes to our segment.
|
// first we need to apply deletes to our segment.
|
||||||
let merged_segment = index.new_segment();
|
let mut merged_segment = index.new_segment();
|
||||||
|
|
||||||
// First we apply all of the delet to the merged segment, up to the target opstamp.
|
// First we apply all of the delet to the merged segment, up to the target opstamp.
|
||||||
for segment_entry in &mut segment_entries {
|
for segment_entry in &mut segment_entries {
|
||||||
let segment = index.segment(segment_entry.meta().clone());
|
advance_deletes(segment_entry, target_opstamp)?;
|
||||||
advance_deletes(segment, segment_entry, target_opstamp)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let delete_cursor = segment_entries[0].delete_cursor().clone();
|
let delete_cursor = segment_entries[0].delete_cursor();
|
||||||
|
|
||||||
let segments: Vec<Segment> = segment_entries
|
let segments: Vec<Segment> = segment_entries
|
||||||
.iter()
|
.iter()
|
||||||
.map(|segment_entry| index.segment(segment_entry.meta().clone()))
|
.map(|segment_entry| segment_entry.segment().clone())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// An IndexMerger is like a "view" of our merged segments.
|
// An IndexMerger is like a "view" of our merged segments.
|
||||||
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
|
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
|
||||||
|
|
||||||
// ... we just serialize this index merger in our new segment to merge the two segments.
|
// ... we just serialize this index merger in our new segment to merge the two segments.
|
||||||
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
|
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
|
||||||
|
|
||||||
let num_docs = merger.write(segment_serializer)?;
|
let store_wrt = merged_segment.open_write(SegmentComponent::STORE)?;
|
||||||
|
merger.write_storable_fields(store_wrt)?;
|
||||||
|
|
||||||
let merged_segment_id = merged_segment.id();
|
let max_doc = merger.write(segment_serializer)?;
|
||||||
|
|
||||||
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
|
Ok(SegmentEntry::new(
|
||||||
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
|
merged_segment.with_max_doc(max_doc),
|
||||||
|
delete_cursor,
|
||||||
|
None,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct InnerSegmentUpdater {
|
pub(crate) struct InnerSegmentUpdater {
|
||||||
@@ -154,20 +156,18 @@ pub(crate) struct InnerSegmentUpdater {
|
|||||||
|
|
||||||
index: Index,
|
index: Index,
|
||||||
segment_manager: SegmentManager,
|
segment_manager: SegmentManager,
|
||||||
merge_policy: RwLock<Arc<dyn MergePolicy>>,
|
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
|
||||||
killed: AtomicBool,
|
killed: AtomicBool,
|
||||||
stamper: Stamper,
|
stamper: Stamper,
|
||||||
merge_operations: MergeOperationInventory,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentUpdater {
|
impl SegmentUpdater {
|
||||||
pub fn create(
|
pub fn create(
|
||||||
|
segment_registers: Arc<RwLock<SegmentRegisters>>,
|
||||||
index: Index,
|
index: Index,
|
||||||
stamper: Stamper,
|
stamper: Stamper,
|
||||||
delete_cursor: &DeleteCursor,
|
|
||||||
) -> crate::Result<SegmentUpdater> {
|
) -> crate::Result<SegmentUpdater> {
|
||||||
let segments = index.searchable_segment_metas()?;
|
let segment_manager = SegmentManager::new(segment_registers);
|
||||||
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
|
|
||||||
let pool = ThreadPoolBuilder::new()
|
let pool = ThreadPoolBuilder::new()
|
||||||
.name_prefix("segment_updater")
|
.name_prefix("segment_updater")
|
||||||
.pool_size(1)
|
.pool_size(1)
|
||||||
@@ -193,19 +193,18 @@ impl SegmentUpdater {
|
|||||||
merge_thread_pool,
|
merge_thread_pool,
|
||||||
index,
|
index,
|
||||||
segment_manager,
|
segment_manager,
|
||||||
merge_policy: RwLock::new(Arc::new(DefaultMergePolicy::default())),
|
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
|
||||||
killed: AtomicBool::new(false),
|
killed: AtomicBool::new(false),
|
||||||
stamper,
|
stamper,
|
||||||
merge_operations: Default::default(),
|
|
||||||
})))
|
})))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
|
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
|
||||||
self.merge_policy.read().unwrap().clone()
|
self.merge_policy.read().unwrap().clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
|
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
|
||||||
let arc_merge_policy = Arc::from(merge_policy);
|
let arc_merge_policy = Arc::new(merge_policy);
|
||||||
*self.merge_policy.write().unwrap() = arc_merge_policy;
|
*self.merge_policy.write().unwrap() = arc_merge_policy;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -262,8 +261,7 @@ impl SegmentUpdater {
|
|||||||
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
|
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
|
||||||
let mut segment_entries = self.segment_manager.segment_entries();
|
let mut segment_entries = self.segment_manager.segment_entries();
|
||||||
for segment_entry in &mut segment_entries {
|
for segment_entry in &mut segment_entries {
|
||||||
let segment = self.index.segment(segment_entry.meta().clone());
|
advance_deletes(segment_entry, target_opstamp)?;
|
||||||
advance_deletes(segment, segment_entry, target_opstamp)?;
|
|
||||||
}
|
}
|
||||||
Ok(segment_entries)
|
Ok(segment_entries)
|
||||||
}
|
}
|
||||||
@@ -331,12 +329,21 @@ impl SegmentUpdater {
|
|||||||
&self,
|
&self,
|
||||||
opstamp: Opstamp,
|
opstamp: Opstamp,
|
||||||
payload: Option<String>,
|
payload: Option<String>,
|
||||||
|
soft_commit: bool,
|
||||||
) -> impl Future<Output = crate::Result<()>> {
|
) -> impl Future<Output = crate::Result<()>> {
|
||||||
let segment_updater: SegmentUpdater = self.clone();
|
let segment_updater: SegmentUpdater = self.clone();
|
||||||
|
let directory = self.index.directory().clone();
|
||||||
self.schedule_future(async move {
|
self.schedule_future(async move {
|
||||||
let segment_entries = segment_updater.purge_deletes(opstamp)?;
|
let mut segment_entries = segment_updater.purge_deletes(opstamp)?;
|
||||||
|
if !soft_commit {
|
||||||
|
for segment_entry in &mut segment_entries {
|
||||||
|
segment_entry.persist(directory.clone())?;
|
||||||
|
}
|
||||||
|
}
|
||||||
segment_updater.segment_manager.commit(segment_entries);
|
segment_updater.segment_manager.commit(segment_entries);
|
||||||
segment_updater.save_metas(opstamp, payload)?;
|
if !soft_commit {
|
||||||
|
segment_updater.save_metas(opstamp, payload)?;
|
||||||
|
}
|
||||||
let _ = garbage_collect_files(segment_updater.clone()).await;
|
let _ = garbage_collect_files(segment_updater.clone()).await;
|
||||||
segment_updater.consider_merge_options().await;
|
segment_updater.consider_merge_options().await;
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -353,7 +360,7 @@ impl SegmentUpdater {
|
|||||||
|
|
||||||
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
|
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
|
||||||
let commit_opstamp = self.load_metas().opstamp;
|
let commit_opstamp = self.load_metas().opstamp;
|
||||||
MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
|
self.segment_manager.new_merge_operation(commit_opstamp, MergeCandidate(segment_ids.to_vec()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Starts a merge operation. This function will block until the merge operation is effectively
|
// Starts a merge operation. This function will block until the merge operation is effectively
|
||||||
@@ -427,9 +434,8 @@ impl SegmentUpdater {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn consider_merge_options(&self) {
|
async fn consider_merge_options(&self) {
|
||||||
let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
|
|
||||||
let (committed_segments, uncommitted_segments) =
|
let (committed_segments, uncommitted_segments) =
|
||||||
get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
|
self.segment_manager.segments_not_in_merge();
|
||||||
|
|
||||||
// Committed segments cannot be merged with uncommitted_segments.
|
// Committed segments cannot be merged with uncommitted_segments.
|
||||||
// We therefore consider merges using these two sets of segments independently.
|
// We therefore consider merges using these two sets of segments independently.
|
||||||
@@ -440,7 +446,7 @@ impl SegmentUpdater {
|
|||||||
.compute_merge_candidates(&uncommitted_segments)
|
.compute_merge_candidates(&uncommitted_segments)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|merge_candidate| {
|
.map(|merge_candidate| {
|
||||||
MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
|
self.segment_manager.new_merge_operation(current_opstamp, merge_candidate)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
@@ -449,9 +455,10 @@ impl SegmentUpdater {
|
|||||||
.compute_merge_candidates(&committed_segments)
|
.compute_merge_candidates(&committed_segments)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|merge_candidate: MergeCandidate| {
|
.map(|merge_candidate: MergeCandidate| {
|
||||||
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
|
self.segment_manager.new_merge_operation(commit_opstamp, merge_candidate)
|
||||||
});
|
})
|
||||||
merge_candidates.extend(committed_merge_candidates);
|
.collect::<Vec<_>>();
|
||||||
|
merge_candidates.extend(committed_merge_candidates.into_iter());
|
||||||
|
|
||||||
for merge_operation in merge_candidates {
|
for merge_operation in merge_candidates {
|
||||||
if let Err(err) = self.start_merge(merge_operation) {
|
if let Err(err) = self.start_merge(merge_operation) {
|
||||||
@@ -473,17 +480,13 @@ impl SegmentUpdater {
|
|||||||
let end_merge_future = self.schedule_future(async move {
|
let end_merge_future = self.schedule_future(async move {
|
||||||
info!("End merge {:?}", after_merge_segment_entry.meta());
|
info!("End merge {:?}", after_merge_segment_entry.meta());
|
||||||
{
|
{
|
||||||
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
|
let mut delete_cursor = after_merge_segment_entry.delete_cursor();
|
||||||
if let Some(delete_operation) = delete_cursor.get() {
|
if let Some(delete_operation) = delete_cursor.get() {
|
||||||
let committed_opstamp = segment_updater.load_metas().opstamp;
|
let committed_opstamp = segment_updater.load_metas().opstamp;
|
||||||
if delete_operation.opstamp < committed_opstamp {
|
if delete_operation.opstamp < committed_opstamp {
|
||||||
let index = &segment_updater.index;
|
if let Err(e) =
|
||||||
let segment = index.segment(after_merge_segment_entry.meta().clone());
|
advance_deletes(&mut after_merge_segment_entry, committed_opstamp)
|
||||||
if let Err(e) = advance_deletes(
|
{
|
||||||
segment,
|
|
||||||
&mut after_merge_segment_entry,
|
|
||||||
committed_opstamp,
|
|
||||||
) {
|
|
||||||
error!(
|
error!(
|
||||||
"Merge of {:?} was cancelled (advancing deletes failed): {:?}",
|
"Merge of {:?} was cancelled (advancing deletes failed): {:?}",
|
||||||
merge_operation.segment_ids(),
|
merge_operation.segment_ids(),
|
||||||
@@ -521,7 +524,7 @@ impl SegmentUpdater {
|
|||||||
///
|
///
|
||||||
/// Upon termination of the current merging threads,
|
/// Upon termination of the current merging threads,
|
||||||
/// merge opportunity may appear.
|
/// merge opportunity may appear.
|
||||||
///
|
//
|
||||||
/// We keep waiting until the merge policy judges that
|
/// We keep waiting until the merge policy judges that
|
||||||
/// no opportunity is available.
|
/// no opportunity is available.
|
||||||
///
|
///
|
||||||
@@ -532,9 +535,8 @@ impl SegmentUpdater {
|
|||||||
///
|
///
|
||||||
/// Obsolete files will eventually be cleaned up
|
/// Obsolete files will eventually be cleaned up
|
||||||
/// by the directory garbage collector.
|
/// by the directory garbage collector.
|
||||||
pub fn wait_merging_thread(&self) -> crate::Result<()> {
|
pub fn wait_merging_thread(&self) {
|
||||||
self.merge_operations.wait_until_empty();
|
self.segment_manager.wait_merging_thread()
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -543,7 +545,8 @@ mod tests {
|
|||||||
|
|
||||||
use crate::indexer::merge_policy::tests::MergeWheneverPossible;
|
use crate::indexer::merge_policy::tests::MergeWheneverPossible;
|
||||||
use crate::schema::*;
|
use crate::schema::*;
|
||||||
use crate::Index;
|
use crate::{Index, SegmentId};
|
||||||
|
use futures::executor::block_on;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_delete_during_merge() {
|
fn test_delete_during_merge() {
|
||||||
@@ -554,7 +557,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
|
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -607,7 +610,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
|
|
||||||
{
|
{
|
||||||
for _ in 0..100 {
|
for _ in 0..100 {
|
||||||
@@ -678,7 +681,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests().unwrap();
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
|
|
||||||
{
|
{
|
||||||
for _ in 0..100 {
|
for _ in 0..100 {
|
||||||
@@ -694,4 +697,27 @@ mod tests {
|
|||||||
.segment_entries();
|
.segment_entries();
|
||||||
assert!(seg_vec.is_empty());
|
assert!(seg_vec.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_merge_over_soft_commit() {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
// writing the segment
|
||||||
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
|
index_writer.add_document(doc!(text_field=>"a"));
|
||||||
|
assert!(index_writer.soft_commit().is_ok());
|
||||||
|
index_writer.add_document(doc!(text_field=>"a"));
|
||||||
|
assert!(index_writer.soft_commit().is_ok());
|
||||||
|
|
||||||
|
let reader = index_writer.reader(1).unwrap();
|
||||||
|
let segment_ids: Vec<SegmentId> = reader
|
||||||
|
.searcher()
|
||||||
|
.segment_readers()
|
||||||
|
.iter()
|
||||||
|
.map(|reader| reader.segment_id())
|
||||||
|
.collect();
|
||||||
|
assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
use super::operation::AddOperation;
|
use super::operation::AddOperation;
|
||||||
use crate::core::Segment;
|
use crate::core::Segment;
|
||||||
use crate::core::SerializableSegment;
|
use crate::core::SerializableSegment;
|
||||||
|
use crate::directory::{SpillingResult, SpillingWriter, TerminatingWrite};
|
||||||
use crate::fastfield::FastFieldsWriter;
|
use crate::fastfield::FastFieldsWriter;
|
||||||
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
|
use crate::fieldnorm::FieldNormsWriter;
|
||||||
use crate::indexer::segment_serializer::SegmentSerializer;
|
use crate::indexer::segment_serializer::SegmentSerializer;
|
||||||
|
use crate::indexer::IndexWriterConfig;
|
||||||
use crate::postings::compute_table_size;
|
use crate::postings::compute_table_size;
|
||||||
use crate::postings::MultiFieldPostingsWriter;
|
use crate::postings::MultiFieldPostingsWriter;
|
||||||
use crate::schema::FieldType;
|
use crate::schema::FieldType;
|
||||||
@@ -11,19 +13,24 @@ use crate::schema::Schema;
|
|||||||
use crate::schema::Term;
|
use crate::schema::Term;
|
||||||
use crate::schema::Value;
|
use crate::schema::Value;
|
||||||
use crate::schema::{Field, FieldEntry};
|
use crate::schema::{Field, FieldEntry};
|
||||||
|
use crate::store::StoreWriter;
|
||||||
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
|
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
|
||||||
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
|
use crate::tokenizer::{FacetTokenizer, TextAnalyzer, TokenizerManager};
|
||||||
use crate::tokenizer::{TokenStreamChain, Tokenizer};
|
use crate::tokenizer::{TokenStreamChain, Tokenizer};
|
||||||
use crate::Opstamp;
|
use crate::Opstamp;
|
||||||
use crate::{DocId, SegmentComponent};
|
use crate::{DocId, SegmentComponent};
|
||||||
|
use std::io;
|
||||||
|
use std::io::Write;
|
||||||
|
use std::str;
|
||||||
|
use crate::indexer::resource_manager::ResourceManager;
|
||||||
|
|
||||||
/// Computes the initial size of the hash table.
|
/// Computes the initial size of the hash table.
|
||||||
///
|
///
|
||||||
/// Returns a number of bit `b`, such that the recommended initial table size is 2^b.
|
/// Returns a number of bit `b`, such that the recommended initial table size is 2^b.
|
||||||
fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
|
fn initial_table_size(per_thread_memory_budget: u64) -> crate::Result<usize> {
|
||||||
let table_memory_upper_bound = per_thread_memory_budget / 3;
|
let table_memory_upper_bound = per_thread_memory_budget / 3u64;
|
||||||
if let Some(limit) = (10..)
|
if let Some(limit) = (10..)
|
||||||
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound)
|
.take_while(|num_bits| compute_table_size(*num_bits) < table_memory_upper_bound)
|
||||||
.last()
|
.last()
|
||||||
{
|
{
|
||||||
Ok(limit.min(19)) // we cap it at 2^19 = 512K.
|
Ok(limit.min(19)) // we cap it at 2^19 = 512K.
|
||||||
@@ -41,12 +48,13 @@ fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
|
|||||||
pub struct SegmentWriter {
|
pub struct SegmentWriter {
|
||||||
max_doc: DocId,
|
max_doc: DocId,
|
||||||
multifield_postings: MultiFieldPostingsWriter,
|
multifield_postings: MultiFieldPostingsWriter,
|
||||||
segment_serializer: SegmentSerializer,
|
segment: Segment,
|
||||||
fast_field_writers: FastFieldsWriter,
|
fast_field_writers: FastFieldsWriter,
|
||||||
fieldnorms_writer: FieldNormsWriter,
|
fieldnorms_writer: FieldNormsWriter,
|
||||||
doc_opstamps: Vec<Opstamp>,
|
doc_opstamps: Vec<Opstamp>,
|
||||||
tokenizers: Vec<Option<TextAnalyzer>>,
|
tokenizers: Vec<Option<TextAnalyzer>>,
|
||||||
term_buffer: Term,
|
store_writer: StoreWriter<SpillingWriter>,
|
||||||
|
memory_manager: ResourceManager,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentWriter {
|
impl SegmentWriter {
|
||||||
@@ -60,13 +68,13 @@ impl SegmentWriter {
|
|||||||
/// - segment: The segment being written
|
/// - segment: The segment being written
|
||||||
/// - schema
|
/// - schema
|
||||||
pub fn for_segment(
|
pub fn for_segment(
|
||||||
memory_budget: usize,
|
config: &IndexWriterConfig,
|
||||||
segment: Segment,
|
segment: Segment,
|
||||||
schema: &Schema,
|
schema: &Schema,
|
||||||
|
tokenizer_manager: &TokenizerManager,
|
||||||
|
memory_manager: ResourceManager
|
||||||
) -> crate::Result<SegmentWriter> {
|
) -> crate::Result<SegmentWriter> {
|
||||||
let tokenizer_manager = segment.index().tokenizers().clone();
|
let table_num_bits = initial_table_size(config.heap_size_in_byte_per_thread())?;
|
||||||
let table_num_bits = initial_table_size(memory_budget)?;
|
|
||||||
let segment_serializer = SegmentSerializer::for_segment(segment)?;
|
|
||||||
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
|
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
|
||||||
let tokenizers = schema
|
let tokenizers = schema
|
||||||
.fields()
|
.fields()
|
||||||
@@ -82,15 +90,26 @@ impl SegmentWriter {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
.collect();
|
.collect();
|
||||||
|
let mut segment_clone = segment.clone();
|
||||||
|
let spilling_wrt = SpillingWriter::new(
|
||||||
|
50_000_000,
|
||||||
|
Box::new(move || {
|
||||||
|
segment_clone
|
||||||
|
.open_write(SegmentComponent::STORE)
|
||||||
|
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
let store_writer = StoreWriter::new(spilling_wrt);
|
||||||
Ok(SegmentWriter {
|
Ok(SegmentWriter {
|
||||||
max_doc: 0,
|
max_doc: 0,
|
||||||
multifield_postings,
|
multifield_postings,
|
||||||
fieldnorms_writer: FieldNormsWriter::for_schema(schema),
|
fieldnorms_writer: FieldNormsWriter::for_schema(schema),
|
||||||
segment_serializer,
|
segment,
|
||||||
fast_field_writers: FastFieldsWriter::from_schema(schema),
|
fast_field_writers: FastFieldsWriter::from_schema(schema),
|
||||||
doc_opstamps: Vec::with_capacity(1_000),
|
doc_opstamps: Vec::with_capacity(1_000),
|
||||||
tokenizers,
|
tokenizers,
|
||||||
term_buffer: Term::new(),
|
store_writer,
|
||||||
|
memory_manager
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -98,29 +117,40 @@ impl SegmentWriter {
|
|||||||
///
|
///
|
||||||
/// Finalize consumes the `SegmentWriter`, so that it cannot
|
/// Finalize consumes the `SegmentWriter`, so that it cannot
|
||||||
/// be used afterwards.
|
/// be used afterwards.
|
||||||
pub fn finalize(mut self) -> crate::Result<Vec<u64>> {
|
pub fn finalize(mut self) -> crate::Result<(Segment, Vec<u64>)> {
|
||||||
self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
|
self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
|
||||||
|
let spilling_wrt = self.store_writer.close()?;
|
||||||
|
let mut segment: Segment;
|
||||||
|
match spilling_wrt.finalize()? {
|
||||||
|
SpillingResult::Spilled => {
|
||||||
|
segment = self.segment.clone();
|
||||||
|
}
|
||||||
|
SpillingResult::Buffer(buf) => {
|
||||||
|
segment = self.segment.into_volatile(self.memory_manager.clone());
|
||||||
|
let mut store_wrt = segment.open_write(SegmentComponent::STORE)?;
|
||||||
|
store_wrt.write_all(&buf[..])?;
|
||||||
|
store_wrt.terminate()?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
|
||||||
|
segment = segment.with_max_doc(self.max_doc);
|
||||||
write(
|
write(
|
||||||
&self.multifield_postings,
|
&self.multifield_postings,
|
||||||
&self.fast_field_writers,
|
&self.fast_field_writers,
|
||||||
&self.fieldnorms_writer,
|
&self.fieldnorms_writer,
|
||||||
self.segment_serializer,
|
segment_serializer,
|
||||||
)?;
|
)?;
|
||||||
Ok(self.doc_opstamps)
|
Ok((segment, self.doc_opstamps))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn mem_usage(&self) -> usize {
|
pub fn mem_usage(&self) -> u64 {
|
||||||
self.multifield_postings.mem_usage()
|
self.multifield_postings.mem_usage()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Indexes a new document
|
/// Indexes a new document
|
||||||
///
|
///
|
||||||
/// As a user, you should rather use `IndexWriter`'s add_document.
|
/// As a user, you should rather use `IndexWriter`'s add_document.
|
||||||
pub fn add_document(
|
pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
|
||||||
&mut self,
|
|
||||||
add_operation: AddOperation,
|
|
||||||
schema: &Schema,
|
|
||||||
) -> crate::Result<()> {
|
|
||||||
let doc_id = self.max_doc;
|
let doc_id = self.max_doc;
|
||||||
let mut doc = add_operation.document;
|
let mut doc = add_operation.document;
|
||||||
self.doc_opstamps.push(add_operation.opstamp);
|
self.doc_opstamps.push(add_operation.opstamp);
|
||||||
@@ -128,45 +158,34 @@ impl SegmentWriter {
|
|||||||
self.fast_field_writers.add_document(&doc);
|
self.fast_field_writers.add_document(&doc);
|
||||||
|
|
||||||
for (field, field_values) in doc.get_sorted_field_values() {
|
for (field, field_values) in doc.get_sorted_field_values() {
|
||||||
let field_entry = schema.get_field_entry(field);
|
let field_options = schema.get_field_entry(field);
|
||||||
let make_schema_error = || {
|
if !field_options.is_indexed() {
|
||||||
crate::TantivyError::SchemaError(format!(
|
|
||||||
"Expected a {:?} for field {:?}",
|
|
||||||
field_entry.field_type().value_type(),
|
|
||||||
field_entry.name()
|
|
||||||
))
|
|
||||||
};
|
|
||||||
if !field_entry.is_indexed() {
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
let (term_buffer, multifield_postings) =
|
match *field_options.field_type() {
|
||||||
(&mut self.term_buffer, &mut self.multifield_postings);
|
|
||||||
match *field_entry.field_type() {
|
|
||||||
FieldType::HierarchicalFacet => {
|
FieldType::HierarchicalFacet => {
|
||||||
term_buffer.set_field(field);
|
let facets: Vec<&str> = field_values
|
||||||
let facets =
|
.iter()
|
||||||
field_values
|
.flat_map(|field_value| match *field_value.value() {
|
||||||
.iter()
|
Value::Facet(ref facet) => Some(facet.encoded_str()),
|
||||||
.flat_map(|field_value| match *field_value.value() {
|
_ => {
|
||||||
Value::Facet(ref facet) => Some(facet.encoded_str()),
|
panic!("Expected hierarchical facet");
|
||||||
_ => {
|
}
|
||||||
panic!("Expected hierarchical facet");
|
})
|
||||||
}
|
.collect();
|
||||||
});
|
let mut term = Term::for_field(field); // we set the Term
|
||||||
for facet_str in facets {
|
for fake_str in facets {
|
||||||
let mut unordered_term_id_opt = None;
|
let mut unordered_term_id_opt = None;
|
||||||
FacetTokenizer
|
FacetTokenizer.token_stream(fake_str).process(&mut |token| {
|
||||||
.token_stream(facet_str)
|
term.set_text(&token.text);
|
||||||
.process(&mut |token| {
|
let unordered_term_id =
|
||||||
term_buffer.set_text(&token.text);
|
self.multifield_postings.subscribe(doc_id, &term);
|
||||||
let unordered_term_id =
|
unordered_term_id_opt = Some(unordered_term_id);
|
||||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
});
|
||||||
unordered_term_id_opt = Some(unordered_term_id);
|
|
||||||
});
|
|
||||||
if let Some(unordered_term_id) = unordered_term_id_opt {
|
if let Some(unordered_term_id) = unordered_term_id_opt {
|
||||||
self.fast_field_writers
|
self.fast_field_writers
|
||||||
.get_multivalue_writer(field)
|
.get_multivalue_writer(field)
|
||||||
.expect("writer for facet missing")
|
.expect("multified writer for facet missing")
|
||||||
.add_val(unordered_term_id);
|
.add_val(unordered_term_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -183,6 +202,7 @@ impl SegmentWriter {
|
|||||||
if let Some(last_token) = tok_str.tokens.last() {
|
if let Some(last_token) = tok_str.tokens.last() {
|
||||||
total_offset += last_token.offset_to;
|
total_offset += last_token.offset_to;
|
||||||
}
|
}
|
||||||
|
|
||||||
token_streams
|
token_streams
|
||||||
.push(PreTokenizedStream::from(tok_str.clone()).into());
|
.push(PreTokenizedStream::from(tok_str.clone()).into());
|
||||||
}
|
}
|
||||||
@@ -192,6 +212,7 @@ impl SegmentWriter {
|
|||||||
{
|
{
|
||||||
offsets.push(total_offset);
|
offsets.push(total_offset);
|
||||||
total_offset += text.len();
|
total_offset += text.len();
|
||||||
|
|
||||||
token_streams.push(tokenizer.token_stream(text));
|
token_streams.push(tokenizer.token_stream(text));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -203,12 +224,8 @@ impl SegmentWriter {
|
|||||||
0
|
0
|
||||||
} else {
|
} else {
|
||||||
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
|
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
|
||||||
multifield_postings.index_text(
|
self.multifield_postings
|
||||||
doc_id,
|
.index_text(doc_id, field, &mut token_stream)
|
||||||
field,
|
|
||||||
&mut token_stream,
|
|
||||||
term_buffer,
|
|
||||||
)
|
|
||||||
};
|
};
|
||||||
|
|
||||||
self.fieldnorms_writer.record(doc_id, field, num_tokens);
|
self.fieldnorms_writer.record(doc_id, field, num_tokens);
|
||||||
@@ -216,74 +233,55 @@ impl SegmentWriter {
|
|||||||
FieldType::U64(ref int_option) => {
|
FieldType::U64(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
term_buffer.set_field(field_value.field());
|
let term = Term::from_field_u64(
|
||||||
let u64_val = field_value
|
field_value.field(),
|
||||||
.value()
|
field_value.value().u64_value(),
|
||||||
.u64_value()
|
);
|
||||||
.ok_or_else(make_schema_error)?;
|
self.multifield_postings.subscribe(doc_id, &term);
|
||||||
term_buffer.set_u64(u64_val);
|
|
||||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::Date(ref int_option) => {
|
FieldType::Date(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
term_buffer.set_field(field_value.field());
|
let term = Term::from_field_i64(
|
||||||
let date_val = field_value
|
field_value.field(),
|
||||||
.value()
|
field_value.value().date_value().timestamp(),
|
||||||
.date_value()
|
);
|
||||||
.ok_or_else(make_schema_error)?;
|
self.multifield_postings.subscribe(doc_id, &term);
|
||||||
term_buffer.set_i64(date_val.timestamp());
|
|
||||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::I64(ref int_option) => {
|
FieldType::I64(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
term_buffer.set_field(field_value.field());
|
let term = Term::from_field_i64(
|
||||||
let i64_val = field_value
|
field_value.field(),
|
||||||
.value()
|
field_value.value().i64_value(),
|
||||||
.i64_value()
|
);
|
||||||
.ok_or_else(make_schema_error)?;
|
self.multifield_postings.subscribe(doc_id, &term);
|
||||||
term_buffer.set_i64(i64_val);
|
|
||||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::F64(ref int_option) => {
|
FieldType::F64(ref int_option) => {
|
||||||
if int_option.is_indexed() {
|
if int_option.is_indexed() {
|
||||||
for field_value in field_values {
|
for field_value in field_values {
|
||||||
term_buffer.set_field(field_value.field());
|
let term = Term::from_field_f64(
|
||||||
let f64_val = field_value
|
field_value.field(),
|
||||||
.value()
|
field_value.value().f64_value(),
|
||||||
.f64_value()
|
);
|
||||||
.ok_or_else(make_schema_error)?;
|
self.multifield_postings.subscribe(doc_id, &term);
|
||||||
term_buffer.set_f64(f64_val);
|
|
||||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::Bytes(ref option) => {
|
FieldType::Bytes => {
|
||||||
if option.is_indexed() {
|
// Do nothing. Bytes only supports fast fields.
|
||||||
for field_value in field_values {
|
|
||||||
term_buffer.set_field(field_value.field());
|
|
||||||
let bytes = field_value
|
|
||||||
.value()
|
|
||||||
.bytes_value()
|
|
||||||
.ok_or_else(make_schema_error)?;
|
|
||||||
term_buffer.set_bytes(bytes);
|
|
||||||
self.multifield_postings.subscribe(doc_id, &term_buffer);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
|
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
|
||||||
doc.prepare_for_store();
|
doc.prepare_for_store();
|
||||||
let doc_writer = self.segment_serializer.get_store_writer();
|
self.store_writer.store(&doc)?;
|
||||||
doc_writer.store(&doc)?;
|
|
||||||
self.max_doc += 1;
|
self.max_doc += 1;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -316,16 +314,9 @@ fn write(
|
|||||||
fieldnorms_writer: &FieldNormsWriter,
|
fieldnorms_writer: &FieldNormsWriter,
|
||||||
mut serializer: SegmentSerializer,
|
mut serializer: SegmentSerializer,
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<()> {
|
||||||
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
|
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
|
||||||
fieldnorms_writer.serialize(fieldnorms_serializer)?;
|
|
||||||
}
|
|
||||||
let fieldnorm_data = serializer
|
|
||||||
.segment()
|
|
||||||
.open_read(SegmentComponent::FIELDNORMS)?;
|
|
||||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
|
||||||
let term_ord_map =
|
|
||||||
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
|
|
||||||
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
|
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
|
||||||
|
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
|
||||||
serializer.close()?;
|
serializer.close()?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
531
src/lib.rs
531
src/lib.rs
@@ -105,7 +105,7 @@ extern crate serde_json;
|
|||||||
extern crate log;
|
extern crate log;
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate thiserror;
|
extern crate failure;
|
||||||
|
|
||||||
#[cfg(all(test, feature = "unstable"))]
|
#[cfg(all(test, feature = "unstable"))]
|
||||||
extern crate test;
|
extern crate test;
|
||||||
@@ -134,7 +134,7 @@ mod core;
|
|||||||
mod indexer;
|
mod indexer;
|
||||||
|
|
||||||
#[allow(unused_doc_comments)]
|
#[allow(unused_doc_comments)]
|
||||||
pub mod error;
|
mod error;
|
||||||
pub mod tokenizer;
|
pub mod tokenizer;
|
||||||
|
|
||||||
pub mod collector;
|
pub mod collector;
|
||||||
@@ -156,15 +156,14 @@ mod snippet;
|
|||||||
pub use self::snippet::{Snippet, SnippetGenerator};
|
pub use self::snippet::{Snippet, SnippetGenerator};
|
||||||
|
|
||||||
mod docset;
|
mod docset;
|
||||||
pub use self::docset::{DocSet, TERMINATED};
|
pub use self::docset::{DocSet, SkipResult};
|
||||||
pub use crate::common::HasLen;
|
|
||||||
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
||||||
pub use crate::core::{Executor, SegmentComponent};
|
pub use crate::core::{Executor, SegmentComponent};
|
||||||
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
|
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
|
||||||
pub use crate::core::{InvertedIndexReader, SegmentReader};
|
pub use crate::core::{InvertedIndexReader, SegmentReader};
|
||||||
pub use crate::directory::Directory;
|
pub use crate::directory::Directory;
|
||||||
pub use crate::indexer::operation::UserOperation;
|
pub use crate::indexer::operation::UserOperation;
|
||||||
pub use crate::indexer::IndexWriter;
|
pub use crate::indexer::{IndexWriter, IndexWriterConfig};
|
||||||
pub use crate::postings::Postings;
|
pub use crate::postings::Postings;
|
||||||
pub use crate::reader::LeasedItem;
|
pub use crate::reader::LeasedItem;
|
||||||
pub use crate::schema::{Document, Term};
|
pub use crate::schema::{Document, Term};
|
||||||
@@ -174,7 +173,7 @@ use once_cell::sync::Lazy;
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// Index format version.
|
/// Index format version.
|
||||||
const INDEX_FORMAT_VERSION: u32 = 3;
|
const INDEX_FORMAT_VERSION: u32 = 1;
|
||||||
|
|
||||||
/// Structure version for the index.
|
/// Structure version for the index.
|
||||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
@@ -246,10 +245,11 @@ pub type DocId = u32;
|
|||||||
/// with opstamp `n+1`.
|
/// with opstamp `n+1`.
|
||||||
pub type Opstamp = u64;
|
pub type Opstamp = u64;
|
||||||
|
|
||||||
/// A Score that represents the relevance of the document to the query
|
/// A f32 that represents the relevance of the document to the query
|
||||||
///
|
///
|
||||||
/// This is modelled internally as a `f32`. The larger the number, the more relevant
|
/// This is modelled internally as a `f32`. The
|
||||||
/// the document to the search query.
|
/// larger the number, the more relevant the document
|
||||||
|
/// to the search
|
||||||
pub type Score = f32;
|
pub type Score = f32;
|
||||||
|
|
||||||
/// A `SegmentLocalId` identifies a segment.
|
/// A `SegmentLocalId` identifies a segment.
|
||||||
@@ -277,18 +277,20 @@ impl DocAddress {
|
|||||||
///
|
///
|
||||||
/// The id used for the segment is actually an ordinal
|
/// The id used for the segment is actually an ordinal
|
||||||
/// in the list of `Segment`s held by a `Searcher`.
|
/// in the list of `Segment`s held by a `Searcher`.
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
pub struct DocAddress(pub SegmentLocalId, pub DocId);
|
pub struct DocAddress(pub SegmentLocalId, pub DocId);
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
||||||
use crate::core::SegmentReader;
|
use crate::core::SegmentReader;
|
||||||
use crate::docset::{DocSet, TERMINATED};
|
use crate::docset::DocSet;
|
||||||
use crate::query::BooleanQuery;
|
use crate::query::BooleanQuery;
|
||||||
use crate::schema::*;
|
use crate::schema::*;
|
||||||
use crate::DocAddress;
|
use crate::DocAddress;
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
|
use crate::IndexWriter;
|
||||||
use crate::Postings;
|
use crate::Postings;
|
||||||
use crate::ReloadPolicy;
|
use crate::ReloadPolicy;
|
||||||
use rand::distributions::Bernoulli;
|
use rand::distributions::Bernoulli;
|
||||||
@@ -296,26 +298,17 @@ mod tests {
|
|||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use rand::{Rng, SeedableRng};
|
use rand::{Rng, SeedableRng};
|
||||||
|
|
||||||
/// Checks if left and right are close one to each other.
|
pub fn assert_nearly_equals(expected: f32, val: f32) {
|
||||||
/// Panics if the two values are more than 0.5% apart.
|
assert!(
|
||||||
#[macro_export]
|
nearly_equals(val, expected),
|
||||||
macro_rules! assert_nearly_equals {
|
"Got {}, expected {}.",
|
||||||
($left:expr, $right:expr) => {{
|
val,
|
||||||
match (&$left, &$right) {
|
expected
|
||||||
(left_val, right_val) => {
|
);
|
||||||
let diff = (left_val - right_val).abs();
|
}
|
||||||
let add = left_val.abs() + right_val.abs();
|
|
||||||
if diff > 0.0005 * add {
|
pub fn nearly_equals(a: f32, b: f32) -> bool {
|
||||||
panic!(
|
(a - b).abs() < 0.0005 * (a + b).abs()
|
||||||
r#"assertion failed: `(left ~= right)`
|
|
||||||
left: `{:?}`,
|
|
||||||
right: `{:?}`"#,
|
|
||||||
&*left_val, &*right_val
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
||||||
@@ -353,14 +346,14 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(feature = "mmap")]
|
#[cfg(feature = "mmap")]
|
||||||
fn test_indexing() -> crate::Result<()> {
|
fn test_indexing() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_from_tempdir(schema).unwrap();
|
let index = Index::create_from_tempdir(schema).unwrap();
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
{
|
{
|
||||||
let doc = doc!(text_field=>"af b");
|
let doc = doc!(text_field=>"af b");
|
||||||
index_writer.add_document(doc);
|
index_writer.add_document(doc);
|
||||||
@@ -375,91 +368,120 @@ mod tests {
|
|||||||
}
|
}
|
||||||
assert!(index_writer.commit().is_ok());
|
assert!(index_writer.commit().is_ok());
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_docfreq1() -> crate::Result<()> {
|
fn test_docfreq1() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
{
|
||||||
index_writer.commit()?;
|
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||||
index_writer.add_document(doc!(text_field=>"a"));
|
index_writer.commit().unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"a a"));
|
}
|
||||||
index_writer.commit()?;
|
{
|
||||||
index_writer.add_document(doc!(text_field=>"c"));
|
{
|
||||||
index_writer.commit()?;
|
let doc = doc!(text_field=>"a");
|
||||||
let reader = index.reader()?;
|
index_writer.add_document(doc);
|
||||||
let searcher = reader.searcher();
|
}
|
||||||
let term_a = Term::from_field_text(text_field, "a");
|
{
|
||||||
assert_eq!(searcher.doc_freq(&term_a)?, 3);
|
let doc = doc!(text_field=>"a a");
|
||||||
let term_b = Term::from_field_text(text_field, "b");
|
index_writer.add_document(doc);
|
||||||
assert_eq!(searcher.doc_freq(&term_b)?, 1);
|
}
|
||||||
let term_c = Term::from_field_text(text_field, "c");
|
index_writer.commit().unwrap();
|
||||||
assert_eq!(searcher.doc_freq(&term_c)?, 2);
|
}
|
||||||
let term_d = Term::from_field_text(text_field, "d");
|
{
|
||||||
assert_eq!(searcher.doc_freq(&term_d)?, 0);
|
let doc = doc!(text_field=>"c");
|
||||||
Ok(())
|
index_writer.add_document(doc);
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let term_a = Term::from_field_text(text_field, "a");
|
||||||
|
assert_eq!(searcher.doc_freq(&term_a), 3);
|
||||||
|
let term_b = Term::from_field_text(text_field, "b");
|
||||||
|
assert_eq!(searcher.doc_freq(&term_b), 1);
|
||||||
|
let term_c = Term::from_field_text(text_field, "c");
|
||||||
|
assert_eq!(searcher.doc_freq(&term_c), 2);
|
||||||
|
let term_d = Term::from_field_text(text_field, "d");
|
||||||
|
assert_eq!(searcher.doc_freq(&term_d), 0);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> {
|
fn test_fieldnorm_no_docs_with_field() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let title_field = schema_builder.add_text_field("title", TEXT);
|
let title_field = schema_builder.add_text_field("title", TEXT);
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
|
||||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
|
||||||
index_writer.commit()?;
|
|
||||||
let index_reader = index.reader()?;
|
|
||||||
let searcher = index_reader.searcher();
|
|
||||||
let reader = searcher.segment_reader(0);
|
|
||||||
{
|
{
|
||||||
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
|
{
|
||||||
|
let doc = doc!(text_field=>"a b c");
|
||||||
|
index_writer.add_document(doc);
|
||||||
|
}
|
||||||
|
index_writer.commit().unwrap();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?;
|
let index_reader = index.reader().unwrap();
|
||||||
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
|
let searcher = index_reader.searcher();
|
||||||
|
let reader = searcher.segment_reader(0);
|
||||||
|
{
|
||||||
|
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
|
||||||
|
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
|
||||||
|
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_fieldnorm() -> crate::Result<()> {
|
fn test_fieldnorm() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let index = Index::create_in_ram(schema_builder.build());
|
let index = Index::create_in_ram(schema_builder.build());
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
{
|
||||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!());
|
{
|
||||||
index_writer.add_document(doc!(text_field=>"a b"));
|
let doc = doc!(text_field=>"a b c");
|
||||||
index_writer.commit()?;
|
index_writer.add_document(doc);
|
||||||
let reader = index.reader()?;
|
}
|
||||||
let searcher = reader.searcher();
|
{
|
||||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
let doc = doc!();
|
||||||
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?;
|
index_writer.add_document(doc);
|
||||||
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
|
}
|
||||||
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
|
{
|
||||||
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
|
let doc = doc!(text_field=>"a b");
|
||||||
Ok(())
|
index_writer.add_document(doc);
|
||||||
|
}
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||||
|
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
|
||||||
|
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
|
||||||
|
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
|
||||||
|
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
|
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
|
||||||
let mut doc = docset.advance();
|
while docset.advance() {
|
||||||
while doc != TERMINATED {
|
if !reader.is_deleted(docset.doc()) {
|
||||||
if !reader.is_deleted(doc) {
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
doc = docset.advance();
|
|
||||||
}
|
}
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_delete_postings1() -> crate::Result<()> {
|
fn test_delete_postings1() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let term_abcd = Term::from_field_text(text_field, "abcd");
|
let term_abcd = Term::from_field_text(text_field, "abcd");
|
||||||
@@ -475,7 +497,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
// 0
|
// 0
|
||||||
index_writer.add_document(doc!(text_field=>"a b"));
|
index_writer.add_document(doc!(text_field=>"a b"));
|
||||||
// 1
|
// 1
|
||||||
@@ -491,19 +513,19 @@ mod tests {
|
|||||||
index_writer.add_document(doc!(text_field=>" b c"));
|
index_writer.add_document(doc!(text_field=>" b c"));
|
||||||
// 5
|
// 5
|
||||||
index_writer.add_document(doc!(text_field=>" a"));
|
index_writer.add_document(doc!(text_field=>" a"));
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
reader.reload()?;
|
reader.reload().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
let inverted_index = segment_reader.inverted_index(text_field);
|
||||||
assert!(inverted_index
|
assert!(inverted_index
|
||||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.is_none());
|
.is_none());
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||||
assert_eq!(postings.doc(), 5);
|
assert_eq!(postings.doc(), 5);
|
||||||
@@ -511,7 +533,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||||
assert_eq!(postings.doc(), 3);
|
assert_eq!(postings.doc(), 3);
|
||||||
@@ -522,25 +544,25 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
// 0
|
// 0
|
||||||
index_writer.add_document(doc!(text_field=>"a b"));
|
index_writer.add_document(doc!(text_field=>"a b"));
|
||||||
// 1
|
// 1
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||||
index_writer.rollback()?;
|
index_writer.rollback().unwrap();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
reader.reload()?;
|
reader.reload().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let seg_reader = searcher.segment_reader(0);
|
let seg_reader = searcher.segment_reader(0);
|
||||||
let inverted_index = seg_reader.inverted_index(term_abcd.field())?;
|
let inverted_index = seg_reader.inverted_index(term_abcd.field());
|
||||||
|
|
||||||
assert!(inverted_index
|
assert!(inverted_index
|
||||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.is_none());
|
.is_none());
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, seg_reader));
|
assert!(advance_undeleted(&mut postings, seg_reader));
|
||||||
assert_eq!(postings.doc(), 5);
|
assert_eq!(postings.doc(), 5);
|
||||||
@@ -548,7 +570,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, seg_reader));
|
assert!(advance_undeleted(&mut postings, seg_reader));
|
||||||
assert_eq!(postings.doc(), 3);
|
assert_eq!(postings.doc(), 3);
|
||||||
@@ -559,30 +581,30 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"a b"));
|
index_writer.add_document(doc!(text_field=>"a b"));
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||||
index_writer.rollback()?;
|
index_writer.rollback().unwrap();
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "a"));
|
index_writer.delete_term(Term::from_field_text(text_field, "a"));
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
reader.reload()?;
|
reader.reload().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let inverted_index = segment_reader.inverted_index(term_abcd.field())?;
|
let inverted_index = segment_reader.inverted_index(term_abcd.field());
|
||||||
assert!(inverted_index
|
assert!(inverted_index
|
||||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.is_none());
|
.is_none());
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(!advance_undeleted(&mut postings, segment_reader));
|
assert!(!advance_undeleted(&mut postings, segment_reader));
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||||
assert_eq!(postings.doc(), 3);
|
assert_eq!(postings.doc(), 3);
|
||||||
@@ -592,107 +614,104 @@ mod tests {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||||
assert_eq!(postings.doc(), 4);
|
assert_eq!(postings.doc(), 4);
|
||||||
assert!(!advance_undeleted(&mut postings, segment_reader));
|
assert!(!advance_undeleted(&mut postings, segment_reader));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_indexed_u64() -> crate::Result<()> {
|
fn test_indexed_u64() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let field = schema_builder.add_u64_field("value", INDEXED);
|
let field = schema_builder.add_u64_field("value", INDEXED);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(field=>1u64));
|
index_writer.add_document(doc!(field=>1u64));
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_u64(field, 1u64);
|
let term = Term::from_field_u64(field, 1u64);
|
||||||
let mut postings = searcher
|
let mut postings = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.inverted_index(term.field())?
|
.inverted_index(term.field())
|
||||||
.read_postings(&term, IndexRecordOption::Basic)?
|
.read_postings(&term, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
assert!(postings.advance());
|
||||||
assert_eq!(postings.doc(), 0);
|
assert_eq!(postings.doc(), 0);
|
||||||
assert_eq!(postings.advance(), TERMINATED);
|
assert!(!postings.advance());
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_indexed_i64() -> crate::Result<()> {
|
fn test_indexed_i64() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let value_field = schema_builder.add_i64_field("value", INDEXED);
|
let value_field = schema_builder.add_i64_field("value", INDEXED);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
let negative_val = -1i64;
|
let negative_val = -1i64;
|
||||||
index_writer.add_document(doc!(value_field => negative_val));
|
index_writer.add_document(doc!(value_field => negative_val));
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_i64(value_field, negative_val);
|
let term = Term::from_field_i64(value_field, negative_val);
|
||||||
let mut postings = searcher
|
let mut postings = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.inverted_index(term.field())?
|
.inverted_index(term.field())
|
||||||
.read_postings(&term, IndexRecordOption::Basic)?
|
.read_postings(&term, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
assert!(postings.advance());
|
||||||
assert_eq!(postings.doc(), 0);
|
assert_eq!(postings.doc(), 0);
|
||||||
assert_eq!(postings.advance(), TERMINATED);
|
assert!(!postings.advance());
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_indexed_f64() -> crate::Result<()> {
|
fn test_indexed_f64() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let value_field = schema_builder.add_f64_field("value", INDEXED);
|
let value_field = schema_builder.add_f64_field("value", INDEXED);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
let val = std::f64::consts::PI;
|
let val = std::f64::consts::PI;
|
||||||
index_writer.add_document(doc!(value_field => val));
|
index_writer.add_document(doc!(value_field => val));
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let term = Term::from_field_f64(value_field, val);
|
let term = Term::from_field_f64(value_field, val);
|
||||||
let mut postings = searcher
|
let mut postings = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.inverted_index(term.field())?
|
.inverted_index(term.field())
|
||||||
.read_postings(&term, IndexRecordOption::Basic)?
|
.read_postings(&term, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
assert!(postings.advance());
|
||||||
assert_eq!(postings.doc(), 0);
|
assert_eq!(postings.doc(), 0);
|
||||||
assert_eq!(postings.advance(), TERMINATED);
|
assert!(!postings.advance());
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_indexedfield_not_in_documents() -> crate::Result<()> {
|
fn test_indexedfield_not_in_documents() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let absent_field = schema_builder.add_text_field("text", TEXT);
|
let absent_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"a"));
|
index_writer.add_document(doc!(text_field=>"a"));
|
||||||
assert!(index_writer.commit().is_ok());
|
assert!(index_writer.commit().is_ok());
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
let inverted_index = segment_reader.inverted_index(absent_field)?;
|
segment_reader.inverted_index(absent_field); //< should not panic
|
||||||
assert_eq!(inverted_index.terms().num_terms(), 0);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_delete_postings2() -> crate::Result<()> {
|
fn test_delete_postings2() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
@@ -700,112 +719,128 @@ mod tests {
|
|||||||
let reader = index
|
let reader = index
|
||||||
.reader_builder()
|
.reader_builder()
|
||||||
.reload_policy(ReloadPolicy::Manual)
|
.reload_policy(ReloadPolicy::Manual)
|
||||||
.try_into()?;
|
.try_into()
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"63"));
|
|
||||||
index_writer.add_document(doc!(text_field=>"70"));
|
let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
|
||||||
index_writer.add_document(doc!(text_field=>"34"));
|
let doc = doc!(text_field=>val);
|
||||||
index_writer.add_document(doc!(text_field=>"1"));
|
index_writer.add_document(doc);
|
||||||
index_writer.add_document(doc!(text_field=>"38"));
|
};
|
||||||
index_writer.add_document(doc!(text_field=>"33"));
|
|
||||||
index_writer.add_document(doc!(text_field=>"40"));
|
let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
|
||||||
index_writer.add_document(doc!(text_field=>"17"));
|
let delterm = Term::from_field_text(text_field, val);
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "38"));
|
index_writer.delete_term(delterm);
|
||||||
index_writer.delete_term(Term::from_field_text(text_field, "34"));
|
};
|
||||||
index_writer.commit()?;
|
|
||||||
reader.reload()?;
|
add_document(&mut index_writer, "63");
|
||||||
assert_eq!(reader.searcher().num_docs(), 6);
|
add_document(&mut index_writer, "70");
|
||||||
Ok(())
|
add_document(&mut index_writer, "34");
|
||||||
|
add_document(&mut index_writer, "1");
|
||||||
|
add_document(&mut index_writer, "38");
|
||||||
|
add_document(&mut index_writer, "33");
|
||||||
|
add_document(&mut index_writer, "40");
|
||||||
|
add_document(&mut index_writer, "17");
|
||||||
|
remove_document(&mut index_writer, "38");
|
||||||
|
remove_document(&mut index_writer, "34");
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
reader.reload().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
assert_eq!(searcher.num_docs(), 6);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_termfreq() -> crate::Result<()> {
|
fn test_termfreq() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
{
|
{
|
||||||
// writing the segment
|
// writing the segment
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"af af af bc bc"));
|
{
|
||||||
index_writer.commit()?;
|
let doc = doc!(text_field=>"af af af bc bc");
|
||||||
|
index_writer.add_document(doc);
|
||||||
|
}
|
||||||
|
index_writer.commit().unwrap();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let index_reader = index.reader()?;
|
let index_reader = index.reader().unwrap();
|
||||||
let searcher = index_reader.searcher();
|
let searcher = index_reader.searcher();
|
||||||
let reader = searcher.segment_reader(0);
|
let reader = searcher.segment_reader(0);
|
||||||
let inverted_index = reader.inverted_index(text_field)?;
|
let inverted_index = reader.inverted_index(text_field);
|
||||||
let term_abcd = Term::from_field_text(text_field, "abcd");
|
let term_abcd = Term::from_field_text(text_field, "abcd");
|
||||||
assert!(inverted_index
|
assert!(inverted_index
|
||||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.is_none());
|
.is_none());
|
||||||
let term_af = Term::from_field_text(text_field, "af");
|
let term_af = Term::from_field_text(text_field, "af");
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)?
|
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
assert!(postings.advance());
|
||||||
assert_eq!(postings.doc(), 0);
|
assert_eq!(postings.doc(), 0);
|
||||||
assert_eq!(postings.term_freq(), 3);
|
assert_eq!(postings.term_freq(), 3);
|
||||||
assert_eq!(postings.advance(), TERMINATED);
|
assert!(!postings.advance());
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_searcher_1() -> crate::Result<()> {
|
fn test_searcher_1() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
// writing the segment
|
{
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
// writing the segment
|
||||||
index_writer.add_document(doc!(text_field=>"af af af b"));
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
index_writer.add_document(doc!(text_field=>"af af af b"));
|
||||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||||
index_writer.commit()?;
|
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||||
|
index_writer.commit().unwrap();
|
||||||
reader.reload()?;
|
}
|
||||||
let searcher = reader.searcher();
|
{
|
||||||
let get_doc_ids = |terms: Vec<Term>| {
|
reader.reload().unwrap();
|
||||||
let query = BooleanQuery::new_multiterms_query(terms);
|
let searcher = reader.searcher();
|
||||||
searcher
|
let get_doc_ids = |terms: Vec<Term>| {
|
||||||
.search(&query, &TEST_COLLECTOR_WITH_SCORE)
|
let query = BooleanQuery::new_multiterms_query(terms);
|
||||||
.map(|topdocs| topdocs.docs().to_vec())
|
let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
|
||||||
};
|
topdocs.docs().to_vec()
|
||||||
assert_eq!(
|
};
|
||||||
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
|
assert_eq!(
|
||||||
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
|
||||||
);
|
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
||||||
assert_eq!(
|
);
|
||||||
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
|
assert_eq!(
|
||||||
vec![DocAddress(0, 0)]
|
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
|
||||||
);
|
vec![DocAddress(0, 0)]
|
||||||
assert_eq!(
|
);
|
||||||
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
|
assert_eq!(
|
||||||
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
|
||||||
);
|
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
||||||
assert_eq!(
|
);
|
||||||
get_doc_ids(vec![Term::from_field_text(text_field, "c")])?,
|
assert_eq!(
|
||||||
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
|
||||||
);
|
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
||||||
assert_eq!(
|
);
|
||||||
get_doc_ids(vec![Term::from_field_text(text_field, "d")])?,
|
assert_eq!(
|
||||||
vec![DocAddress(0, 2)]
|
get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
|
||||||
);
|
vec![DocAddress(0, 2)]
|
||||||
assert_eq!(
|
);
|
||||||
get_doc_ids(vec![
|
assert_eq!(
|
||||||
Term::from_field_text(text_field, "b"),
|
get_doc_ids(vec![
|
||||||
Term::from_field_text(text_field, "a"),
|
Term::from_field_text(text_field, "b"),
|
||||||
])?,
|
Term::from_field_text(text_field, "a"),
|
||||||
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
]),
|
||||||
);
|
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
||||||
Ok(())
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_searcher_2() -> crate::Result<()> {
|
fn test_searcher_2() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
@@ -813,17 +848,19 @@ mod tests {
|
|||||||
let reader = index
|
let reader = index
|
||||||
.reader_builder()
|
.reader_builder()
|
||||||
.reload_policy(ReloadPolicy::Manual)
|
.reload_policy(ReloadPolicy::Manual)
|
||||||
.try_into()?;
|
.try_into()
|
||||||
|
.unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 0u64);
|
assert_eq!(reader.searcher().num_docs(), 0u64);
|
||||||
// writing the segment
|
{
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
// writing the segment
|
||||||
index_writer.add_document(doc!(text_field=>"af b"));
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
index_writer.add_document(doc!(text_field=>"af b"));
|
||||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||||
index_writer.commit()?;
|
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||||
reader.reload()?;
|
index_writer.commit().unwrap();
|
||||||
|
}
|
||||||
|
reader.reload().unwrap();
|
||||||
assert_eq!(reader.searcher().num_docs(), 3u64);
|
assert_eq!(reader.searcher().num_docs(), 3u64);
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -835,17 +872,17 @@ mod tests {
|
|||||||
text_field => "some other value",
|
text_field => "some other value",
|
||||||
other_text_field => "short");
|
other_text_field => "short");
|
||||||
assert_eq!(document.len(), 3);
|
assert_eq!(document.len(), 3);
|
||||||
let values: Vec<&Value> = document.get_all(text_field).collect();
|
let values = document.get_all(text_field);
|
||||||
assert_eq!(values.len(), 2);
|
assert_eq!(values.len(), 2);
|
||||||
assert_eq!(values[0].text(), Some("tantivy"));
|
assert_eq!(values[0].text(), Some("tantivy"));
|
||||||
assert_eq!(values[1].text(), Some("some other value"));
|
assert_eq!(values[1].text(), Some("some other value"));
|
||||||
let values: Vec<&Value> = document.get_all(other_text_field).collect();
|
let values = document.get_all(other_text_field);
|
||||||
assert_eq!(values.len(), 1);
|
assert_eq!(values.len(), 1);
|
||||||
assert_eq!(values[0].text(), Some("short"));
|
assert_eq!(values[0].text(), Some("short"));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_wrong_fast_field_type() -> crate::Result<()> {
|
fn test_wrong_fast_field_type() {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
|
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
|
||||||
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
|
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
|
||||||
@@ -855,14 +892,14 @@ mod tests {
|
|||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
|
||||||
{
|
{
|
||||||
let document =
|
let document =
|
||||||
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
|
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
|
||||||
index_writer.add_document(document);
|
index_writer.add_document(document);
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
}
|
}
|
||||||
let reader = index.reader()?;
|
let reader = index.reader().unwrap();
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||||
{
|
{
|
||||||
@@ -901,12 +938,11 @@ mod tests {
|
|||||||
let fast_field_reader = fast_field_reader_opt.unwrap();
|
let fast_field_reader = fast_field_reader_opt.unwrap();
|
||||||
assert_eq!(fast_field_reader.get(0), 4f64)
|
assert_eq!(fast_field_reader.get(0), 4f64)
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// motivated by #729
|
// motivated by #729
|
||||||
#[test]
|
#[test]
|
||||||
fn test_update_via_delete_insert() -> crate::Result<()> {
|
fn test_update_via_delete_insert() {
|
||||||
use crate::collector::Count;
|
use crate::collector::Count;
|
||||||
use crate::indexer::NoMergePolicy;
|
use crate::indexer::NoMergePolicy;
|
||||||
use crate::query::AllQuery;
|
use crate::query::AllQuery;
|
||||||
@@ -920,17 +956,17 @@ mod tests {
|
|||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
let index = Index::create_in_ram(schema.clone());
|
let index = Index::create_in_ram(schema.clone());
|
||||||
let index_reader = index.reader()?;
|
let index_reader = index.reader().unwrap();
|
||||||
|
|
||||||
let mut index_writer = index.writer_for_tests()?;
|
let mut index_writer = index.writer(3_000_000).unwrap();
|
||||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||||
|
|
||||||
for doc_id in 0u64..DOC_COUNT {
|
for doc_id in 0u64..DOC_COUNT {
|
||||||
index_writer.add_document(doc!(id => doc_id));
|
index_writer.add_document(doc!(id => doc_id));
|
||||||
}
|
}
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
|
|
||||||
index_reader.reload()?;
|
index_reader.reload().unwrap();
|
||||||
let searcher = index_reader.searcher();
|
let searcher = index_reader.searcher();
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -941,11 +977,12 @@ mod tests {
|
|||||||
// update the 10 elements by deleting and re-adding
|
// update the 10 elements by deleting and re-adding
|
||||||
for doc_id in 0u64..DOC_COUNT {
|
for doc_id in 0u64..DOC_COUNT {
|
||||||
index_writer.delete_term(Term::from_field_u64(id, doc_id));
|
index_writer.delete_term(Term::from_field_u64(id, doc_id));
|
||||||
index_writer.commit()?;
|
index_writer.commit().unwrap();
|
||||||
index_reader.reload()?;
|
index_reader.reload().unwrap();
|
||||||
index_writer.add_document(doc!(id => doc_id));
|
let doc = doc!(id => doc_id);
|
||||||
index_writer.commit()?;
|
index_writer.add_document(doc);
|
||||||
index_reader.reload()?;
|
index_writer.commit().unwrap();
|
||||||
|
index_reader.reload().unwrap();
|
||||||
let searcher = index_reader.searcher();
|
let searcher = index_reader.searcher();
|
||||||
// The number of documents should be stable.
|
// The number of documents should be stable.
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -954,7 +991,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
index_reader.reload()?;
|
index_reader.reload().unwrap();
|
||||||
let searcher = index_reader.searcher();
|
let searcher = index_reader.searcher();
|
||||||
let segment_ids: Vec<SegmentId> = searcher
|
let segment_ids: Vec<SegmentId> = searcher
|
||||||
.segment_readers()
|
.segment_readers()
|
||||||
@@ -963,18 +1000,12 @@ mod tests {
|
|||||||
.collect();
|
.collect();
|
||||||
block_on(index_writer.merge(&segment_ids)).unwrap();
|
block_on(index_writer.merge(&segment_ids)).unwrap();
|
||||||
|
|
||||||
index_reader.reload()?;
|
index_reader.reload().unwrap();
|
||||||
let searcher = index_reader.searcher();
|
let searcher = index_reader.searcher();
|
||||||
-        assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
-        Ok(())
-    }
-
-    #[test]
-    fn test_validate_checksum() -> crate::Result<()> {
-        let index_path = tempfile::tempdir().expect("dir");
-        let schema = Schema::builder().build();
-        let index = Index::create_in_dir(&index_path, schema)?;
-        assert!(index.validate_checksum()?.is_empty());
-        Ok(())
+        assert_eq!(
+            searcher.search(&AllQuery, &Count).unwrap(),
+            DOC_COUNT as usize
+        );
     }
 }
|
|||||||
@@ -37,12 +37,12 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub mod tests {
|
pub mod tests {
|
||||||
|
|
||||||
use super::PositionSerializer;
|
use super::{PositionReader, PositionSerializer};
|
||||||
use crate::positions::reader::PositionReader;
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::{common::HasLen, directory::FileSlice};
|
use crate::positions::COMPRESSION_BLOCK_SIZE;
|
||||||
use std::iter;
|
use std::iter;
|
||||||
|
|
||||||
fn create_stream_buffer(vals: &[u32]) -> (FileSlice, FileSlice) {
|
fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
|
||||||
let mut skip_buffer = vec![];
|
let mut skip_buffer = vec![];
|
||||||
let mut stream_buffer = vec![];
|
let mut stream_buffer = vec![];
|
||||||
{
|
{
|
||||||
@@ -53,7 +53,10 @@ pub mod tests {
|
|||||||
}
|
}
|
||||||
serializer.close().unwrap();
|
serializer.close().unwrap();
|
||||||
}
|
}
|
||||||
(FileSlice::from(stream_buffer), FileSlice::from(skip_buffer))
|
(
|
||||||
|
ReadOnlySource::from(stream_buffer),
|
||||||
|
ReadOnlySource::from(skip_buffer),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -62,10 +65,10 @@ pub mod tests {
|
|||||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||||
assert_eq!(skip.len(), 12);
|
assert_eq!(skip.len(), 12);
|
||||||
assert_eq!(stream.len(), 1168);
|
assert_eq!(stream.len(), 1168);
|
||||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||||
for &n in &[1, 10, 127, 128, 130, 312] {
|
for &n in &[1, 10, 127, 128, 130, 312] {
|
||||||
let mut v = vec![0u32; n];
|
let mut v = vec![0u32; n];
|
||||||
position_reader.read(0, &mut v[..]);
|
position_reader.read(&mut v[..n]);
|
||||||
for i in 0..n {
|
for i in 0..n {
|
||||||
assert_eq!(v[i], i as u32);
|
assert_eq!(v[i], i as u32);
|
||||||
}
|
}
|
||||||
@@ -73,19 +76,19 @@ pub mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_position_read_with_offset() {
|
fn test_position_skip() {
|
||||||
let v: Vec<u32> = (0..1000).collect();
|
let v: Vec<u32> = (0..1_000).collect();
|
||||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||||
assert_eq!(skip.len(), 12);
|
assert_eq!(skip.len(), 12);
|
||||||
assert_eq!(stream.len(), 1168);
|
assert_eq!(stream.len(), 1168);
|
||||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
|
||||||
for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
|
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||||
for &len in &[1, 10, 130, 500] {
|
position_reader.skip(10);
|
||||||
let mut v = vec![0u32; len];
|
for &n in &[10, 127, COMPRESSION_BLOCK_SIZE, 130, 312] {
|
||||||
position_reader.read(offset, &mut v[..]);
|
let mut v = vec![0u32; n];
|
||||||
for i in 0..len {
|
position_reader.read(&mut v[..n]);
|
||||||
assert_eq!(v[i], i as u32 + offset as u32);
|
for i in 0..n {
|
||||||
}
|
assert_eq!(v[i], 10u32 + i as u32);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -97,15 +100,14 @@ pub mod tests {
|
|||||||
assert_eq!(skip.len(), 12);
|
assert_eq!(skip.len(), 12);
|
||||||
assert_eq!(stream.len(), 1168);
|
assert_eq!(stream.len(), 1168);
|
||||||
|
|
||||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||||
let mut buf = [0u32; 7];
|
let mut buf = [0u32; 7];
|
||||||
let mut c = 0;
|
let mut c = 0;
|
||||||
|
|
||||||
let mut offset = 0;
|
|
||||||
for _ in 0..100 {
|
for _ in 0..100 {
|
||||||
position_reader.read(offset, &mut buf);
|
position_reader.read(&mut buf);
|
||||||
position_reader.read(offset, &mut buf);
|
position_reader.read(&mut buf);
|
||||||
offset += 7;
|
position_reader.skip(4);
|
||||||
|
position_reader.skip(3);
|
||||||
for &el in &buf {
|
for &el in &buf {
|
||||||
assert_eq!(c, el);
|
assert_eq!(c, el);
|
||||||
c += 1;
|
c += 1;
|
||||||
@@ -113,59 +115,6 @@ pub mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_position_reread_anchor_different_than_block() {
|
|
||||||
let v: Vec<u32> = (0..2_000_000).collect();
|
|
||||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
|
||||||
assert_eq!(skip.len(), 15_749);
|
|
||||||
assert_eq!(stream.len(), 4_987_872);
|
|
||||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
|
|
||||||
let mut buf = [0u32; 256];
|
|
||||||
position_reader.read(128, &mut buf);
|
|
||||||
for i in 0..256 {
|
|
||||||
assert_eq!(buf[i], (128 + i) as u32);
|
|
||||||
}
|
|
||||||
position_reader.read(128, &mut buf);
|
|
||||||
for i in 0..256 {
|
|
||||||
assert_eq!(buf[i], (128 + i) as u32);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[should_panic(expected = "offset arguments should be increasing.")]
|
|
||||||
fn test_position_panic_if_called_previous_anchor() {
|
|
||||||
let v: Vec<u32> = (0..2_000_000).collect();
|
|
||||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
|
||||||
assert_eq!(skip.len(), 15_749);
|
|
||||||
assert_eq!(stream.len(), 4_987_872);
|
|
||||||
let mut buf = [0u32; 1];
|
|
||||||
let mut position_reader =
|
|
||||||
PositionReader::new(stream.clone(), skip.clone(), 200_000).unwrap();
|
|
||||||
position_reader.read(230, &mut buf);
|
|
||||||
position_reader.read(9, &mut buf);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_positions_bug() {
|
|
||||||
let mut v: Vec<u32> = vec![];
|
|
||||||
for i in 1..200 {
|
|
||||||
for j in 0..i {
|
|
||||||
v.push(j);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
|
||||||
let mut buf = Vec::new();
|
|
||||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
|
|
||||||
let mut offset = 0;
|
|
||||||
for i in 1..24 {
|
|
||||||
buf.resize(i, 0);
|
|
||||||
position_reader.read(offset, &mut buf[..]);
|
|
||||||
offset += i as u64;
|
|
||||||
let r: Vec<u32> = (0..i).map(|el| el as u32).collect();
|
|
||||||
assert_eq!(buf, &r[..]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_position_long_skip_const() {
|
fn test_position_long_skip_const() {
|
||||||
const CONST_VAL: u32 = 9u32;
|
const CONST_VAL: u32 = 9u32;
|
||||||
@@ -173,9 +122,9 @@ pub mod tests {
|
|||||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||||
assert_eq!(skip.len(), 15_749);
|
assert_eq!(skip.len(), 15_749);
|
||||||
assert_eq!(stream.len(), 1_000_000);
|
assert_eq!(stream.len(), 1_000_000);
|
||||||
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024).unwrap();
|
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
|
||||||
let mut buf = [0u32; 1];
|
let mut buf = [0u32; 1];
|
||||||
position_reader.read(0, &mut buf);
|
position_reader.read(&mut buf);
|
||||||
assert_eq!(buf[0], CONST_VAL);
|
assert_eq!(buf[0], CONST_VAL);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -192,10 +141,9 @@ pub mod tests {
|
|||||||
128 * 1024 + 7,
|
128 * 1024 + 7,
|
||||||
128 * 10 * 1024 + 10,
|
128 * 10 * 1024 + 10,
|
||||||
] {
|
] {
|
||||||
let mut position_reader =
|
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
|
||||||
PositionReader::new(stream.clone(), skip.clone(), offset).unwrap();
|
|
||||||
let mut buf = [0u32; 1];
|
let mut buf = [0u32; 1];
|
||||||
position_reader.read(0, &mut buf);
|
position_reader.read(&mut buf);
|
||||||
assert_eq!(buf[0], offset as u32);
|
assert_eq!(buf[0], offset as u32);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +1,9 @@
|
|||||||
use std::io;
|
|
||||||
|
|
||||||
use crate::common::{BinarySerializable, FixedSize};
|
use crate::common::{BinarySerializable, FixedSize};
|
||||||
use crate::directory::FileSlice;
|
use crate::directory::ReadOnlySource;
|
||||||
use crate::directory::OwnedBytes;
|
|
||||||
use crate::positions::COMPRESSION_BLOCK_SIZE;
|
use crate::positions::COMPRESSION_BLOCK_SIZE;
|
||||||
use crate::positions::LONG_SKIP_INTERVAL;
|
use crate::positions::LONG_SKIP_INTERVAL;
|
||||||
use crate::positions::LONG_SKIP_IN_BLOCKS;
|
use crate::positions::LONG_SKIP_IN_BLOCKS;
|
||||||
use bitpacking::{BitPacker, BitPacker4x};
|
use crate::postings::compression::compressed_block_size;
|
||||||
|
|
||||||
/// Positions works as a long sequence of compressed blocks.
|
/// Positions works as a long sequence of compressed blocks.
|
||||||
/// All terms are chained one after the other.
|
/// All terms are chained one after the other.
|
||||||
///
|
///
|
||||||
@@ -28,28 +24,28 @@ use bitpacking::{BitPacker, BitPacker4x};
|
|||||||
/// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
|
/// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
|
||||||
/// so skipping a block without decompressing it is just a matter of advancing that many
|
/// so skipping a block without decompressing it is just a matter of advancing that many
|
||||||
/// bytes.
|
/// bytes.
|
||||||
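A minimal sketch of the arithmetic stated in the comment above: with 128-integer blocks, a block packed over `num_bits` bits per integer occupies `128 * num_bits / 8` bytes, so skipping whole blocks only requires summing those lengths from the per-block skip data. The helper names below are illustrative, not this module's API:

const COMPRESSION_BLOCK_SIZE: usize = 128;

// Byte length of one bit-packed block: 128 integers at `num_bits` bits each.
fn compressed_block_num_bytes(num_bits: u8) -> usize {
    COMPRESSION_BLOCK_SIZE * num_bits as usize / 8
}

// Skipping whole blocks without decompressing them means advancing the
// position stream by the sum of their byte lengths.
fn skip_len_in_bytes(num_bits_per_block: &[u8]) -> usize {
    num_bits_per_block
        .iter()
        .map(|&num_bits| compressed_block_num_bytes(num_bits))
        .sum()
}

fn main() {
    assert_eq!(compressed_block_num_bytes(11), 128 * 11 / 8);
    assert_eq!(skip_len_in_bytes(&[5, 7, 11]), (5 + 7 + 11) * 128 / 8);
}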
|
use bitpacking::{BitPacker, BitPacker4x};
|
||||||
|
use owned_read::OwnedRead;
|
||||||
|
|
||||||
struct Positions {
|
struct Positions {
|
||||||
bit_packer: BitPacker4x,
|
bit_packer: BitPacker4x,
|
||||||
skip_file: FileSlice,
|
skip_source: ReadOnlySource,
|
||||||
position_file: FileSlice,
|
position_source: ReadOnlySource,
|
||||||
long_skip_data: OwnedBytes,
|
long_skip_source: ReadOnlySource,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Positions {
|
impl Positions {
|
||||||
pub fn new(position_file: FileSlice, skip_file: FileSlice) -> io::Result<Positions> {
|
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
|
||||||
let (body, footer) = skip_file.split_from_end(u32::SIZE_IN_BYTES);
|
let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
|
||||||
let footer_data = footer.read_bytes()?;
|
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
|
||||||
let num_long_skips = u32::deserialize(&mut footer_data.as_slice())?;
|
let (skip_source, long_skip_source) =
|
||||||
let (skip_file, long_skip_file) =
|
|
||||||
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
|
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
|
||||||
let long_skip_data = long_skip_file.read_bytes()?;
|
Positions {
|
||||||
Ok(Positions {
|
|
||||||
bit_packer: BitPacker4x::new(),
|
bit_packer: BitPacker4x::new(),
|
||||||
skip_file,
|
skip_source,
|
||||||
long_skip_data,
|
long_skip_source,
|
||||||
position_file,
|
position_source,
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
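For reference, the skip data that `Positions::new` splits apart is laid out, reading from the end, as: one `num_bits` byte per block, then one `u64` byte-offset per long skip, then a trailing `u32` holding the number of long skips (this matches what the serializer's `close()` writes further down). The stand-alone sketch below mirrors the two `split_from_end` calls over plain slices; the little-endian decoding is an assumption made for the example, not necessarily tantivy's on-disk encoding:

use std::convert::TryInto;

// Split skip data like `Positions::new` above: a trailing u32 gives the number
// of long skips, the 8-byte entries right before it are the long-skip offsets,
// and everything before that is one `num_bits` byte per block.
fn split_skip_data(data: &[u8]) -> (&[u8], &[u8]) {
    let (body, footer) = data.split_at(data.len() - 4);
    let num_long_skips = u32::from_le_bytes(footer.try_into().unwrap()) as usize;
    let (num_bits_per_block, long_skips) = body.split_at(body.len() - 8 * num_long_skips);
    (num_bits_per_block, long_skips)
}

fn main() {
    // Two blocks (num_bits 5 and 9), one long-skip entry, footer count = 1.
    let mut skip_data = vec![5u8, 9u8];
    skip_data.extend_from_slice(&42u64.to_le_bytes());
    skip_data.extend_from_slice(&1u32.to_le_bytes());
    let (num_bits_per_block, long_skips) = split_skip_data(&skip_data);
    assert_eq!(num_bits_per_block, &[5u8, 9u8]);
    assert_eq!(long_skips.len(), 8);
}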
|
|
||||||
/// Returns the offset of the block associated to the given `long_skip_id`.
|
/// Returns the offset of the block associated to the given `long_skip_id`.
|
||||||
@@ -59,116 +55,143 @@ impl Positions {
|
|||||||
if long_skip_id == 0 {
|
if long_skip_id == 0 {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
let long_skip_slice = self.long_skip_data.as_slice();
|
let long_skip_slice = self.long_skip_source.as_slice();
|
||||||
let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
|
let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
|
||||||
u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
|
u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
|
||||||
}
|
}
|
||||||
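The `reader(offset)` method just below combines `long_skip` with the constants of this module: an absolute position offset is split into a long-skip entry (one jump worth of `LONG_SKIP_IN_BLOCKS` blocks) and a residual skip handled block by block. A self-contained sketch of that split follows; the concrete value chosen for `LONG_SKIP_IN_BLOCKS` is an assumption for the example only:

// Constants as described in this module; LONG_SKIP_IN_BLOCKS is assumed here.
const COMPRESSION_BLOCK_SIZE: u64 = 128;
const LONG_SKIP_IN_BLOCKS: u64 = 1_024;
const LONG_SKIP_INTERVAL: u64 = LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE;

// Split an absolute position offset into the long-skip entry to jump to and
// the residual number of positions still to skip block by block.
fn split_offset(offset: u64) -> (u64, u64) {
    let long_skip_id = offset / LONG_SKIP_INTERVAL;
    let small_skip = offset % LONG_SKIP_INTERVAL;
    (long_skip_id, small_skip)
}

fn main() {
    assert_eq!(split_offset(0), (0, 0));
    assert_eq!(split_offset(LONG_SKIP_INTERVAL + 7), (1, 7));
    assert_eq!(split_offset(3 * LONG_SKIP_INTERVAL - 1), (2, LONG_SKIP_INTERVAL - 1));
}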
|
|
||||||
fn reader(&self, offset: u64) -> io::Result<PositionReader> {
|
fn reader(&self, offset: u64) -> PositionReader {
|
||||||
let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
|
let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
|
||||||
|
let small_skip = (offset % LONG_SKIP_INTERVAL) as usize;
|
||||||
let offset_num_bytes: u64 = self.long_skip(long_skip_id);
|
let offset_num_bytes: u64 = self.long_skip(long_skip_id);
|
||||||
let position_read = self
|
let mut position_read = OwnedRead::new(self.position_source.clone());
|
||||||
.position_file
|
position_read.advance(offset_num_bytes as usize);
|
||||||
.slice_from(offset_num_bytes as usize)
|
let mut skip_read = OwnedRead::new(self.skip_source.clone());
|
||||||
.read_bytes()?;
|
skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
|
||||||
let skip_read = self
|
let mut position_reader = PositionReader {
|
||||||
.skip_file
|
|
||||||
.slice_from(long_skip_id * LONG_SKIP_IN_BLOCKS)
|
|
||||||
.read_bytes()?;
|
|
||||||
Ok(PositionReader {
|
|
||||||
bit_packer: self.bit_packer,
|
bit_packer: self.bit_packer,
|
||||||
skip_read,
|
skip_read,
|
||||||
position_read,
|
position_read,
|
||||||
|
inner_offset: 0,
|
||||||
buffer: Box::new([0u32; 128]),
|
buffer: Box::new([0u32; 128]),
|
||||||
block_offset: std::i64::MAX as u64,
|
ahead: None,
|
||||||
anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
|
};
|
||||||
abs_offset: offset,
|
position_reader.skip(small_skip);
|
||||||
})
|
position_reader
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct PositionReader {
|
pub struct PositionReader {
|
||||||
skip_read: OwnedBytes,
|
skip_read: OwnedRead,
|
||||||
position_read: OwnedBytes,
|
position_read: OwnedRead,
|
||||||
bit_packer: BitPacker4x,
|
bit_packer: BitPacker4x,
|
||||||
buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,
|
inner_offset: usize,
|
||||||
|
buffer: Box<[u32; 128]>,
|
||||||
|
ahead: Option<usize>, // if None, no block is loaded.
|
||||||
|
// if Some(num_blocks), the block currently loaded is num_blocks ahead
|
||||||
|
// of the block of the next int to read.
|
||||||
|
}
|
||||||
|
|
||||||
block_offset: u64,
|
// `ahead` represents the offset of the block currently loaded
|
||||||
anchor_offset: u64,
|
// compared to the cursor of the actual stream.
|
||||||
|
//
|
||||||
abs_offset: u64,
|
// By contract, when this function is called, the current block has to be
|
||||||
|
// decompressed.
|
||||||
|
//
|
||||||
|
// If the requested number of els ends exactly at a given block, the next
|
||||||
|
// block is not decompressed.
|
||||||
|
fn read_impl(
|
||||||
|
bit_packer: BitPacker4x,
|
||||||
|
mut position: &[u8],
|
||||||
|
buffer: &mut [u32; 128],
|
||||||
|
mut inner_offset: usize,
|
||||||
|
num_bits: &[u8],
|
||||||
|
output: &mut [u32],
|
||||||
|
) -> usize {
|
||||||
|
let mut output_start = 0;
|
||||||
|
let mut output_len = output.len();
|
||||||
|
let mut ahead = 0;
|
||||||
|
loop {
|
||||||
|
let available_len = COMPRESSION_BLOCK_SIZE - inner_offset;
|
||||||
|
// We have enough elements in the current block.
|
||||||
|
// Let's copy the requested elements in the output buffer,
|
||||||
|
// and return.
|
||||||
|
if output_len <= available_len {
|
||||||
|
output[output_start..].copy_from_slice(&buffer[inner_offset..][..output_len]);
|
||||||
|
return ahead;
|
||||||
|
}
|
||||||
|
output[output_start..][..available_len].copy_from_slice(&buffer[inner_offset..]);
|
||||||
|
output_len -= available_len;
|
||||||
|
output_start += available_len;
|
||||||
|
inner_offset = 0;
|
||||||
|
let num_bits = num_bits[ahead];
|
||||||
|
bit_packer.decompress(position, &mut buffer[..], num_bits);
|
||||||
|
let block_len = compressed_block_size(num_bits);
|
||||||
|
position = &position[block_len..];
|
||||||
|
ahead += 1;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
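The comments above describe the contract of `read_impl`: the current block is already decompressed, the output is filled from `inner_offset` onward, and further blocks are decompressed only when the request spills past the current one, the return value being how many blocks ahead the loaded block ends up. Below is a stand-alone sketch of that copy loop over plain, uncompressed blocks; it is illustrative only and not the tantivy code:

const COMPRESSION_BLOCK_SIZE: usize = 128;

// Fill `output` starting at `inner_offset` inside the first block, moving to
// the following blocks only when the request spills past the current one.
// Returns how many blocks past the first one ended up "loaded", like the
// `ahead` value tracked above. Blocks are plain arrays here, not bit-packed.
fn read_across_blocks(
    blocks: &[[u32; COMPRESSION_BLOCK_SIZE]],
    mut inner_offset: usize,
    output: &mut [u32],
) -> usize {
    let mut output_start = 0;
    let mut ahead = 0;
    loop {
        let available = COMPRESSION_BLOCK_SIZE - inner_offset;
        let remaining = output.len() - output_start;
        if remaining <= available {
            output[output_start..].copy_from_slice(&blocks[ahead][inner_offset..][..remaining]);
            return ahead;
        }
        output[output_start..][..available].copy_from_slice(&blocks[ahead][inner_offset..]);
        output_start += available;
        inner_offset = 0;
        ahead += 1; // next block gets "decompressed"
    }
}

fn main() {
    let mut blocks = [[0u32; COMPRESSION_BLOCK_SIZE]; 2];
    for i in 0..2 * COMPRESSION_BLOCK_SIZE {
        blocks[i / COMPRESSION_BLOCK_SIZE][i % COMPRESSION_BLOCK_SIZE] = i as u32;
    }
    // Request 10 values starting 5 before the end of block 0: spills into block 1.
    let mut out = [0u32; 10];
    let ahead = read_across_blocks(&blocks, COMPRESSION_BLOCK_SIZE - 5, &mut out);
    assert_eq!(ahead, 1);
    assert_eq!(out, [123u32, 124, 125, 126, 127, 128, 129, 130, 131, 132]);
}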
|
|
||||||
impl PositionReader {
|
impl PositionReader {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
position_file: FileSlice,
|
position_source: ReadOnlySource,
|
||||||
skip_file: FileSlice,
|
skip_source: ReadOnlySource,
|
||||||
offset: u64,
|
offset: u64,
|
||||||
) -> io::Result<PositionReader> {
|
) -> PositionReader {
|
||||||
let positions = Positions::new(position_file, skip_file)?;
|
Positions::new(position_source, skip_source).reader(offset)
|
||||||
positions.reader(offset)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn advance_num_blocks(&mut self, num_blocks: usize) {
|
/// Fills a buffer with the next `output.len()` integers.
|
||||||
let num_bits: usize = self.skip_read.as_ref()[..num_blocks]
|
/// This does not consume / advance the stream.
|
||||||
.iter()
|
pub fn read(&mut self, output: &mut [u32]) {
|
||||||
.cloned()
|
let skip_data = self.skip_read.as_ref();
|
||||||
.map(|num_bits| num_bits as usize)
|
let position_data = self.position_read.as_ref();
|
||||||
.sum();
|
let num_bits = self.skip_read.get(0);
|
||||||
let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
|
if self.ahead != Some(0) {
|
||||||
self.skip_read.advance(num_blocks as usize);
|
// the block currently available is not the block
|
||||||
self.position_read.advance(num_bytes_to_skip);
|
// for the current position
|
||||||
}
|
|
||||||
|
|
||||||
/// Fills a buffer with the integers at positions `[offset..offset + output.len())`.
|
|
||||||
///
|
|
||||||
/// `offset` is required to have a value >= the offsets given in previous calls
|
|
||||||
/// for the given `PositionReader` instance.
|
|
||||||
pub fn read(&mut self, mut offset: u64, mut output: &mut [u32]) {
|
|
||||||
offset += self.abs_offset;
|
|
||||||
assert!(
|
|
||||||
offset >= self.anchor_offset,
|
|
||||||
"offset arguments should be increasing."
|
|
||||||
);
|
|
||||||
let delta_to_block_offset = offset as i64 - self.block_offset as i64;
|
|
||||||
if delta_to_block_offset < 0 || delta_to_block_offset >= 128 {
|
|
||||||
// The first position is not within the first block.
|
|
||||||
// We need to decompress the first block.
|
|
||||||
let delta_to_anchor_offset = offset - self.anchor_offset;
|
|
||||||
let num_blocks_to_skip =
|
|
||||||
(delta_to_anchor_offset / (COMPRESSION_BLOCK_SIZE as u64)) as usize;
|
|
||||||
self.advance_num_blocks(num_blocks_to_skip);
|
|
||||||
self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
|
|
||||||
self.block_offset = self.anchor_offset;
|
|
||||||
let num_bits = self.skip_read.as_slice()[0];
|
|
||||||
self.bit_packer
|
|
||||||
.decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
|
|
||||||
} else {
|
|
||||||
let num_blocks_to_skip =
|
|
||||||
((self.block_offset - self.anchor_offset) / COMPRESSION_BLOCK_SIZE as u64) as usize;
|
|
||||||
self.advance_num_blocks(num_blocks_to_skip);
|
|
||||||
self.anchor_offset = self.block_offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut num_bits = self.skip_read.as_slice()[0];
|
|
||||||
let mut position_data = self.position_read.as_ref();
|
|
||||||
|
|
||||||
for i in 1.. {
|
|
||||||
let offset_in_block = (offset as usize) % COMPRESSION_BLOCK_SIZE;
|
|
||||||
let remaining_in_block = COMPRESSION_BLOCK_SIZE - offset_in_block;
|
|
||||||
if remaining_in_block >= output.len() {
|
|
||||||
output.copy_from_slice(&self.buffer[offset_in_block..][..output.len()]);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
output[..remaining_in_block].copy_from_slice(&self.buffer[offset_in_block..]);
|
|
||||||
output = &mut output[remaining_in_block..];
|
|
||||||
offset += remaining_in_block as u64;
|
|
||||||
position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
|
|
||||||
num_bits = self.skip_read.as_slice()[i];
|
|
||||||
self.bit_packer
|
self.bit_packer
|
||||||
.decompress(position_data, self.buffer.as_mut(), num_bits);
|
.decompress(position_data, self.buffer.as_mut(), num_bits);
|
||||||
self.block_offset += COMPRESSION_BLOCK_SIZE as u64;
|
self.ahead = Some(0);
|
||||||
}
|
}
|
||||||
|
let block_len = compressed_block_size(num_bits);
|
||||||
|
self.ahead = Some(read_impl(
|
||||||
|
self.bit_packer,
|
||||||
|
&position_data[block_len..],
|
||||||
|
self.buffer.as_mut(),
|
||||||
|
self.inner_offset,
|
||||||
|
&skip_data[1..],
|
||||||
|
output,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Skip the next `skip_len` integers.
|
||||||
|
///
|
||||||
|
/// If a full block is skipped, calling
|
||||||
|
/// `.skip(...)` will avoid decompressing it.
|
||||||
|
///
|
||||||
|
/// May panic if the end of the stream is reached.
|
||||||
|
pub fn skip(&mut self, skip_len: usize) {
|
||||||
|
let skip_len_plus_inner_offset = skip_len + self.inner_offset;
|
||||||
|
|
||||||
|
let num_blocks_to_advance = skip_len_plus_inner_offset / COMPRESSION_BLOCK_SIZE;
|
||||||
|
self.inner_offset = skip_len_plus_inner_offset % COMPRESSION_BLOCK_SIZE;
|
||||||
|
|
||||||
|
self.ahead = self.ahead.and_then(|num_blocks| {
|
||||||
|
if num_blocks >= num_blocks_to_advance {
|
||||||
|
Some(num_blocks - num_blocks_to_advance)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let skip_len_in_bits = self.skip_read.as_ref()[..num_blocks_to_advance]
|
||||||
|
.iter()
|
||||||
|
.map(|num_bits| *num_bits as usize)
|
||||||
|
.sum::<usize>()
|
||||||
|
* COMPRESSION_BLOCK_SIZE;
|
||||||
|
let skip_len_in_bytes = skip_len_in_bits / 8;
|
||||||
|
self.skip_read.advance(num_blocks_to_advance);
|
||||||
|
self.position_read.advance(skip_len_in_bytes);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
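As documented above, `read` fills the output buffer with the next integers without consuming the stream, while `skip` is what advances it (and avoids decompressing any block that is skipped in full). The mock below, over a plain `Vec`, illustrates just that contract; it is a stand-in, not the real block-compressed reader:

// Stand-in over a plain Vec: `read` does not consume, `skip` advances.
struct MockPositionReader {
    positions: Vec<u32>,
    cursor: usize,
}

impl MockPositionReader {
    // Fills `output` with the next `output.len()` integers without advancing,
    // so two consecutive reads return the same values.
    fn read(&self, output: &mut [u32]) {
        output.copy_from_slice(&self.positions[self.cursor..self.cursor + output.len()]);
    }

    // Advances the stream; the real reader also jumps over whole compressed
    // blocks without decompressing them when possible.
    fn skip(&mut self, skip_len: usize) {
        self.cursor += skip_len;
    }
}

fn main() {
    let mut reader = MockPositionReader {
        positions: (0..1_000).collect(),
        cursor: 0,
    };
    let mut buf = [0u32; 4];
    reader.read(&mut buf);
    assert_eq!(buf, [0u32, 1, 2, 3]);
    reader.read(&mut buf); // same values: read does not consume
    assert_eq!(buf, [0u32, 1, 2, 3]);
    reader.skip(10);
    reader.read(&mut buf);
    assert_eq!(buf, [10u32, 11, 12, 13]);
}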
|
|||||||
@@ -8,7 +8,7 @@ use std::io::{self, Write};
|
|||||||
pub struct PositionSerializer<W: io::Write> {
|
pub struct PositionSerializer<W: io::Write> {
|
||||||
bit_packer: BitPacker4x,
|
bit_packer: BitPacker4x,
|
||||||
write_stream: CountingWriter<W>,
|
write_stream: CountingWriter<W>,
|
||||||
write_skip_index: W,
|
write_skiplist: W,
|
||||||
block: Vec<u32>,
|
block: Vec<u32>,
|
||||||
buffer: Vec<u8>,
|
buffer: Vec<u8>,
|
||||||
num_ints: u64,
|
num_ints: u64,
|
||||||
@@ -16,11 +16,11 @@ pub struct PositionSerializer<W: io::Write> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<W: io::Write> PositionSerializer<W> {
|
impl<W: io::Write> PositionSerializer<W> {
|
||||||
pub fn new(write_stream: W, write_skip_index: W) -> PositionSerializer<W> {
|
pub fn new(write_stream: W, write_skiplist: W) -> PositionSerializer<W> {
|
||||||
PositionSerializer {
|
PositionSerializer {
|
||||||
bit_packer: BitPacker4x::new(),
|
bit_packer: BitPacker4x::new(),
|
||||||
write_stream: CountingWriter::wrap(write_stream),
|
write_stream: CountingWriter::wrap(write_stream),
|
||||||
write_skip_index,
|
write_skiplist,
|
||||||
block: Vec::with_capacity(128),
|
block: Vec::with_capacity(128),
|
||||||
buffer: vec![0u8; 128 * 4],
|
buffer: vec![0u8; 128 * 4],
|
||||||
num_ints: 0u64,
|
num_ints: 0u64,
|
||||||
@@ -52,7 +52,7 @@ impl<W: io::Write> PositionSerializer<W> {
|
|||||||
|
|
||||||
fn flush_block(&mut self) -> io::Result<()> {
|
fn flush_block(&mut self) -> io::Result<()> {
|
||||||
let num_bits = self.bit_packer.num_bits(&self.block[..]);
|
let num_bits = self.bit_packer.num_bits(&self.block[..]);
|
||||||
self.write_skip_index.write_all(&[num_bits])?;
|
self.write_skiplist.write_all(&[num_bits])?;
|
||||||
let written_len = self
|
let written_len = self
|
||||||
.bit_packer
|
.bit_packer
|
||||||
.compress(&self.block[..], &mut self.buffer, num_bits);
|
.compress(&self.block[..], &mut self.buffer, num_bits);
|
||||||
@@ -70,10 +70,10 @@ impl<W: io::Write> PositionSerializer<W> {
|
|||||||
self.flush_block()?;
|
self.flush_block()?;
|
||||||
}
|
}
|
||||||
for &long_skip in &self.long_skips {
|
for &long_skip in &self.long_skips {
|
||||||
long_skip.serialize(&mut self.write_skip_index)?;
|
long_skip.serialize(&mut self.write_skiplist)?;
|
||||||
}
|
}
|
||||||
(self.long_skips.len() as u32).serialize(&mut self.write_skip_index)?;
|
(self.long_skips.len() as u32).serialize(&mut self.write_skiplist)?;
|
||||||
self.write_skip_index.flush()?;
|
self.write_skiplist.flush()?;
|
||||||
self.write_stream.flush()?;
|
self.write_stream.flush()?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -87,7 +87,6 @@ fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
|
|||||||
(begin, end)
|
(begin, end)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(never)]
|
|
||||||
fn galloping(block_docs: &[u32], target: u32) -> usize {
|
fn galloping(block_docs: &[u32], target: u32) -> usize {
|
||||||
let (start, end) = exponential_search(&block_docs, target);
|
let (start, end) = exponential_search(&block_docs, target);
|
||||||
start + linear_search(&block_docs[start..end], target)
|
start + linear_search(&block_docs[start..end], target)
|
||||||
@@ -130,18 +129,23 @@ impl BlockSearcher {
|
|||||||
///
|
///
|
||||||
     /// If SSE2 instructions are available in the `(platform, running CPU)`,
     /// then we use a different implementation that does an exhaustive linear search over
-    /// the block regardless of whether the block is full or not.
-    ///
-    /// Indeed, if the block is not full, the remaining items are TERMINATED.
-    /// It is surprisingly faster, most likely because of the lack of branch misprediction.
-    pub(crate) fn search_in_block(self, block_docs: &AlignedBuffer, target: u32) -> usize {
+    /// the full block whenever the block is full (`len == 128`). It is surprisingly faster, most likely because of the lack
+    /// of branch.
+    pub(crate) fn search_in_block(
+        self,
+        block_docs: &AlignedBuffer,
+        len: usize,
+        start: usize,
+        target: u32,
+    ) -> usize {
         #[cfg(target_arch = "x86_64")]
         {
-            if self == BlockSearcher::SSE2 {
+            use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
+            if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
                 return sse2::linear_search_sse2_128(block_docs, target);
             }
         }
-        galloping(&block_docs.0[..], target)
+        start + galloping(&block_docs.0[start..len], target)
     }
 }
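When the SSE2 path is not taken, `search_in_block` falls back to the galloping search defined earlier in this file: an exponential search narrows a window of the sorted block, then a linear scan finds the first element greater than or equal to the target. A stand-alone sketch of that fallback follows, using the same function names for readability but not the tantivy implementation itself:

// Window the sorted block with an exponential search, then scan linearly for
// the first element >= target.
fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
    let mut end = 1;
    while end < arr.len() && arr[end - 1] < target {
        end *= 2;
    }
    (end / 2, end.min(arr.len()))
}

fn linear_search(arr: &[u32], target: u32) -> usize {
    // Number of elements strictly below the target.
    arr.iter().take_while(|&&el| el < target).count()
}

fn galloping(block_docs: &[u32], target: u32) -> usize {
    let (start, end) = exponential_search(block_docs, target);
    start + linear_search(&block_docs[start..end], target)
}

fn main() {
    let block: Vec<u32> = (0..128).map(|i| i * 3).collect();
    // First index whose value is >= 10 is 4 (value 12).
    assert_eq!(galloping(&block, 10), 4);
    assert_eq!(galloping(&block, 0), 0);
    assert_eq!(galloping(&block, 1_000), 128);
}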
|
|
||||||
@@ -162,7 +166,6 @@ mod tests {
|
|||||||
use super::exponential_search;
|
use super::exponential_search;
|
||||||
use super::linear_search;
|
use super::linear_search;
|
||||||
use super::BlockSearcher;
|
use super::BlockSearcher;
|
||||||
use crate::docset::TERMINATED;
|
|
||||||
use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
|
use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -193,12 +196,19 @@ mod tests {
     fn util_test_search_in_block(block_searcher: BlockSearcher, block: &[u32], target: u32) {
         let cursor = search_in_block_trivial_but_slow(block, target);
         assert!(block.len() < COMPRESSION_BLOCK_SIZE);
-        let mut output_buffer = [TERMINATED; COMPRESSION_BLOCK_SIZE];
+        let mut output_buffer = [u32::max_value(); COMPRESSION_BLOCK_SIZE];
         output_buffer[..block.len()].copy_from_slice(block);
-        assert_eq!(
-            block_searcher.search_in_block(&AlignedBuffer(output_buffer), target),
-            cursor
-        );
+        for i in 0..cursor {
+            assert_eq!(
+                block_searcher.search_in_block(
+                    &AlignedBuffer(output_buffer),
+                    block.len(),
+                    i,
+                    target
+                ),
+                cursor
+            );
+        }
     }
 
     fn util_test_search_in_block_all(block_searcher: BlockSearcher, block: &[u32]) {
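The test helper above checks `search_in_block` against a cursor computed by `search_in_block_trivial_but_slow`, whose body is not shown in this diff. A plausible reference of that kind (hypothetical, not the actual helper) is simply the count of doc ids smaller than the target:

```rust
// Hypothetical reference implementation: the expected cursor is the number of
// doc ids in the block that are strictly smaller than the target.
fn search_in_block_trivial_but_slow(block: &[u32], target: u32) -> usize {
    block.iter().take_while(|&&doc| doc < target).count()
}

fn main() {
    assert_eq!(search_in_block_trivial_but_slow(&[2, 5, 9, 12], 9), 2);
}
```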
@@ -1,530 +0,0 @@
use std::io;

use crate::common::{BinarySerializable, VInt};
use crate::directory::FileSlice;
use crate::directory::OwnedBytes;
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{
    AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
};
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};

fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
    if let Some(first) = it.next() {
        Some(it.fold(first, Score::max))
    } else {
        None
    }
}

/// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents.
///
/// # Warning
///
/// While it is useful for some very specific high-performance
/// use cases, you should prefer using `SegmentPostings` for most usage.
#[derive(Clone)]
pub struct BlockSegmentPostings {
    pub(crate) doc_decoder: BlockDecoder,
    loaded_offset: usize,
    freq_decoder: BlockDecoder,
    freq_reading_option: FreqReadingOption,
    block_max_score_cache: Option<Score>,

    doc_freq: u32,

    data: OwnedBytes,
    pub(crate) skip_reader: SkipReader,
}

fn decode_bitpacked_block(
    doc_decoder: &mut BlockDecoder,
    freq_decoder_opt: Option<&mut BlockDecoder>,
    data: &[u8],
    doc_offset: DocId,
    doc_num_bits: u8,
    tf_num_bits: u8,
) {
    let num_consumed_bytes = doc_decoder.uncompress_block_sorted(data, doc_offset, doc_num_bits);
    if let Some(freq_decoder) = freq_decoder_opt {
        freq_decoder.uncompress_block_unsorted(&data[num_consumed_bytes..], tf_num_bits);
    }
}

fn decode_vint_block(
    doc_decoder: &mut BlockDecoder,
    freq_decoder_opt: Option<&mut BlockDecoder>,
    data: &[u8],
    doc_offset: DocId,
    num_vint_docs: usize,
) {
    let num_consumed_bytes =
        doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
    if let Some(freq_decoder) = freq_decoder_opt {
        freq_decoder.uncompress_vint_unsorted(
            &data[num_consumed_bytes..],
            num_vint_docs,
            TERMINATED,
        );
    }
}

fn split_into_skips_and_postings(
    doc_freq: u32,
    mut bytes: OwnedBytes,
) -> (Option<OwnedBytes>, OwnedBytes) {
    if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
        return (None, bytes);
    }
    let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize;
    let (skip_data, postings_data) = bytes.split(skip_len);
    (Some(skip_data), postings_data)
}

impl BlockSegmentPostings {
    pub(crate) fn open(
        doc_freq: u32,
        data: FileSlice,
        record_option: IndexRecordOption,
        requested_option: IndexRecordOption,
    ) -> io::Result<BlockSegmentPostings> {
        let freq_reading_option = match (record_option, requested_option) {
            (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
            (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
            (_, _) => FreqReadingOption::ReadFreq,
        };

        let (skip_data_opt, postings_data) =
            split_into_skips_and_postings(doc_freq, data.read_bytes()?);
        let skip_reader = match skip_data_opt {
            Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
            None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option),
        };

        let mut block_segment_postings = BlockSegmentPostings {
            doc_decoder: BlockDecoder::with_val(TERMINATED),
            loaded_offset: std::usize::MAX,
            freq_decoder: BlockDecoder::with_val(1),
            freq_reading_option,
            block_max_score_cache: None,
            doc_freq,
            data: postings_data,
            skip_reader,
        };
        block_segment_postings.load_block();
        Ok(block_segment_postings)
    }

    /// Returns the block_max_score for the current block.
    /// It does not require the block to be loaded. For instance, it is ok to call this method
    /// after having called `.shallow_advance(..)`.
    ///
    /// See `TermScorer::block_max_score(..)` for more information.
    pub fn block_max_score(
        &mut self,
        fieldnorm_reader: &FieldNormReader,
        bm25_weight: &BM25Weight,
    ) -> Score {
        if let Some(score) = self.block_max_score_cache {
            return score;
        }
        if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
            // if we are on a full block, the skip reader should have the block max information
            // for us
            self.block_max_score_cache = Some(skip_reader_max_score);
            return skip_reader_max_score;
        }
        // this is the last block of the segment posting list.
        // If it is actually loaded, we can compute block max manually.
        if self.block_is_loaded() {
            let docs = self.doc_decoder.output_array().iter().cloned();
            let freqs = self.freq_decoder.output_array().iter().cloned();
            let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
                let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
                bm25_weight.score(fieldnorm_id, term_freq)
            });
            let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
            self.block_max_score_cache = Some(block_max_score);
            return block_max_score;
        }
        // We do not have access to any good block max value. We return bm25_weight.max_score()
        // as it is a valid upperbound.
        //
        // We do not cache it however, so that it gets computed when once block is loaded.
        bm25_weight.max_score()
    }

    pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
        self.freq_reading_option
    }

    // Resets the block segment postings on another position
    // in the postings file.
    //
    // This is useful for enumerating through a list of terms,
    // and consuming the associated posting lists while avoiding
    // reallocating a `BlockSegmentPostings`.
    //
    // # Warning
    //
    // This does not reset the positions list.
    pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) {
        let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
        self.data = postings_data;
        self.block_max_score_cache = None;
        self.loaded_offset = std::usize::MAX;
        if let Some(skip_data) = skip_data_opt {
            self.skip_reader.reset(skip_data, doc_freq);
        } else {
            self.skip_reader.reset(OwnedBytes::empty(), doc_freq);
        }
        self.doc_freq = doc_freq;
        self.load_block();
    }

    /// Returns the overall number of documents in the block postings.
    /// It does not take in account whether documents are deleted or not.
    ///
    /// This `doc_freq` is simply the sum of the length of all of the blocks
    /// length, and it does not take in account deleted documents.
    pub fn doc_freq(&self) -> u32 {
        self.doc_freq
    }

    /// Returns the array of docs in the current block.
    ///
    /// Before the first call to `.advance()`, the block
    /// returned by `.docs()` is empty.
    #[inline]
    pub fn docs(&self) -> &[DocId] {
        debug_assert!(self.block_is_loaded());
        self.doc_decoder.output_array()
    }

    /// Returns a full block, regardless of whetehr the block is complete or incomplete (
    /// as it happens for the last block of the posting list).
    ///
    /// In the latter case, the block is guaranteed to be padded with the sentinel value:
    /// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
    ///
    /// This method is useful to run SSE2 linear search.
    #[inline(always)]
    pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
        debug_assert!(self.block_is_loaded());
        self.doc_decoder.output_aligned()
    }

    /// Return the document at index `idx` of the block.
    #[inline(always)]
    pub fn doc(&self, idx: usize) -> u32 {
        self.doc_decoder.output(idx)
    }

    /// Return the array of `term freq` in the block.
    #[inline]
    pub fn freqs(&self) -> &[u32] {
        debug_assert!(self.block_is_loaded());
        self.freq_decoder.output_array()
    }

    /// Return the frequency at index `idx` of the block.
    #[inline]
    pub fn freq(&self, idx: usize) -> u32 {
        debug_assert!(self.block_is_loaded());
        self.freq_decoder.output(idx)
    }

    /// Returns the length of the current block.
    ///
    /// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
    /// except the last block that may have a length
    /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
    #[inline]
    pub fn block_len(&self) -> usize {
        debug_assert!(self.block_is_loaded());
        self.doc_decoder.output_len
    }

    /// Position on a block that may contains `target_doc`.
    ///
    /// If all docs are smaller than target, the block loaded may be empty,
    /// or be the last an incomplete VInt block.
    pub fn seek(&mut self, target_doc: DocId) {
        self.shallow_seek(target_doc);
        self.load_block();
    }

    pub(crate) fn position_offset(&self) -> u64 {
        self.skip_reader.position_offset()
    }

    /// Dangerous API! This calls seek on the skip list,
    /// but does not `.load_block()` afterwards.
    ///
    /// `.load_block()` needs to be called manually afterwards.
    /// If all docs are smaller than target, the block loaded may be empty,
    /// or be the last an incomplete VInt block.
    pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
        if self.skip_reader.seek(target_doc) {
            self.block_max_score_cache = None;
        }
    }

    pub(crate) fn block_is_loaded(&self) -> bool {
        self.loaded_offset == self.skip_reader.byte_offset()
    }

    pub(crate) fn load_block(&mut self) {
        let offset = self.skip_reader.byte_offset();
        if self.loaded_offset == offset {
            return;
        }
        self.loaded_offset = offset;
        match self.skip_reader.block_info() {
            BlockInfo::BitPacked {
                doc_num_bits,
                tf_num_bits,
                ..
            } => {
                decode_bitpacked_block(
                    &mut self.doc_decoder,
                    if let FreqReadingOption::ReadFreq = self.freq_reading_option {
                        Some(&mut self.freq_decoder)
                    } else {
                        None
                    },
                    &self.data.as_slice()[offset..],
                    self.skip_reader.last_doc_in_previous_block,
                    doc_num_bits,
                    tf_num_bits,
                );
            }
            BlockInfo::VInt { num_docs } => {
                let data = {
                    if num_docs == 0 {
                        &[]
                    } else {
                        &self.data.as_slice()[offset..]
                    }
                };
                decode_vint_block(
                    &mut self.doc_decoder,
                    if let FreqReadingOption::ReadFreq = self.freq_reading_option {
                        Some(&mut self.freq_decoder)
                    } else {
                        None
                    },
                    data,
                    self.skip_reader.last_doc_in_previous_block,
                    num_docs as usize,
                );
            }
        }
    }

    /// Advance to the next block.
    ///
    /// Returns false iff there was no remaining blocks.
    pub fn advance(&mut self) {
        self.skip_reader.advance();
        self.block_max_score_cache = None;
        self.load_block();
    }

    /// Returns an empty segment postings object
    pub fn empty() -> BlockSegmentPostings {
        BlockSegmentPostings {
            doc_decoder: BlockDecoder::with_val(TERMINATED),
            loaded_offset: 0,
            freq_decoder: BlockDecoder::with_val(1),
            freq_reading_option: FreqReadingOption::NoFreq,
            block_max_score_cache: None,
            doc_freq: 0,
            data: OwnedBytes::empty(),
            skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::BlockSegmentPostings;
    use crate::common::HasLen;
    use crate::core::Index;
    use crate::docset::{DocSet, TERMINATED};
    use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
    use crate::postings::postings::Postings;
    use crate::postings::SegmentPostings;
    use crate::schema::IndexRecordOption;
    use crate::schema::Schema;
    use crate::schema::Term;
    use crate::schema::INDEXED;
    use crate::DocId;

    #[test]
    fn test_empty_segment_postings() {
        let mut postings = SegmentPostings::empty();
        assert_eq!(postings.doc(), TERMINATED);
        assert_eq!(postings.advance(), TERMINATED);
        assert_eq!(postings.advance(), TERMINATED);
        assert_eq!(postings.doc_freq(), 0);
        assert_eq!(postings.len(), 0);
    }

    #[test]
    fn test_empty_postings_doc_returns_terminated() {
        let mut postings = SegmentPostings::empty();
        assert_eq!(postings.doc(), TERMINATED);
        assert_eq!(postings.advance(), TERMINATED);
    }

    #[test]
    fn test_empty_postings_doc_term_freq_returns_0() {
        let postings = SegmentPostings::empty();
        assert_eq!(postings.term_freq(), 1);
    }

    #[test]
    fn test_empty_block_segment_postings() {
        let mut postings = BlockSegmentPostings::empty();
        assert!(postings.docs().is_empty());
        assert_eq!(postings.doc_freq(), 0);
        postings.advance();
        assert!(postings.docs().is_empty());
        assert_eq!(postings.doc_freq(), 0);
    }

    #[test]
    fn test_block_segment_postings() {
        let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
        let mut offset: u32 = 0u32;
        // checking that the `doc_freq` is correct
        assert_eq!(block_segments.doc_freq(), 100_000);
        loop {
            let block = block_segments.docs();
            if block.is_empty() {
                break;
            }
            for (i, doc) in block.iter().cloned().enumerate() {
                assert_eq!(offset + (i as u32), doc);
            }
            offset += block.len() as u32;
            block_segments.advance();
        }
    }

    #[test]
    fn test_skip_right_at_new_block() {
        let mut doc_ids = (0..128).collect::<Vec<u32>>();
        // 128 is missing
        doc_ids.push(129);
        doc_ids.push(130);
        {
            let block_segments = build_block_postings(&doc_ids);
            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
            assert_eq!(docset.seek(128), 129);
            assert_eq!(docset.doc(), 129);
            assert_eq!(docset.advance(), 130);
            assert_eq!(docset.doc(), 130);
            assert_eq!(docset.advance(), TERMINATED);
        }
        {
            let block_segments = build_block_postings(&doc_ids);
            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
            assert_eq!(docset.seek(129), 129);
            assert_eq!(docset.doc(), 129);
            assert_eq!(docset.advance(), 130);
            assert_eq!(docset.doc(), 130);
            assert_eq!(docset.advance(), TERMINATED);
        }
        {
            let block_segments = build_block_postings(&doc_ids);
            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
            assert_eq!(docset.doc(), 0);
            assert_eq!(docset.seek(131), TERMINATED);
            assert_eq!(docset.doc(), TERMINATED);
        }
    }

    fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
        let mut schema_builder = Schema::builder();
        let int_field = schema_builder.add_u64_field("id", INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();
        let mut last_doc = 0u32;
        for &doc in docs {
            for _ in last_doc..doc {
                index_writer.add_document(doc!(int_field=>1u64));
            }
            index_writer.add_document(doc!(int_field=>0u64));
            last_doc = doc + 1;
        }
        index_writer.commit().unwrap();
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let inverted_index = segment_reader.inverted_index(int_field).unwrap();
        let term = Term::from_field_u64(int_field, 0u64);
        let term_info = inverted_index.get_term_info(&term).unwrap().unwrap();
        inverted_index
            .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
            .unwrap()
    }

    #[test]
    fn test_block_segment_postings_seek() {
        let mut docs = vec![0];
        for i in 0..1300 {
            docs.push((i * i / 100) + i);
        }
        let mut block_postings = build_block_postings(&docs[..]);
        for i in vec![0, 424, 10000] {
            block_postings.seek(i);
            let docs = block_postings.docs();
            assert!(docs[0] <= i);
            assert!(docs.last().cloned().unwrap_or(0u32) >= i);
        }
        block_postings.seek(100_000);
        assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
    }

    #[test]
    fn test_reset_block_segment_postings() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let int_field = schema_builder.add_u64_field("id", INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests()?;
        // create two postings list, one containg even number,
        // the other containing odd numbers.
        for i in 0..6 {
            let doc = doc!(int_field=> (i % 2) as u64);
            index_writer.add_document(doc);
        }
        index_writer.commit()?;
        let searcher = index.reader()?.searcher();
        let segment_reader = searcher.segment_reader(0);

        let mut block_segments;
        {
            let term = Term::from_field_u64(int_field, 0u64);
            let inverted_index = segment_reader.inverted_index(int_field)?;
            let term_info = inverted_index.get_term_info(&term)?.unwrap();
            block_segments = inverted_index
                .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
        }
        assert_eq!(block_segments.docs(), &[0, 2, 4]);
        {
            let term = Term::from_field_u64(int_field, 1u64);
            let inverted_index = segment_reader.inverted_index(int_field)?;
            let term_info = inverted_index.get_term_info(&term)?.unwrap();
            inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
        }
        assert_eq!(block_segments.docs(), &[1, 3, 5]);
        Ok(())
    }
}
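The removed `block_max_score` method above tries a cached value first, then the skip reader's stored block max, then a maximum computed over the loaded block, and only then falls back to the term-level upper bound, which it deliberately does not cache. A condensed sketch of that decision order, using plain `f32` scores in place of the actual reader and weight types (an assumption for illustration):

```rust
// Sketch of the fallback order in the removed `block_max_score`:
// cached value -> skip-reader block max -> max over the loaded block -> term upper bound.
// The term-level upper bound is returned but deliberately not cached.
fn block_max_score(
    cache: &mut Option<f32>,
    skip_reader_max: Option<f32>,
    loaded_block_scores: Option<&[f32]>,
    term_upper_bound: f32,
) -> f32 {
    if let Some(score) = *cache {
        return score;
    }
    if let Some(score) = skip_reader_max {
        *cache = Some(score);
        return score;
    }
    if let Some(scores) = loaded_block_scores {
        let block_max = scores.iter().cloned().fold(0.0f32, f32::max);
        *cache = Some(block_max);
        return block_max;
    }
    term_upper_bound
}

fn main() {
    let mut cache = None;
    // Last (VInt) block of a posting list, already decoded: compute the max manually.
    assert_eq!(block_max_score(&mut cache, None, Some(&[1.2, 0.4, 2.5]), 3.0), 2.5);
    assert_eq!(cache, Some(2.5)); // the computed value is now cached
}
```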
@@ -17,12 +17,6 @@ pub struct BlockEncoder {
     pub output_len: usize,
 }
 
-impl Default for BlockEncoder {
-    fn default() -> Self {
-        BlockEncoder::new()
-    }
-}
-
 impl BlockEncoder {
     pub fn new() -> BlockEncoder {
         BlockEncoder {
@@ -52,23 +46,19 @@ impl BlockEncoder {
 /// We ensure that the OutputBuffer is align on 128 bits
 /// in order to run SSE2 linear search on it.
 #[repr(align(128))]
-#[derive(Clone)]
 pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
 
-#[derive(Clone)]
 pub struct BlockDecoder {
     bitpacker: BitPacker4x,
     output: AlignedBuffer,
     pub output_len: usize,
 }
 
-impl Default for BlockDecoder {
-    fn default() -> Self {
+impl BlockDecoder {
+    pub fn new() -> BlockDecoder {
         BlockDecoder::with_val(0u32)
     }
-}
 
-impl BlockDecoder {
     pub fn with_val(val: u32) -> BlockDecoder {
         BlockDecoder {
             bitpacker: BitPacker4x::new(),
@@ -100,8 +90,8 @@ impl BlockDecoder {
     }
 
     #[inline]
-    pub(crate) fn output_aligned(&self) -> &AlignedBuffer {
-        &self.output
+    pub(crate) fn output_aligned(&self) -> (&AlignedBuffer, usize) {
+        (&self.output, self.output_len)
     }
 
     #[inline]
@@ -144,14 +134,11 @@ pub trait VIntDecoder {
     /// For instance, if delta encoded are `1, 3, 9`, and the
     /// `offset` is 5, then the output will be:
     /// `5 + 1 = 6, 6 + 3= 9, 9 + 9 = 18`
-    ///
-    /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
-    fn uncompress_vint_sorted(
+    fn uncompress_vint_sorted<'a>(
         &mut self,
-        compressed_data: &[u8],
+        compressed_data: &'a [u8],
         offset: u32,
         num_els: usize,
-        padding: u32,
     ) -> usize;
 
     /// Uncompress an array of `u32s`, compressed using variable
@@ -159,14 +146,7 @@ pub trait VIntDecoder {
     ///
     /// The method takes a number of int to decompress, and returns
     /// the amount of bytes that were read to decompress them.
-    ///
-    /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
-    fn uncompress_vint_unsorted(
-        &mut self,
-        compressed_data: &[u8],
-        num_els: usize,
-        padding: u32,
-    ) -> usize;
+    fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize;
 }
 
 impl VIntEncoder for BlockEncoder {
@@ -180,26 +160,18 @@ impl VIntEncoder for BlockEncoder {
 }
 
 impl VIntDecoder for BlockDecoder {
-    fn uncompress_vint_sorted(
+    fn uncompress_vint_sorted<'a>(
         &mut self,
-        compressed_data: &[u8],
+        compressed_data: &'a [u8],
         offset: u32,
         num_els: usize,
-        padding: u32,
     ) -> usize {
         self.output_len = num_els;
-        self.output.0.iter_mut().for_each(|el| *el = padding);
         vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
     }
 
-    fn uncompress_vint_unsorted(
-        &mut self,
-        compressed_data: &[u8],
-        num_els: usize,
-        padding: u32,
-    ) -> usize {
+    fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
         self.output_len = num_els;
-        self.output.0.iter_mut().for_each(|el| *el = padding);
         vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
     }
 }
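The `padding` argument removed above used to pre-fill the whole 128-slot output buffer before `num_els` values were decoded, so the unused tail of a short VInt block held a sentinel value. A toy sketch of that pre-fill behaviour (not tantivy's decoder, just the padding idea):

```rust
// Toy illustration of the removed padding behaviour: fill all 128 slots with
// the padding value first, then overwrite the first `num_els` decoded values.
const COMPRESSION_BLOCK_SIZE: usize = 128;

fn decode_with_padding(decoded: &[u32], padding: u32) -> [u32; COMPRESSION_BLOCK_SIZE] {
    let mut output = [padding; COMPRESSION_BLOCK_SIZE];
    output[..decoded.len()].copy_from_slice(decoded);
    output
}

fn main() {
    let block = decode_with_padding(&[3, 7, 9], u32::MAX);
    assert_eq!(&block[..3], &[3, 7, 9]);
    assert!(block[3..].iter().all(|&v| v == u32::MAX));
}
```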
@@ -208,14 +180,13 @@ impl VIntDecoder for BlockDecoder {
 pub mod tests {
 
     use super::*;
-    use crate::TERMINATED;
 
     #[test]
     fn test_encode_sorted_block() {
         let vals: Vec<u32> = (0u32..128u32).map(|i| i * 7).collect();
         let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 0);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 0, num_bits);
             assert_eq!(consumed_num_bytes, compressed_data.len());
@@ -228,9 +199,9 @@ pub mod tests {
     #[test]
     fn test_encode_sorted_block_with_offset() {
         let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i * 7).collect();
-        let mut encoder = BlockEncoder::default();
+        let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 10);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 10, num_bits);
             assert_eq!(consumed_num_bytes, compressed_data.len());
@@ -245,11 +216,11 @@ pub mod tests {
         let mut compressed: Vec<u8> = Vec::new();
         let n = 128;
         let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32).collect();
-        let mut encoder = BlockEncoder::default();
+        let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 10);
         compressed.extend_from_slice(compressed_data);
         compressed.push(173u8);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_sorted(&compressed, 10, num_bits);
             assert_eq!(consumed_num_bytes, compressed.len() - 1);
@@ -265,11 +236,11 @@ pub mod tests {
         let mut compressed: Vec<u8> = Vec::new();
         let n = 128;
         let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32 % 12).collect();
-        let mut encoder = BlockEncoder::default();
+        let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_unsorted(&vals);
         compressed.extend_from_slice(compressed_data);
         compressed.push(173u8);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_unsorted(&compressed, num_bits);
             assert_eq!(consumed_num_bytes + 1, compressed.len());
@@ -280,27 +251,20 @@ pub mod tests {
         }
     }
 
-    #[test]
-    fn test_block_decoder_initialization() {
-        let block = BlockDecoder::with_val(TERMINATED);
-        assert_eq!(block.output(0), TERMINATED);
-    }
     #[test]
     fn test_encode_vint() {
-        const PADDING_VALUE: u32 = 234_234_345u32;
+        {
         let expected_length = 154;
         let mut encoder = BlockEncoder::new();
         let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
         for offset in &[0u32, 1u32, 2u32] {
             let encoded_data = encoder.compress_vint_sorted(&input, *offset);
             assert!(encoded_data.len() <= expected_length);
-            let mut decoder = BlockDecoder::default();
+            let mut decoder = BlockDecoder::new();
             let consumed_num_bytes =
-                decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
+                decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
             assert_eq!(consumed_num_bytes, encoded_data.len());
             assert_eq!(input, decoder.output_array());
-            for i in input.len()..COMPRESSION_BLOCK_SIZE {
-                assert_eq!(decoder.output(i), PADDING_VALUE);
-            }
             }
         }
     }
@@ -310,7 +274,6 @@ pub mod tests {
 mod bench {
 
     use super::*;
-    use crate::TERMINATED;
     use rand::rngs::StdRng;
     use rand::Rng;
     use rand::SeedableRng;
@@ -341,7 +304,7 @@ mod bench {
         let mut encoder = BlockEncoder::new();
         let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
         let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         b.iter(|| {
             decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
         });
@@ -376,9 +339,9 @@ mod bench {
         let mut encoder = BlockEncoder::new();
         let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
         let compressed = encoder.compress_vint_sorted(&data, 0u32);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         b.iter(|| {
-            decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED);
+            decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
         });
     }
 }
Some files were not shown because too many files have changed in this diff.