Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-28 13:02:55 +00:00
Compare commits
10 Commits
Commits in this comparison:

- 9bc6a43917
- e5bf41c1f6
- 58d40ebf95
- f07634517f
- 2edea107ef
- a6b5f4f5b5
- 7859ee4a39
- f06e116aae
- ec32e0546a
- 0cd10e1197
.gitignore (vendored, 2 changes)
@@ -1,5 +1,4 @@
tantivy.iml
proptest-regressions
*.swp
target
target/debug
@@ -12,4 +11,3 @@ cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat
cargo-timing*
CHANGELOG.md (50 changes)
@@ -1,53 +1,3 @@
Tantivy 0.14.0
=========================
- Removed the dependency on atomicwrites #833. (Implemented by @pmasurel upon a suggestion and research from @asafigan.)
- Migrated tantivy's error type from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
- API Change. Accessing the typed value of a `Schema::Value` now returns an `Option` instead of panicking if the type does not match.
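A minimal sketch of the resulting calling pattern, assuming a stored text field handle named `title`; the helper and its arguments are illustrative, not part of the release:
```rust
use tantivy::schema::Field;
use tantivy::Document;

// Hedged sketch: typed accessors such as `text()` now return an Option
// instead of panicking when the stored value has a different type.
fn first_title(doc: &Document, title: Field) -> Option<String> {
    doc.get_first(title)                  // Option<&Value>
        .and_then(|value| value.text())   // Option<&str>, None if the value is not text
        .map(|text| text.to_owned())
}
```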
- Large API change in the Directory API. Tantivy used to assume that all files could somehow be memory mapped. After this change, a Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span the entire file.
- Added support for Brotli compression in the DocStore. (@ppodolsky)
- Added helpers for building intersections and unions in BooleanQuery. (@guilload)
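A hedged sketch of how such a helper might be used, assuming the helpers are exposed as `BooleanQuery::intersection` / `BooleanQuery::union` and accept boxed queries; check the actual signatures in the change by @guilload:
```rust
use tantivy::query::{BooleanQuery, Query, TermQuery};
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::Term;

// Hedged sketch: a query matching documents that contain all of the given words.
fn all_of(field: Field, words: &[&str]) -> BooleanQuery {
    let term_queries: Vec<Box<dyn Query>> = words
        .iter()
        .map(|word| {
            Box::new(TermQuery::new(
                Term::from_field_text(field, word),
                IndexRecordOption::Basic,
            )) as Box<dyn Query>
        })
        .collect();
    BooleanQuery::intersection(term_queries)
}
```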
- Bugfix in `Query::explain`.
- Made it possible to opt out of the generation of fieldnorm information for indexed fields. This change breaks compatibility, as the meta.json file format is slightly changed. (#922, @pmasurel)

Tantivy 0.13.2
===================
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`. (#896)
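A minimal sketch of the calling pattern this implies, assuming the facet reader is returned as an `Option` as described above; the helper and its arguments are illustrative:
```rust
use tantivy::schema::Field;
use tantivy::{DocId, SegmentReader};

// Hedged sketch: a segment that holds no value for the facet field simply
// yields no reader, so the Option has to be handled explicitly.
fn facet_ords_for_doc(reader: &SegmentReader, facet_field: Field, doc: DocId) -> Vec<u64> {
    let mut ords = Vec::new();
    if let Some(mut facet_reader) = reader.facet_reader(facet_field) {
        facet_reader.facet_ords(doc, &mut ords);
    }
    ords
}
```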

Tantivy 0.13.1
===================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.

Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (Block-WAND information is added to the skip list).
The index size increase is minor, as this information is only added for
full blocks.
If you have a massive index for which reindexing is not an option, please contact me
so that we can discuss possible solutions.

- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should. (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be `mut`, and they are not required to be `Sync + Send`.
- `MMapDirectory::open` does not return a `Result` anymore.
- Change in the DocSet and Scorer APIs. (@fulmicoton)
A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)`, which returns the resulting DocId.
As a result, iterating through a DocSet now looks as follows:
```rust
let mut doc = docset.doc();
while doc != TERMINATED {
    // ...
    doc = docset.advance();
}
```
The change made it possible to greatly simplify a lot of the docset's code.
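For skipping, the same loop can start from `seek` instead of the initial position. A minimal sketch, assuming a DocSet implementation and a target DocId (the helper is illustrative):
```rust
use tantivy::{DocId, DocSet, TERMINATED};

// Hedged sketch: seek() positions the DocSet on the first doc >= target and
// returns it, so the same TERMINATED-based loop applies.
fn count_from(mut docset: impl DocSet, target: DocId) -> usize {
    let mut count = 0;
    let mut doc = docset.seek(target);
    while doc != TERMINATED {
        count += 1;
        doc = docset.advance();
    }
    count
}
```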
- Misc internal optimizations and the introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
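A one-line sketch of the pagination this enables, assuming the option is exposed as `and_offset` (mirroring the `TopCollector` changes shown further down in this diff):
```rust
use tantivy::collector::TopDocs;

// Hedged sketch: hits 11..=20, similar to `LIMIT 10 OFFSET 10` in SQL.
fn second_page() -> TopDocs {
    TopDocs::with_limit(10).and_offset(10)
}
```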
- Added Block-WAND. Performance of top-K queries on term unions should be greatly increased. (@fulmicoton, and special thanks
to the PISA team for answering all my questions!)

Tantivy 0.12.0
======================
- Removed static dispatch in tokenizers for simplicity. (#762)

Cargo.toml (58 changes)
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.14.0-dev"
version = "0.12.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -13,40 +13,42 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
base64 = "0.13"
byteorder = "1"
crc32fast = "1"
once_cell = "1"
regex ={version = "1", default-features = false, features = ["std"]}
tantivy-fst = "0.3"
base64 = "0.12.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.2.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1", optional=true}
brotli = {version="3.3.0", optional=true}
lz4 = {version="1.20", optional=true}
snap = "1"
tempfile = {version="3", optional=true}
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
serde = {version="1", features=["derive"]}
serde_json = "1"
num_cpus = "1"
serde = {version="1.0", features=["derive"]}
serde_json = "1.0"
num_cpus = "1.2"
fs2={version="0.4", optional=true}
levenshtein_automata = "0.2"
levenshtein_automata = "0.1"
notify = {version="4", optional=true}
uuid = { version = "0.8", features = ["v4", "serde"] }
crossbeam = "0.8"
crossbeam = "0.7"
futures = {version = "0.3", features=["thread-pool"] }
tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
stable_deref_trait = "1"
rust-stemmers = "1"
downcast-rs = "1"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.2"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { version="0.12", path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
fnv = "1"
thiserror = "1.0"
htmlescape = "0.3"
fail = "0.4"
census = {path="../census"}
fnv = "1.0.6"
owned-read = "0.4"
failure = "0.1"
htmlescape = "0.3.1"
fail = "0.3"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "1"
smallvec = "1.0"
rayon = "1"

[target.'cfg(windows)'.dependencies]
@@ -56,10 +58,9 @@ winapi = "0.3"
rand = "0.7"
maplit = "1"
matches = "0.1.8"
proptest = "0.10"

[dev-dependencies.fail]
version = "0.4"
version = "0.3"
features = ["failpoints"]

[profile.release]
@@ -73,8 +74,7 @@ overflow-checks = true

[features]
default = ["mmap"]
mmap = ["fs2", "tempfile", "memmap", "notify"]
brotli-compression = ["brotli"]
mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.

@@ -5,6 +5,7 @@
[](https://opensource.org/licenses/MIT)
[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
[](https://crates.io/crates/tantivy)
[](https://saythanks.io/to/fulmicoton)



@@ -30,11 +31,12 @@ Tantivy is, in fact, strongly inspired by Lucene's design.

# Benchmark

Tantivy is typically faster than Lucene, but the results depend on
the nature of the queries in your workload.

The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
performance for different types of queries / collections.

Your mileage WILL vary depending on the nature of queries and their load.

# Features

- Full-text search
@@ -84,7 +86,7 @@ There are many ways to support this project.
- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
- Talk about Tantivy around you
- [](https://www.patreon.com/fulmicoton)
- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)

# Contributing code

TODO.md (new file, 9 lines)
@@ -0,0 +1,9 @@
- segment writer serializes if already flush
- flush if exceed max doc.
- unit tests
- reader from `IndexWriter`
- configurable limit to flush
- bundle directory
- add index worker when exceeds some doc limit
- flush before prepare commit.
- segment_writer should not receive a segment
@@ -18,5 +18,5 @@ install:
build: false

test_script:
- REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features mmap
- REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap
- REM SET RUST_BACKTRACE=1 & cargo build --examples

@@ -112,6 +112,18 @@ fn main() -> tantivy::Result<()> {
|
||||
limbs and branches that arch over the pool"
|
||||
));
|
||||
|
||||
index_writer.add_document(doc!(
|
||||
title => "Of Mice and Men",
|
||||
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
|
||||
bank and runs deep and green. The water is warm too, for it has slipped twinkling \
|
||||
over the yellow sands in the sunlight before reaching the narrow pool. On one \
|
||||
side of the river the golden foothill slopes curve up to the strong and rocky \
|
||||
Gabilan Mountains, but on the valley side the water is lined with trees—willows \
|
||||
fresh and green with every spring, carrying in their lower leaf junctures the \
|
||||
debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
|
||||
limbs and branches that arch over the pool"
|
||||
));
|
||||
|
||||
// Multivalued fields just need to be repeated.
|
||||
index_writer.add_document(doc!(
|
||||
title => "Frankenstein",
|
||||
|
||||
@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::Field;
|
||||
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader, TantivyError};
|
||||
use tantivy::{doc, Index, SegmentReader, TantivyError};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Stats {
|
||||
@@ -114,7 +114,7 @@ struct StatsSegmentCollector {
|
||||
impl SegmentCollector for StatsSegmentCollector {
|
||||
type Fruit = Option<Stats>;
|
||||
|
||||
fn collect(&mut self, doc: u32, _score: Score) {
|
||||
fn collect(&mut self, doc: u32, _score: f32) {
|
||||
let value = self.fast_field_reader.get(doc) as f64;
|
||||
self.stats.count += 1;
|
||||
self.stats.sum += value;
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
use std::collections::HashSet;
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::doc;
|
||||
use tantivy::query::BooleanQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{DocId, Index, Score, SegmentReader};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
|
||||
let title = schema_builder.add_text_field("title", STORED);
|
||||
let ingredient = schema_builder.add_facet_field("ingredient");
|
||||
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(30_000_000)?;
|
||||
|
||||
index_writer.add_document(doc!(
|
||||
title => "Fried egg",
|
||||
ingredient => Facet::from("/ingredient/egg"),
|
||||
ingredient => Facet::from("/ingredient/oil"),
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
title => "Scrambled egg",
|
||||
ingredient => Facet::from("/ingredient/egg"),
|
||||
ingredient => Facet::from("/ingredient/butter"),
|
||||
ingredient => Facet::from("/ingredient/milk"),
|
||||
ingredient => Facet::from("/ingredient/salt"),
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
title => "Egg rolls",
|
||||
ingredient => Facet::from("/ingredient/egg"),
|
||||
ingredient => Facet::from("/ingredient/garlic"),
|
||||
ingredient => Facet::from("/ingredient/salt"),
|
||||
ingredient => Facet::from("/ingredient/oil"),
|
||||
ingredient => Facet::from("/ingredient/tortilla-wrap"),
|
||||
ingredient => Facet::from("/ingredient/mushroom"),
|
||||
));
|
||||
index_writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
{
|
||||
let facets = vec![
|
||||
Facet::from("/ingredient/egg"),
|
||||
Facet::from("/ingredient/oil"),
|
||||
Facet::from("/ingredient/garlic"),
|
||||
Facet::from("/ingredient/mushroom"),
|
||||
];
|
||||
let query = BooleanQuery::new_multiterms_query(
|
||||
facets
|
||||
.iter()
|
||||
.map(|key| Term::from_facet(ingredient, &key))
|
||||
.collect(),
|
||||
);
|
||||
let top_docs_by_custom_score =
|
||||
TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
|
||||
let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
|
||||
let facet_dict = ingredient_reader.facet_dict();
|
||||
|
||||
let query_ords: HashSet<u64> = facets
|
||||
.iter()
|
||||
.filter_map(|key| facet_dict.term_ord(key.encoded_str()))
|
||||
.collect();
|
||||
|
||||
let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);
|
||||
|
||||
move |doc: DocId, original_score: Score| {
|
||||
ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
|
||||
let missing_ingredients = facet_ords_buffer
|
||||
.iter()
|
||||
.filter(|ord| !query_ords.contains(ord))
|
||||
.count();
|
||||
let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);
|
||||
|
||||
original_score * tweak
|
||||
}
|
||||
});
|
||||
let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;
|
||||
|
||||
let titles: Vec<String> = top_docs
|
||||
.iter()
|
||||
.map(|(_, doc_id)| {
|
||||
searcher
|
||||
.doc(*doc_id)
|
||||
.unwrap()
|
||||
.get_first(title)
|
||||
.unwrap()
|
||||
.text()
|
||||
.unwrap()
|
||||
.to_owned()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -10,7 +10,7 @@
|
||||
// ---
|
||||
// Importing tantivy...
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, DocSet, Index, Postings, TERMINATED};
|
||||
use tantivy::{doc, DocId, DocSet, Index, Postings};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// We first create a schema for the sake of the
|
||||
@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Inverted index stands for the combination of
|
||||
// - the term dictionary
|
||||
// - the inverted lists associated with each term and their positions
|
||||
let inverted_index = segment_reader.inverted_index(title)?;
|
||||
let inverted_index = segment_reader.inverted_index(title);
|
||||
|
||||
// A `Term` is a text token associated with a field.
|
||||
// Let's go through all docs containing the term `title:the` and access their position
|
||||
@@ -58,15 +58,16 @@ fn main() -> tantivy::Result<()> {
|
||||
// If you don't need all this information, you may get better performance by decompressing less
|
||||
// information.
|
||||
if let Some(mut segment_postings) =
|
||||
inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
|
||||
inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
|
||||
{
|
||||
// this buffer will be used to request positions
|
||||
let mut positions: Vec<u32> = Vec::with_capacity(100);
|
||||
let mut doc_id = segment_postings.doc();
|
||||
while doc_id != TERMINATED {
|
||||
while segment_postings.advance() {
|
||||
// the number of times the term appears in the document.
|
||||
let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.
|
||||
|
||||
// This MAY contain deleted documents as well.
|
||||
if segment_reader.is_deleted(doc_id) {
|
||||
doc_id = segment_postings.advance();
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -85,7 +86,6 @@ fn main() -> tantivy::Result<()> {
|
||||
// Doc 2: TermFreq 1: [0]
|
||||
// ```
|
||||
println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
|
||||
doc_id = segment_postings.advance();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Inverted index stands for the combination of
|
||||
// - the term dictionary
|
||||
// - the inverted lists associated with each term and their positions
|
||||
let inverted_index = segment_reader.inverted_index(title)?;
|
||||
let inverted_index = segment_reader.inverted_index(title);
|
||||
|
||||
// This segment posting object is like a cursor over the documents matching the term.
|
||||
// The `IndexRecordOption` argument tells tantivy that we will be interested in both term frequencies
|
||||
@@ -115,18 +115,13 @@ fn main() -> tantivy::Result<()> {
|
||||
// If you don't need all this information, you may get better performance by decompressing less
|
||||
// information.
|
||||
if let Some(mut block_segment_postings) =
|
||||
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
|
||||
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
|
||||
{
|
||||
loop {
|
||||
let docs = block_segment_postings.docs();
|
||||
if docs.is_empty() {
|
||||
break;
|
||||
}
|
||||
while block_segment_postings.advance() {
|
||||
// Once again, these docs MAY contain deleted documents as well.
|
||||
let docs = block_segment_postings.docs();
|
||||
// Prints `Docs [0, 2].`
|
||||
println!("Docs {:?}", docs);
|
||||
block_segment_postings.advance();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-query-grammar"
|
||||
version = "0.14.0-dev"
|
||||
version = "0.12.0"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
|
||||
@@ -31,12 +31,22 @@ impl Occur {
|
||||
|
||||
/// Compose two occur values.
|
||||
pub fn compose(left: Occur, right: Occur) -> Occur {
|
||||
match (left, right) {
|
||||
(Occur::Should, _) => right,
|
||||
(Occur::Must, Occur::MustNot) => Occur::MustNot,
|
||||
(Occur::Must, _) => Occur::Must,
|
||||
(Occur::MustNot, Occur::MustNot) => Occur::Must,
|
||||
(Occur::MustNot, _) => Occur::MustNot,
|
||||
match left {
|
||||
Occur::Should => right,
|
||||
Occur::Must => {
|
||||
if right == Occur::MustNot {
|
||||
Occur::MustNot
|
||||
} else {
|
||||
Occur::Must
|
||||
}
|
||||
}
|
||||
Occur::MustNot => {
|
||||
if right == Occur::MustNot {
|
||||
Occur::Must
|
||||
} else {
|
||||
Occur::MustNot
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -46,27 +56,3 @@ impl fmt::Display for Occur {
|
||||
f.write_char(self.to_char())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::Occur;
|
||||
|
||||
#[test]
|
||||
fn test_occur_compose() {
|
||||
assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
|
||||
assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
|
||||
assert_eq!(
|
||||
Occur::compose(Occur::Should, Occur::MustNot),
|
||||
Occur::MustNot
|
||||
);
|
||||
assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
|
||||
assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
|
||||
assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
|
||||
assert_eq!(
|
||||
Occur::compose(Occur::MustNot, Occur::Should),
|
||||
Occur::MustNot
|
||||
);
|
||||
assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
|
||||
assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,10 +9,8 @@ use combine::{
|
||||
|
||||
fn field<'a>() -> impl Parser<&'a str, Output = String> {
|
||||
(
|
||||
(letter().or(char('_'))),
|
||||
many(satisfy(|c: char| {
|
||||
c.is_alphanumeric() || c == '_' || c == '-'
|
||||
})),
|
||||
letter(),
|
||||
many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
|
||||
)
|
||||
.skip(char(':'))
|
||||
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
|
||||
@@ -156,11 +154,17 @@ fn negate(expr: UserInputAST) -> UserInputAST {
|
||||
expr.unary(Occur::MustNot)
|
||||
}
|
||||
|
||||
fn must(expr: UserInputAST) -> UserInputAST {
|
||||
expr.unary(Occur::Must)
|
||||
}
|
||||
|
||||
fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
|
||||
parser(|input| {
|
||||
char('(')
|
||||
.with(ast())
|
||||
.skip(char(')'))
|
||||
char('-')
|
||||
.with(leaf())
|
||||
.map(negate)
|
||||
.or(char('+').with(leaf()).map(must))
|
||||
.or(char('(').with(ast()).skip(char(')')))
|
||||
.or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
|
||||
.or(attempt(
|
||||
string("NOT").skip(spaces1()).with(leaf()).map(negate),
|
||||
@@ -172,17 +176,7 @@ fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
|
||||
})
|
||||
}
|
||||
|
||||
fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
|
||||
char('-')
|
||||
.map(|_| Occur::MustNot)
|
||||
.or(char('+').map(|_| Occur::Must))
|
||||
}
|
||||
|
||||
fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
|
||||
(optional(occur_symbol()), boosted_leaf())
|
||||
}
|
||||
|
||||
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
|
||||
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
|
||||
(many1(digit()), optional((char('.'), many1(digit())))).map(
|
||||
|(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
|
||||
let mut float_str = int_part;
|
||||
@@ -190,18 +184,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
|
||||
float_str.push(chr);
|
||||
float_str.push_str(&decimal_str);
|
||||
}
|
||||
float_str.parse::<f64>().unwrap()
|
||||
float_str.parse::<f32>().unwrap()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
|
||||
fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
|
||||
(char('^'), positive_float_number()).map(|(_, boost)| boost)
|
||||
}
|
||||
|
||||
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
|
||||
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
|
||||
Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
|
||||
Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
|
||||
UserInputAST::Boost(Box::new(leaf), boost)
|
||||
}
|
||||
_ => leaf,
|
||||
@@ -245,29 +239,21 @@ fn aggregate_binary_expressions(
|
||||
}
|
||||
}
|
||||
|
||||
fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAST)> {
|
||||
(
|
||||
pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
|
||||
let operand_leaf = (
|
||||
binary_operand().skip(spaces()),
|
||||
boosted_leaf().skip(spaces()),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
|
||||
let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
|
||||
);
|
||||
let boolean_expr = (boosted_leaf().skip(spaces().silent()), many1(operand_leaf))
|
||||
.map(|(left, right)| aggregate_binary_expressions(left, right));
|
||||
let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
|
||||
|subqueries: Vec<(Option<Occur>, UserInputAST)>| {
|
||||
let whitespace_separated_leaves =
|
||||
many1(boosted_leaf().skip(spaces().silent())).map(|subqueries: Vec<UserInputAST>| {
|
||||
if subqueries.len() == 1 {
|
||||
let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
|
||||
match occur_opt.unwrap_or(Occur::Should) {
|
||||
Occur::Must | Occur::Should => ast,
|
||||
Occur::MustNot => UserInputAST::Clause(vec![(Some(Occur::MustNot), ast)]),
|
||||
}
|
||||
subqueries.into_iter().next().unwrap()
|
||||
} else {
|
||||
UserInputAST::Clause(subqueries.into_iter().collect())
|
||||
}
|
||||
},
|
||||
);
|
||||
});
|
||||
let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
|
||||
spaces().with(expr).skip(spaces())
|
||||
}
|
||||
@@ -281,16 +267,14 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
type TestParseResult = Result<(), StringStreamError>;
|
||||
|
||||
use super::*;
|
||||
use combine::parser::Parser;
|
||||
|
||||
pub fn nearly_equals(a: f64, b: f64) -> bool {
|
||||
pub fn nearly_equals(a: f32, b: f32) -> bool {
|
||||
(a - b).abs() < 0.0005 * (a + b).abs()
|
||||
}
|
||||
|
||||
fn assert_nearly_equals(expected: f64, val: f64) {
|
||||
fn assert_nearly_equals(expected: f32, val: f32) {
|
||||
assert!(
|
||||
nearly_equals(val, expected),
|
||||
"Got {}, expected {}.",
|
||||
@@ -299,16 +283,9 @@ mod test {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_occur_symbol() -> TestParseResult {
|
||||
assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
|
||||
assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_positive_float_number() {
|
||||
fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
|
||||
fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
|
||||
let (val, remaining) = positive_float_number().parse(float_str).unwrap();
|
||||
assert_eq!(remaining, expected_remaining);
|
||||
assert_nearly_equals(val, expected_val);
|
||||
@@ -316,9 +293,9 @@ mod test {
|
||||
fn error_parse(float_str: &str) {
|
||||
assert!(positive_float_number().parse(float_str).is_err());
|
||||
}
|
||||
valid_parse("1.0", 1.0, "");
|
||||
valid_parse("1", 1.0, "");
|
||||
valid_parse("0.234234 aaa", 0.234234f64, " aaa");
|
||||
valid_parse("1.0", 1.0f32, "");
|
||||
valid_parse("1", 1.0f32, "");
|
||||
valid_parse("0.234234 aaa", 0.234234f32, " aaa");
|
||||
error_parse(".3332");
|
||||
error_parse("1.");
|
||||
error_parse("-1.");
|
||||
@@ -353,7 +330,7 @@ mod test {
|
||||
"Err(UnexpectedParse)"
|
||||
);
|
||||
test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
|
||||
test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
|
||||
test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -361,16 +338,16 @@ mod test {
|
||||
assert!(parse_to_ast().parse("a^2^3").is_err());
|
||||
assert!(parse_to_ast().parse("a^2^").is_err());
|
||||
test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
|
||||
test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
|
||||
test_parse_query_to_ast_helper("a^3 b^2", "((\"a\")^3 (\"b\")^2)");
|
||||
test_parse_query_to_ast_helper("a^1", "\"a\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_to_ast_binary_op() {
|
||||
test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
|
||||
test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
|
||||
test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
|
||||
test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
|
||||
test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
|
||||
test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
|
||||
test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
|
||||
test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
|
||||
assert_eq!(
|
||||
format!("{:?}", parse_to_ast().parse("a OR b aaa")),
|
||||
"Err(UnexpectedParse)"
|
||||
@@ -408,32 +385,6 @@ mod test {
|
||||
test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_occur_leaf() {
|
||||
let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
|
||||
assert_eq!(occur, Some(Occur::Must));
|
||||
assert_eq!(format!("{:?}", ast), "\"abc\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_field_name() -> TestParseResult {
|
||||
assert_eq!(
|
||||
super::field().parse("my-field-name:a")?,
|
||||
("my-field-name".to_string(), "a")
|
||||
);
|
||||
assert_eq!(
|
||||
super::field().parse("my_field_name:a")?,
|
||||
("my_field_name".to_string(), "a")
|
||||
);
|
||||
assert!(super::field().parse(":a").is_err());
|
||||
assert!(super::field().parse("-my_field:a").is_err());
|
||||
assert_eq!(
|
||||
super::field().parse("_my_field:a")?,
|
||||
("_my_field".to_string(), "a")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_parser() {
|
||||
// testing the range() parser separately
|
||||
@@ -462,67 +413,32 @@ mod test {
|
||||
fn test_parse_query_to_triming_spaces() {
|
||||
test_parse_query_to_ast_helper(" abc", "\"abc\"");
|
||||
test_parse_query_to_ast_helper("abc ", "\"abc\"");
|
||||
test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
|
||||
test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
|
||||
test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
|
||||
test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
|
||||
test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
|
||||
test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
|
||||
test_parse_query_to_ast_helper("( a OR abc)", "(?(\"a\") ?(\"abc\"))");
|
||||
test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
|
||||
test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
|
||||
test_parse_query_to_ast_helper("a OR abc ", "(?(\"a\") ?(\"abc\"))");
|
||||
test_parse_query_to_ast_helper("(a OR abc )", "(?(\"a\") ?(\"abc\"))");
|
||||
test_parse_query_to_ast_helper("(a OR abc) ", "(?(\"a\") ?(\"abc\"))");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_single_term() {
|
||||
fn test_parse_query_to_ast() {
|
||||
test_parse_query_to_ast_helper("abc", "\"abc\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_default_clause() {
|
||||
test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_must_default_clause() {
|
||||
test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_must_single_term() {
|
||||
test_parse_query_to_ast_helper("+d", "\"d\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_single_term_with_field() {
|
||||
test_parse_query_to_ast_helper("a b", "(\"a\" \"b\")");
|
||||
test_parse_query_to_ast_helper("+(a b)", "+((\"a\" \"b\"))");
|
||||
test_parse_query_to_ast_helper("+d", "+(\"d\")");
|
||||
test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
|
||||
test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
|
||||
test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
|
||||
test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
|
||||
test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_single_term_with_float() {
|
||||
test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_must_clause() {
|
||||
test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_test_query_plus_a_b_plus_d() {
|
||||
test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_test_query_other() {
|
||||
test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
|
||||
test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
|
||||
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
|
||||
test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
|
||||
test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
|
||||
test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
|
||||
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
|
||||
test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
|
||||
test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
|
||||
test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
|
||||
test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_with_range() {
|
||||
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
|
||||
test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
|
||||
test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
|
||||
|
||||
@@ -85,14 +85,15 @@ impl UserInputBound {
|
||||
}
|
||||
|
||||
pub enum UserInputAST {
|
||||
Clause(Vec<(Option<Occur>, UserInputAST)>),
|
||||
Clause(Vec<UserInputAST>),
|
||||
Unary(Occur, Box<UserInputAST>),
|
||||
Leaf(Box<UserInputLeaf>),
|
||||
Boost(Box<UserInputAST>, f64),
|
||||
Boost(Box<UserInputAST>, f32),
|
||||
}
|
||||
|
||||
impl UserInputAST {
|
||||
pub fn unary(self, occur: Occur) -> UserInputAST {
|
||||
UserInputAST::Clause(vec![(Some(occur), self)])
|
||||
UserInputAST::Unary(occur, Box::new(self))
|
||||
}
|
||||
|
||||
fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
|
||||
@@ -103,7 +104,7 @@ impl UserInputAST {
|
||||
} else {
|
||||
UserInputAST::Clause(
|
||||
asts.into_iter()
|
||||
.map(|ast: UserInputAST| (Some(occur), ast))
|
||||
.map(|ast: UserInputAST| ast.unary(occur))
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
}
|
||||
@@ -134,36 +135,25 @@ impl From<UserInputLeaf> for UserInputAST {
|
||||
}
|
||||
}
|
||||
|
||||
fn print_occur_ast(
|
||||
occur_opt: Option<Occur>,
|
||||
ast: &UserInputAST,
|
||||
formatter: &mut fmt::Formatter,
|
||||
) -> fmt::Result {
|
||||
if let Some(occur) = occur_opt {
|
||||
write!(formatter, "{}{:?}", occur, ast)?;
|
||||
} else {
|
||||
write!(formatter, "*{:?}", ast)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl fmt::Debug for UserInputAST {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
match *self {
|
||||
UserInputAST::Clause(ref subqueries) => {
|
||||
if subqueries.is_empty() {
|
||||
write!(formatter, "<emptyclause>")?;
|
||||
} else {
|
||||
write!(formatter, "(")?;
|
||||
print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
|
||||
write!(formatter, "{:?}", &subqueries[0])?;
|
||||
for subquery in &subqueries[1..] {
|
||||
write!(formatter, " ")?;
|
||||
print_occur_ast(subquery.0, &subquery.1, formatter)?;
|
||||
write!(formatter, " {:?}", subquery)?;
|
||||
}
|
||||
write!(formatter, ")")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
UserInputAST::Unary(ref occur, ref subquery) => {
|
||||
write!(formatter, "{}({:?})", occur, subquery)
|
||||
}
|
||||
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
|
||||
UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
|
||||
}
|
||||
|
||||
@@ -96,18 +96,18 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut count_collector = SegmentCountCollector::default();
|
||||
count_collector.collect(0u32, 1.0);
|
||||
count_collector.collect(0u32, 1f32);
|
||||
assert_eq!(count_collector.harvest(), 1);
|
||||
}
|
||||
{
|
||||
let mut count_collector = SegmentCountCollector::default();
|
||||
count_collector.collect(0u32, 1.0);
|
||||
count_collector.collect(0u32, 1f32);
|
||||
assert_eq!(count_collector.harvest(), 1);
|
||||
}
|
||||
{
|
||||
let mut count_collector = SegmentCountCollector::default();
|
||||
count_collector.collect(0u32, 1.0);
|
||||
count_collector.collect(1u32, 1.0);
|
||||
count_collector.collect(0u32, 1f32);
|
||||
count_collector.collect(1u32, 1f32);
|
||||
assert_eq!(count_collector.harvest(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,13 +11,13 @@ impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
|
||||
where
|
||||
TScore: Clone + PartialOrd,
|
||||
{
|
||||
pub(crate) fn new(
|
||||
pub fn new(
|
||||
custom_scorer: TCustomScorer,
|
||||
collector: TopCollector<TScore>,
|
||||
limit: usize,
|
||||
) -> CustomScoreTopCollector<TCustomScorer, TScore> {
|
||||
CustomScoreTopCollector {
|
||||
custom_scorer,
|
||||
collector,
|
||||
collector: TopCollector::with_limit(limit),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -28,7 +28,7 @@ where
|
||||
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
|
||||
pub trait CustomSegmentScorer<TScore>: 'static {
|
||||
/// Computes the score of a specific `doc`.
|
||||
fn score(&mut self, doc: DocId) -> TScore;
|
||||
fn score(&self, doc: DocId) -> TScore;
|
||||
}
|
||||
|
||||
/// `CustomScorer` makes it possible to define any kind of score.
|
||||
@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {
|
||||
|
||||
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
|
||||
where
|
||||
TCustomScorer: CustomScorer<TScore> + Send + Sync,
|
||||
TCustomScorer: CustomScorer<TScore>,
|
||||
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
||||
{
|
||||
type Fruit = Vec<(TScore, DocAddress)>;
|
||||
@@ -58,10 +58,10 @@ where
|
||||
segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
|
||||
let segment_collector = self
|
||||
.collector
|
||||
.for_segment(segment_local_id, segment_reader)?;
|
||||
let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
|
||||
Ok(CustomScoreTopSegmentCollector {
|
||||
segment_collector,
|
||||
segment_scorer,
|
||||
@@ -117,9 +117,9 @@ where
|
||||
|
||||
impl<F, TScore> CustomSegmentScorer<TScore> for F
|
||||
where
|
||||
F: 'static + FnMut(DocId) -> TScore,
|
||||
F: 'static + Sync + Send + Fn(DocId) -> TScore,
|
||||
{
|
||||
fn score(&mut self, doc: DocId) -> TScore {
|
||||
fn score(&self, doc: DocId) -> TScore {
|
||||
(self)(doc)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use crate::{DocAddress, DocId, Score};
|
||||
|
||||
use super::{Collector, SegmentCollector};
|
||||
|
||||
/// Collector that returns the set of DocAddresses that match the query.
|
||||
///
|
||||
/// This collector is mostly useful for tests.
|
||||
pub struct DocSetCollector;
|
||||
|
||||
impl Collector for DocSetCollector {
|
||||
type Fruit = HashSet<DocAddress>;
|
||||
type Child = DocSetChildCollector;
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
segment_local_id: crate::SegmentLocalId,
|
||||
_segment: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
Ok(DocSetChildCollector {
|
||||
segment_local_id,
|
||||
docs: HashSet::new(),
|
||||
})
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
segment_fruits: Vec<(u32, HashSet<DocId>)>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
|
||||
let mut result = HashSet::with_capacity(len);
|
||||
for (segment_local_id, docs) in segment_fruits {
|
||||
for doc in docs {
|
||||
result.insert(DocAddress(segment_local_id, doc));
|
||||
}
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DocSetChildCollector {
|
||||
segment_local_id: u32,
|
||||
docs: HashSet<DocId>,
|
||||
}
|
||||
|
||||
impl SegmentCollector for DocSetChildCollector {
|
||||
type Fruit = (u32, HashSet<DocId>);
|
||||
|
||||
fn collect(&mut self, doc: crate::DocId, _score: Score) {
|
||||
self.docs.insert(doc);
|
||||
}
|
||||
|
||||
fn harvest(self) -> (u32, HashSet<DocId>) {
|
||||
(self.segment_local_id, self.docs)
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
use crate::collector::Collector;
|
||||
use crate::collector::SegmentCollector;
|
||||
use crate::docset::SkipResult;
|
||||
use crate::fastfield::FacetReader;
|
||||
use crate::schema::Facet;
|
||||
use crate::schema::Field;
|
||||
@@ -7,6 +8,7 @@ use crate::DocId;
|
||||
use crate::Score;
|
||||
use crate::SegmentLocalId;
|
||||
use crate::SegmentReader;
|
||||
use crate::TantivyError;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::btree_map;
|
||||
use std::collections::BTreeMap;
|
||||
@@ -186,11 +188,6 @@ pub struct FacetSegmentCollector {
|
||||
collapse_facet_ords: Vec<u64>,
|
||||
}
|
||||
|
||||
enum SkipResult {
|
||||
Found,
|
||||
NotFound,
|
||||
}
|
||||
|
||||
fn skip<'a, I: Iterator<Item = &'a Facet>>(
|
||||
target: &[u8],
|
||||
collapse_it: &mut Peekable<I>,
|
||||
@@ -200,14 +197,14 @@ fn skip<'a, I: Iterator<Item = &'a Facet>>(
|
||||
Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
|
||||
Ordering::Less => {}
|
||||
Ordering::Greater => {
|
||||
return SkipResult::NotFound;
|
||||
return SkipResult::OverStep;
|
||||
}
|
||||
Ordering::Equal => {
|
||||
return SkipResult::Found;
|
||||
return SkipResult::Reached;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
return SkipResult::NotFound;
|
||||
return SkipResult::End;
|
||||
}
|
||||
}
|
||||
collapse_it.next();
|
||||
@@ -265,7 +262,10 @@ impl Collector for FacetCollector {
|
||||
_: SegmentLocalId,
|
||||
reader: &SegmentReader,
|
||||
) -> crate::Result<FacetSegmentCollector> {
|
||||
let facet_reader = reader.facet_reader(self.field)?;
|
||||
let field_name = reader.schema().get_field_name(self.field);
|
||||
let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
|
||||
TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
|
||||
})?;
|
||||
|
||||
let mut collapse_mapping = Vec::new();
|
||||
let mut counts = Vec::new();
|
||||
@@ -281,7 +281,7 @@ impl Collector for FacetCollector {
|
||||
// is positioned on a term that has not been processed yet.
|
||||
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
|
||||
match skip_result {
|
||||
SkipResult::Found => {
|
||||
SkipResult::Reached => {
|
||||
// we reach a facet we decided to collapse.
|
||||
let collapse_depth = facet_depth(facet_streamer.key());
|
||||
let mut collapsed_id = 0;
|
||||
@@ -301,7 +301,7 @@ impl Collector for FacetCollector {
|
||||
}
|
||||
break;
|
||||
}
|
||||
SkipResult::NotFound => {
|
||||
SkipResult::End | SkipResult::OverStep => {
|
||||
collapse_mapping.push(0);
|
||||
if !facet_streamer.advance() {
|
||||
break;
|
||||
@@ -468,7 +468,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let num_facets: usize = 3 * 4 * 5;
|
||||
let facets: Vec<Facet> = (0..num_facets)
|
||||
.map(|mut n| {
|
||||
@@ -527,7 +527,7 @@ mod tests {
|
||||
let facet_field = schema_builder.add_facet_field("facets");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(
|
||||
facet_field => Facet::from_text(&"/subjects/A/a"),
|
||||
facet_field => Facet::from_text(&"/subjects/B/a"),
|
||||
@@ -546,12 +546,12 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_doc_search_by_facet() -> crate::Result<()> {
|
||||
fn test_doc_search_by_facet() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let facet_field = schema_builder.add_facet_field("facet");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(
|
||||
facet_field => Facet::from_text(&"/A/A"),
|
||||
));
|
||||
@@ -564,8 +564,8 @@ mod tests {
|
||||
index_writer.add_document(doc!(
|
||||
facet_field => Facet::from_text(&"/D/C/A"),
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.num_docs(), 4);
|
||||
|
||||
@@ -582,17 +582,17 @@ mod tests {
|
||||
assert_eq!(count_facet("/A/C"), 1);
|
||||
assert_eq!(count_facet("/A/C/A"), 1);
|
||||
assert_eq!(count_facet("/C/A"), 0);
|
||||
|
||||
let query_parser = QueryParser::for_index(&index, vec![]);
|
||||
{
|
||||
let query = query_parser.parse_query("facet:/A/B")?;
|
||||
assert_eq!(1, searcher.search(&query, &Count).unwrap());
|
||||
let query_parser = QueryParser::for_index(&index, vec![]);
|
||||
{
|
||||
let query = query_parser.parse_query("facet:/A/B").unwrap();
|
||||
assert_eq!(1, searcher.search(&query, &Count).unwrap());
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("facet:/A").unwrap();
|
||||
assert_eq!(3, searcher.search(&query, &Count).unwrap());
|
||||
}
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("facet:/A")?;
|
||||
assert_eq!(3, searcher.search(&query, &Count)?);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -627,7 +627,7 @@ mod tests {
|
||||
.collect();
|
||||
docs[..].shuffle(&mut thread_rng());
|
||||
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for doc in docs {
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
@@ -680,7 +680,7 @@ mod bench {
|
||||
// 40425 docs
|
||||
docs[..].shuffle(&mut thread_rng());
|
||||
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for doc in docs {
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
|
||||
src/collector/int_facet_collector.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
use std::cmp::Eq;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::Hash;
|
||||
|
||||
use collector::Collector;
|
||||
use fastfield::FastFieldReader;
|
||||
use schema::Field;
|
||||
|
||||
use DocId;
|
||||
use Result;
|
||||
use Score;
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
|
||||
|
||||
/// Facet collector for i64/u64 fast field
|
||||
pub struct IntFacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
counters: HashMap<T::ValueType, u64>,
|
||||
field: Field,
|
||||
ff_reader: Option<T>,
|
||||
}
|
||||
|
||||
|
||||
impl<T> IntFacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
/// Creates a new facet collector for aggregating a given field.
|
||||
pub fn new(field: Field) -> IntFacetCollector<T> {
|
||||
IntFacetCollector {
|
||||
counters: HashMap::new(),
|
||||
field: field,
|
||||
ff_reader: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<T> Collector for IntFacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
|
||||
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect(&mut self, doc: DocId, _: Score) {
|
||||
let val = self.ff_reader
|
||||
.as_ref()
|
||||
.expect(
|
||||
"collect() was called before set_segment. \
|
||||
This should never happen.",
|
||||
)
|
||||
.get(doc);
|
||||
*(self.counters.entry(val).or_insert(0)) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use collector::{chain, IntFacetCollector};
|
||||
use query::QueryParser;
|
||||
use fastfield::{I64FastFieldReader, U64FastFieldReader};
|
||||
use schema::{self, FAST, STRING};
|
||||
use Index;
|
||||
|
||||
#[test]
|
||||
// create 10 documents, set num field value to 0 or 1 for even/odd ones
|
||||
// make sure we have facet counters correctly filled
|
||||
fn test_facet_collector_results() {
|
||||
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
|
||||
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
|
||||
let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
|
||||
let text_field = schema_builder.add_text_field("text", STRING);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
for i in 0u64..10u64 {
|
||||
index_writer.add_document(doc!(
|
||||
num_field_i64 => ((i as i64) % 3i64) as i64,
|
||||
num_field_u64 => (i % 2u64) as u64,
|
||||
num_field_f64 => (i % 4u64) as f64,
|
||||
text_field => "text"
|
||||
));
|
||||
}
|
||||
}
|
||||
assert_eq!(index_writer.commit().unwrap(), 10u64);
|
||||
}
|
||||
|
||||
let searcher = index.reader().searcher();
|
||||
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
|
||||
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
|
||||
let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
|
||||
|
||||
{
|
||||
// perform the query
|
||||
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
|
||||
let mut query_parser = QueryParser::for_index(index, vec![text_field]);
|
||||
let query = query_parser.parse_query("text:text").unwrap();
|
||||
query.search(&searcher, &mut facet_collectors).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(ffvf_u64.counters[&0], 5);
|
||||
assert_eq!(ffvf_u64.counters[&1], 5);
|
||||
assert_eq!(ffvf_i64.counters[&0], 4);
|
||||
assert_eq!(ffvf_i64.counters[&1], 3);
|
||||
assert_eq!(ffvf_f64.counters[&0.0], 3);
|
||||
assert_eq!(ffvf_f64.counters[&2.0], 2);
|
||||
|
||||
}
|
||||
}
|
||||
@@ -109,10 +109,6 @@ pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
|
||||
|
||||
mod facet_collector;
|
||||
pub use self::facet_collector::FacetCollector;
|
||||
use crate::query::Weight;
|
||||
|
||||
mod docset_collector;
|
||||
pub use self::docset_collector::DocSetCollector;
|
||||
|
||||
/// `Fruit` is the type for the result of our collection.
|
||||
/// e.g. `usize` for the `Count` collector.
|
||||
@@ -136,13 +132,13 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
|
||||
/// The collection logic itself is in the `SegmentCollector`.
|
||||
///
|
||||
/// Segments are not guaranteed to be visited in any specific order.
|
||||
pub trait Collector: Sync + Send {
|
||||
pub trait Collector: Sync {
|
||||
/// `Fruit` is the type for the result of our collection.
|
||||
/// e.g. `usize` for the `Count` collector.
|
||||
type Fruit: Fruit;
|
||||
|
||||
/// Type of the `SegmentCollector` associated to this collector.
|
||||
type Child: SegmentCollector;
|
||||
type Child: SegmentCollector<Fruit = Self::Fruit>;
|
||||
|
||||
/// `set_segment` is called before beginning to enumerate
|
||||
/// on this segment.
|
||||
@@ -157,33 +153,7 @@ pub trait Collector: Sync + Send {
|
||||
|
||||
/// Combines the fruit associated to the collection of each segments
|
||||
/// into one fruit.
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit>;
|
||||
|
||||
/// Creates a segment collector and collects the documents matching `weight` over a single segment.
|
||||
fn collect_segment(
|
||||
&self,
|
||||
weight: &dyn Weight,
|
||||
segment_ord: u32,
|
||||
reader: &SegmentReader,
|
||||
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
|
||||
let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
|
||||
|
||||
if let Some(delete_bitset) = reader.delete_bitset() {
|
||||
weight.for_each(reader, &mut |doc, score| {
|
||||
if delete_bitset.is_alive(doc) {
|
||||
segment_collector.collect(doc, score);
|
||||
}
|
||||
})?;
|
||||
} else {
|
||||
weight.for_each(reader, &mut |doc, score| {
|
||||
segment_collector.collect(doc, score);
|
||||
})?;
|
||||
}
|
||||
Ok(segment_collector.harvest())
|
||||
}
|
||||
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
|
||||
}
|
||||
|
||||
/// The `SegmentCollector` is the trait in charge of defining the
|
||||
@@ -230,11 +200,11 @@ where
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
children: Vec<(Left::Fruit, Right::Fruit)>,
|
||||
) -> crate::Result<(Left::Fruit, Right::Fruit)> {
|
||||
let mut left_fruits = vec![];
|
||||
let mut right_fruits = vec![];
|
||||
for (left_fruit, right_fruit) in segment_fruits {
|
||||
for (left_fruit, right_fruit) in children {
|
||||
left_fruits.push(left_fruit);
|
||||
right_fruits.push(right_fruit);
|
||||
}
|
||||
@@ -288,10 +258,7 @@ where
|
||||
self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
|
||||
}
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
|
||||
let mut one_fruits = vec![];
|
||||
let mut two_fruits = vec![];
|
||||
let mut three_fruits = vec![];
|
||||
@@ -358,10 +325,7 @@ where
|
||||
|| self.3.requires_scoring()
|
||||
}
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
|
||||
let mut one_fruits = vec![];
|
||||
let mut two_fruits = vec![];
|
||||
let mut three_fruits = vec![];
|
||||
|
||||
@@ -34,13 +34,13 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
children: Vec<<Self as Collector>::Fruit>,
|
||||
) -> crate::Result<Box<dyn Fruit>> {
|
||||
let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
|
||||
let typed_fruit: Vec<TCollector::Fruit> = children
|
||||
.into_iter()
|
||||
.map(|untyped_fruit| {
|
||||
untyped_fruit
|
||||
.downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
|
||||
.downcast::<TCollector::Fruit>()
|
||||
.map(|boxed_but_typed| *boxed_but_typed)
|
||||
.map_err(|_| {
|
||||
TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
|
||||
@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
|
||||
impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
|
||||
type Fruit = Box<dyn Fruit>;
|
||||
|
||||
fn collect(&mut self, doc: u32, score: Score) {
|
||||
fn collect(&mut self, doc: u32, score: f32) {
|
||||
self.as_mut().collect(doc, score);
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
|
||||
}
|
||||
|
||||
pub trait BoxableSegmentCollector {
|
||||
fn collect(&mut self, doc: u32, score: Score);
|
||||
fn collect(&mut self, doc: u32, score: f32);
|
||||
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
|
||||
impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
|
||||
for SegmentCollectorWrapper<TSegmentCollector>
|
||||
{
|
||||
fn collect(&mut self, doc: u32, score: Score) {
|
||||
fn collect(&mut self, doc: u32, score: f32) {
|
||||
self.0.collect(doc, score);
|
||||
}
|
||||
|
||||
@@ -259,7 +259,7 @@ mod tests {
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text=>"abc"));
|
||||
index_writer.add_document(doc!(text=>"abc abc abc"));
|
||||
index_writer.add_document(doc!(text=>"abc abc"));
|
||||
|
||||
@@ -185,15 +185,12 @@ impl Collector for BytesFastFieldTestCollector {
|
||||
_segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<BytesFastFieldSegmentCollector> {
|
||||
let reader = segment_reader
|
||||
.fast_fields()
|
||||
.bytes(self.field)
|
||||
.ok_or_else(|| {
|
||||
crate::TantivyError::InvalidArgument("Field is not a bytes fast field.".to_string())
|
||||
})?;
|
||||
Ok(BytesFastFieldSegmentCollector {
|
||||
vals: Vec::new(),
|
||||
reader,
|
||||
reader: segment_reader
|
||||
.fast_fields()
|
||||
.bytes(self.field)
|
||||
.expect("Field is not a bytes fast field."),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -209,7 +206,7 @@ impl Collector for BytesFastFieldTestCollector {
|
||||
impl SegmentCollector for BytesFastFieldSegmentCollector {
|
||||
type Fruit = Vec<u8>;
|
||||
|
||||
fn collect(&mut self, doc: u32, _score: Score) {
|
||||
fn collect(&mut self, doc: u32, _score: f32) {
|
||||
let data = self.reader.get_bytes(doc);
|
||||
self.vals.extend(data);
|
||||
}
|
||||
|
||||
@@ -18,9 +18,9 @@ use std::collections::BinaryHeap;
|
||||
/// Two elements are equal if their feature is equal, regardless of whether `doc`
/// is equal. This is perfectly fine for this usage, but let's make sure this
/// struct is never public.
|
||||
pub(crate) struct ComparableDoc<T, D> {
|
||||
pub feature: T,
|
||||
pub doc: D,
|
||||
struct ComparableDoc<T, D> {
|
||||
feature: T,
|
||||
doc: D,
|
||||
}
|
||||
|
||||
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
|
||||
@@ -56,8 +56,7 @@ impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
|
||||
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
|
||||
|
||||
pub(crate) struct TopCollector<T> {
|
||||
pub limit: usize,
|
||||
pub offset: usize,
|
||||
limit: usize,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
@@ -73,20 +72,14 @@ where
|
||||
if limit < 1 {
|
||||
panic!("Limit must be strictly greater than 0.");
|
||||
}
|
||||
Self {
|
||||
TopCollector {
|
||||
limit,
|
||||
offset: 0,
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip the first "offset" documents when collecting.
|
||||
///
|
||||
/// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
|
||||
/// Lucene's TopDocsCollector.
|
||||
pub fn and_offset(mut self, offset: usize) -> TopCollector<T> {
|
||||
self.offset = offset;
|
||||
self
|
||||
pub fn limit(&self) -> usize {
|
||||
self.limit
|
||||
}
|
||||
|
||||
pub fn merge_fruits(
|
||||
@@ -99,7 +92,7 @@ where
|
||||
let mut top_collector = BinaryHeap::new();
|
||||
for child_fruit in children {
|
||||
for (feature, doc) in child_fruit {
|
||||
if top_collector.len() < (self.limit + self.offset) {
|
||||
if top_collector.len() < self.limit {
|
||||
top_collector.push(ComparableDoc { feature, doc });
|
||||
} else if let Some(mut head) = top_collector.peek_mut() {
|
||||
if head.feature < feature {
|
||||
@@ -111,7 +104,6 @@ where
|
||||
Ok(top_collector
|
||||
.into_sorted_vec()
|
||||
.into_iter()
|
||||
.skip(self.offset)
|
||||
.map(|cdoc| (cdoc.feature, cdoc.doc))
|
||||
.collect())
|
||||
}
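The hunk above keeps `limit + offset` candidates in a binary heap and only skips the first `offset` entries after the final sort. A small standalone sketch of the same merge strategy, using integer scores and an explicit `Reverse` min-heap instead of tantivy's `ComparableDoc` (all names here are illustrative):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Keep the best (limit + offset) scores across all segments, then drop the
// first `offset` of them, mirroring the merge_fruits diff above.
// Integer scores are used so the heap's Ord requirement is trivially satisfied.
fn merge_top(children: Vec<Vec<u64>>, limit: usize, offset: usize) -> Vec<u64> {
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::new(); // min-heap of kept scores
    for child in children {
        for score in child {
            if heap.len() < limit + offset {
                heap.push(Reverse(score));
            } else if let Some(mut head) = heap.peek_mut() {
                if head.0 < score {
                    *head = Reverse(score); // evict the weakest kept score
                }
            }
        }
    }
    let mut sorted: Vec<u64> = heap.into_iter().map(|r| r.0).collect();
    sorted.sort_unstable_by(|a, b| b.cmp(a)); // best first
    sorted.into_iter().skip(offset).collect()
}

fn main() {
    let merged = merge_top(vec![vec![9, 8, 7], vec![6, 5]], 2, 1);
    assert_eq!(merged, vec![8, 7]); // the single best score (9) is skipped
}
```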
|
||||
@@ -121,23 +113,7 @@ where
|
||||
segment_id: SegmentLocalId,
|
||||
_: &SegmentReader,
|
||||
) -> crate::Result<TopSegmentCollector<F>> {
|
||||
Ok(TopSegmentCollector::new(
|
||||
segment_id,
|
||||
self.limit + self.offset,
|
||||
))
|
||||
}
|
||||
|
||||
/// Create a new TopCollector with the same limit and offset.
|
||||
///
|
||||
/// Ideally we would use Into but the blanket implementation seems to cause the Scorer traits
|
||||
/// to fail.
|
||||
#[doc(hidden)]
|
||||
pub(crate) fn into_tscore<TScore: PartialOrd + Clone>(self) -> TopCollector<TScore> {
|
||||
TopCollector {
|
||||
limit: self.limit,
|
||||
offset: self.offset,
|
||||
_marker: PhantomData,
|
||||
}
|
||||
Ok(TopSegmentCollector::new(segment_id, self.limit))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -211,7 +187,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{TopCollector, TopSegmentCollector};
|
||||
use super::TopSegmentCollector;
|
||||
use crate::DocAddress;
|
||||
|
||||
#[test]
|
||||
@@ -272,48 +248,6 @@ mod tests {
|
||||
top_collector_limit_3.harvest()[..2].to_vec(),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_with_limit_and_offset() {
|
||||
let collector = TopCollector::with_limit(2).and_offset(1);
|
||||
|
||||
let results = collector
|
||||
.merge_fruits(vec![vec![
|
||||
(0.9, DocAddress(0, 1)),
|
||||
(0.8, DocAddress(0, 2)),
|
||||
(0.7, DocAddress(0, 3)),
|
||||
(0.6, DocAddress(0, 4)),
|
||||
(0.5, DocAddress(0, 5)),
|
||||
]])
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
results,
|
||||
vec![(0.8, DocAddress(0, 2)), (0.7, DocAddress(0, 3)),]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_with_limit_larger_than_set_and_offset() {
|
||||
let collector = TopCollector::with_limit(2).and_offset(1);
|
||||
|
||||
let results = collector
|
||||
.merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(results, vec![(0.8, DocAddress(0, 2)),]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_with_limit_and_offset_larger_than_set() {
|
||||
let collector = TopCollector::with_limit(2).and_offset(20);
|
||||
|
||||
let results = collector
|
||||
.merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(results, vec![]);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
|
||||
@@ -1,82 +1,19 @@
|
||||
use super::Collector;
|
||||
use crate::collector::top_collector::{ComparableDoc, TopCollector};
|
||||
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
|
||||
use crate::collector::top_collector::TopCollector;
|
||||
use crate::collector::top_collector::TopSegmentCollector;
|
||||
use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
|
||||
use crate::collector::{
|
||||
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
|
||||
};
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::query::Weight;
|
||||
use crate::schema::Field;
|
||||
use crate::DocAddress;
|
||||
use crate::DocId;
|
||||
use crate::Score;
|
||||
use crate::SegmentLocalId;
|
||||
use crate::SegmentReader;
|
||||
use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
|
||||
use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
|
||||
use std::fmt;
|
||||
use std::{collections::BinaryHeap, marker::PhantomData};
|
||||
|
||||
struct FastFieldConvertCollector<
|
||||
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
|
||||
TFastValue: FastValue,
|
||||
> {
|
||||
pub collector: TCollector,
|
||||
pub field: Field,
|
||||
pub fast_value: std::marker::PhantomData<TFastValue>,
|
||||
}
|
||||
|
||||
impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
|
||||
where
|
||||
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
|
||||
TFastValue: FastValue + 'static,
|
||||
{
|
||||
type Fruit = Vec<(TFastValue, DocAddress)>;
|
||||
|
||||
type Child = TCollector::Child;
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
segment_local_id: crate::SegmentLocalId,
|
||||
segment: &SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let schema = segment.schema();
|
||||
let field_entry = schema.get_field_entry(self.field);
|
||||
if !field_entry.is_fast() {
|
||||
return Err(TantivyError::SchemaError(format!(
|
||||
"Field {:?} is not a fast field.",
|
||||
field_entry.name()
|
||||
)));
|
||||
}
|
||||
let schema_type = TFastValue::to_type();
|
||||
let requested_type = field_entry.field_type().value_type();
|
||||
if schema_type != requested_type {
|
||||
return Err(TantivyError::SchemaError(format!(
|
||||
"Field {:?} is of type {:?}!={:?}",
|
||||
field_entry.name(),
|
||||
schema_type,
|
||||
requested_type
|
||||
)));
|
||||
}
|
||||
self.collector.for_segment(segment_local_id, segment)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
self.collector.requires_scoring()
|
||||
}
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
let raw_result = self.collector.merge_fruits(segment_fruits)?;
|
||||
let transformed_result = raw_result
|
||||
.into_iter()
|
||||
.map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
|
||||
.collect::<Vec<_>>();
|
||||
Ok(transformed_result)
|
||||
}
|
||||
}
|
||||
|
||||
/// The `TopDocs` collector keeps track of the top `K` documents
|
||||
/// sorted by their score.
|
||||
@@ -99,7 +36,7 @@ where
|
||||
/// let schema = schema_builder.build();
|
||||
/// let index = Index::create_in_ram(schema);
|
||||
///
|
||||
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
||||
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
|
||||
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
|
||||
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
|
||||
@@ -113,18 +50,14 @@ where
|
||||
/// let query = query_parser.parse_query("diary").unwrap();
|
||||
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
|
||||
///
|
||||
/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
|
||||
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
|
||||
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
|
||||
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
|
||||
/// ```
|
||||
pub struct TopDocs(TopCollector<Score>);
|
||||
|
||||
impl fmt::Debug for TopDocs {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"TopDocs(limit={}, offset={})",
|
||||
self.0.limit, self.0.offset
|
||||
)
|
||||
write!(f, "TopDocs({})", self.0.limit())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,8 +66,8 @@ struct ScorerByFastFieldReader {
|
||||
}
|
||||
|
||||
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
|
||||
fn score(&mut self, doc: DocId) -> u64 {
|
||||
self.ff_reader.get(doc)
|
||||
fn score(&self, doc: DocId) -> u64 {
|
||||
self.ff_reader.get_u64(u64::from(doc))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,10 +81,10 @@ impl CustomScorer<u64> for ScorerByField {
|
||||
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
|
||||
let ff_reader = segment_reader
|
||||
.fast_fields()
|
||||
.u64_lenient(self.field)
|
||||
.u64(self.field)
|
||||
.ok_or_else(|| {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
"Field requested ({:?}) is not a fast field.",
|
||||
"Field requested ({:?}) is not a i64/u64 fast field.",
|
||||
self.field
|
||||
))
|
||||
})?;
|
||||
@@ -168,57 +101,8 @@ impl TopDocs {
|
||||
TopDocs(TopCollector::with_limit(limit))
|
||||
}
|
||||
|
||||
/// Skip the first "offset" documents when collecting.
|
||||
///
|
||||
/// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
|
||||
/// Lucene's TopDocsCollector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::query::QueryParser;
|
||||
/// use tantivy::schema::{Schema, TEXT};
|
||||
/// use tantivy::{doc, DocAddress, Index};
|
||||
///
|
||||
/// let mut schema_builder = Schema::builder();
|
||||
/// let title = schema_builder.add_text_field("title", TEXT);
|
||||
/// let schema = schema_builder.build();
|
||||
/// let index = Index::create_in_ram(schema);
|
||||
///
|
||||
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
||||
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
|
||||
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
|
||||
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
|
||||
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
|
||||
/// index_writer.add_document(doc!(title => "The Diary of Lena Mukhina"));
|
||||
/// assert!(index_writer.commit().is_ok());
|
||||
///
|
||||
/// let reader = index.reader().unwrap();
|
||||
/// let searcher = reader.searcher();
|
||||
///
|
||||
/// let query_parser = QueryParser::for_index(&index, vec![title]);
|
||||
/// let query = query_parser.parse_query("diary").unwrap();
|
||||
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
|
||||
///
|
||||
/// assert_eq!(top_docs.len(), 2);
|
||||
/// assert_eq!(top_docs[0].1, DocAddress(0, 4));
|
||||
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
|
||||
/// ```
|
||||
pub fn and_offset(self, offset: usize) -> TopDocs {
|
||||
TopDocs(self.0.and_offset(offset))
|
||||
}
|
||||
|
||||
/// Set top-K to rank documents by a given fast field.
|
||||
///
|
||||
/// If the field is not a fast field or does not exist, this method still returns successfully (it is not aware of any schema).
/// An error will be returned at the moment of search.
///
/// If the field is a FAST field but not a u64 field, search will return successfully, but it will return
/// a monotonic u64 representation (i.e. the order is still correct) of the requested field type.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// # use tantivy::schema::{Schema, FAST, TEXT};
|
||||
/// # use tantivy::{doc, Index, DocAddress};
|
||||
@@ -234,13 +118,13 @@ impl TopDocs {
|
||||
/// # let schema = schema_builder.build();
|
||||
/// #
|
||||
/// # let index = Index::create_in_ram(schema);
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
||||
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
|
||||
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
|
||||
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
|
||||
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
|
||||
/// # assert!(index_writer.commit().is_ok());
|
||||
/// # let reader = index.reader()?;
|
||||
/// # let reader = index.reader().unwrap();
|
||||
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
|
||||
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
|
||||
/// # assert_eq!(top_docs,
|
||||
@@ -248,20 +132,25 @@ impl TopDocs {
|
||||
/// # (80u64, DocAddress(0u32, 3))]);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
///
|
||||
///
|
||||
/// /// Searches the documents matching the given query, and
/// /// collects the top 10 documents, ordered by the u64 `field`
/// /// given as argument.
|
||||
/// ///
|
||||
/// /// `field` is required to be a FAST field.
|
||||
/// fn docs_sorted_by_rating(searcher: &Searcher,
|
||||
/// query: &dyn Query,
|
||||
/// rating_field: Field)
|
||||
/// sort_by_field: Field)
|
||||
/// -> tantivy::Result<Vec<(u64, DocAddress)>> {
|
||||
///
|
||||
/// // This is where we build our topdocs collector
|
||||
/// //
|
||||
/// // Note the `rating_field` needs to be a FAST field here.
|
||||
/// let top_books_by_rating = TopDocs
|
||||
/// // Note the generics parameter that needs to match the
|
||||
/// // type `sort_by_field`.
|
||||
/// let top_docs_by_rating = TopDocs
|
||||
/// ::with_limit(10)
|
||||
/// .order_by_u64_field(rating_field);
|
||||
/// .order_by_u64_field(sort_by_field);
|
||||
///
|
||||
/// // ... and here are our documents. Note this is a simple vec.
|
||||
/// // The `u64` in the pair is the value of our fast field for
|
||||
@@ -271,105 +160,21 @@ impl TopDocs {
|
||||
/// // length of 10, or less if not enough documents matched the
|
||||
/// // query.
|
||||
/// let resulting_docs: Vec<(u64, DocAddress)> =
|
||||
/// searcher.search(query, &top_books_by_rating)?;
|
||||
/// searcher.search(query, &top_docs_by_rating)?;
|
||||
///
|
||||
/// Ok(resulting_docs)
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # See also
|
||||
///
|
||||
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to the
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if the field requested is not a fast field.
|
||||
///
|
||||
pub fn order_by_u64_field(
|
||||
self,
|
||||
field: Field,
|
||||
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
|
||||
CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore())
|
||||
}
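The docs above promise that ordering by the u64 representation of a non-u64 fast field is still correct because the conversion is monotonic. A hedged illustration of one such monotone mapping, flipping the sign bit of an `i64` (this is the usual trick, shown here for intuition only, not as tantivy's exact conversion):

```rust
// A monotone i64 -> u64 mapping: flipping the sign bit preserves order,
// so sorting by the u64 image sorts the original i64 values correctly.
// This explains why `order_by_u64_field` can still rank an i64 fast field,
// just with u64 values in the result.
fn i64_to_ordered_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn main() {
    let mut revenues = vec![-63_000_000i64, 119_000_000, 92_000_000];
    let mut images: Vec<u64> = revenues.iter().map(|&v| i64_to_ordered_u64(v)).collect();
    revenues.sort_unstable();
    images.sort_unstable();
    // The ordering of the u64 images matches the ordering of the original i64s.
    let back: Vec<i64> = images.iter().map(|&u| (u ^ (1u64 << 63)) as i64).collect();
    assert_eq!(back, revenues);
}
```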
|
||||
|
||||
/// Set top-K to rank documents by a given fast field.
|
||||
///
|
||||
/// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
|
||||
/// but an explicit error will be returned at the moment of collection.
|
||||
///
|
||||
/// Note that this method is generic. The requested fast field type will often be
/// inferred in your code by the Rust compiler.
///
/// Implementation-wise, for performance reasons, tantivy manipulates the u64 representation of your fast
/// field until the last moment.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// # use tantivy::schema::{Schema, FAST, TEXT};
|
||||
/// # use tantivy::{doc, Index, DocAddress};
|
||||
/// # use tantivy::query::{Query, AllQuery};
|
||||
/// use tantivy::Searcher;
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::schema::Field;
|
||||
///
|
||||
/// # fn main() -> tantivy::Result<()> {
|
||||
/// # let mut schema_builder = Schema::builder();
|
||||
/// # let title = schema_builder.add_text_field("company", TEXT);
|
||||
/// # let rating = schema_builder.add_i64_field("revenue", FAST);
|
||||
/// # let schema = schema_builder.build();
|
||||
/// #
|
||||
/// # let index = Index::create_in_ram(schema);
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
|
||||
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
|
||||
/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
|
||||
/// # assert!(index_writer.commit().is_ok());
|
||||
/// # let reader = index.reader()?;
|
||||
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
|
||||
/// # assert_eq!(top_docs,
|
||||
/// # vec![(119_000_000i64, DocAddress(0, 1)),
|
||||
/// # (92_000_000i64, DocAddress(0, 0))]);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// /// Searches the documents matching the given query, and
/// /// collects the top 2 documents, ordered by the fast `field`
/// /// given as argument.
|
||||
/// fn docs_sorted_by_revenue(searcher: &Searcher,
|
||||
/// query: &dyn Query,
|
||||
/// revenue_field: Field)
|
||||
/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
|
||||
///
|
||||
/// // This is where we build our topdocs collector
|
||||
/// //
|
||||
/// // Note the generics parameter that needs to match the
|
||||
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
|
||||
/// let top_company_by_revenue = TopDocs
|
||||
/// ::with_limit(2)
|
||||
/// .order_by_fast_field(revenue_field);
|
||||
///
|
||||
/// // ... and here are our documents. Note this is a simple vec.
|
||||
/// // The `i64` in the pair is the value of our fast field for
/// // each document.
/// //
/// // The vec is sorted decreasingly by `sort_by_field`, and has a
/// // length of 2, or less if not enough documents matched the
/// // query.
|
||||
/// let resulting_docs: Vec<(i64, DocAddress)> =
|
||||
/// searcher.search(query, &top_company_by_revenue)?;
|
||||
///
|
||||
/// Ok(resulting_docs)
|
||||
/// }
|
||||
/// ```
|
||||
pub fn order_by_fast_field<TFastValue>(
|
||||
self,
|
||||
fast_field: Field,
|
||||
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
|
||||
where
|
||||
TFastValue: FastValue + 'static,
|
||||
{
|
||||
let u64_collector = self.order_by_u64_field(fast_field);
|
||||
FastFieldConvertCollector {
|
||||
collector: u64_collector,
|
||||
field: fast_field,
|
||||
fast_value: PhantomData,
|
||||
}
|
||||
self.custom_score(ScorerByField { field })
|
||||
}
|
||||
|
||||
/// Ranks the documents using a custom score.
|
||||
@@ -414,7 +219,7 @@ impl TopDocs {
|
||||
/// fn create_index() -> tantivy::Result<Index> {
|
||||
/// let schema = create_schema();
|
||||
/// let index = Index::create_in_ram(schema);
|
||||
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
||||
/// let product_name = index.schema().get_field("product_name").unwrap();
|
||||
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
||||
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
|
||||
@@ -453,7 +258,7 @@ impl TopDocs {
|
||||
/// let popularity: u64 = popularity_reader.get(doc);
|
||||
/// // Well.. For the sake of the example we use a simple logarithm
|
||||
/// // function.
|
||||
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
|
||||
/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
|
||||
/// popularity_boost_score * original_score
|
||||
/// }
|
||||
/// });
|
||||
@@ -474,9 +279,9 @@ impl TopDocs {
|
||||
where
|
||||
TScore: 'static + Send + Sync + Clone + PartialOrd,
|
||||
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
|
||||
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
|
||||
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
|
||||
{
|
||||
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
|
||||
TweakedScoreTopCollector::new(score_tweaker, self.0.limit())
|
||||
}
|
||||
|
||||
/// Ranks the documents using a custom score.
|
||||
@@ -521,7 +326,7 @@ impl TopDocs {
|
||||
/// # fn main() -> tantivy::Result<()> {
|
||||
/// # let schema = create_schema();
|
||||
/// # let index = Index::create_in_ram(schema);
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
|
||||
/// # let product_name = index.schema().get_field("product_name").unwrap();
|
||||
/// #
|
||||
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
|
||||
@@ -588,9 +393,9 @@ impl TopDocs {
|
||||
where
|
||||
TScore: 'static + Send + Sync + Clone + PartialOrd,
|
||||
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
|
||||
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
|
||||
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
|
||||
{
|
||||
CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
|
||||
CustomScoreTopCollector::new(custom_score, self.0.limit())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -618,64 +423,6 @@ impl Collector for TopDocs {
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
self.0.merge_fruits(child_fruits)
|
||||
}
|
||||
|
||||
fn collect_segment(
|
||||
&self,
|
||||
weight: &dyn Weight,
|
||||
segment_ord: u32,
|
||||
reader: &SegmentReader,
|
||||
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
|
||||
let heap_len = self.0.limit + self.0.offset;
|
||||
let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
|
||||
|
||||
if let Some(delete_bitset) = reader.delete_bitset() {
|
||||
let mut threshold = Score::MIN;
|
||||
weight.for_each_pruning(threshold, reader, &mut |doc, score| {
|
||||
if delete_bitset.is_deleted(doc) {
|
||||
return threshold;
|
||||
}
|
||||
let heap_item = ComparableDoc {
|
||||
feature: score,
|
||||
doc,
|
||||
};
|
||||
if heap.len() < heap_len {
|
||||
heap.push(heap_item);
|
||||
if heap.len() == heap_len {
|
||||
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
||||
}
|
||||
return threshold;
|
||||
}
|
||||
*heap.peek_mut().unwrap() = heap_item;
|
||||
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
||||
threshold
|
||||
})?;
|
||||
} else {
|
||||
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
|
||||
let heap_item = ComparableDoc {
|
||||
feature: score,
|
||||
doc,
|
||||
};
|
||||
if heap.len() < heap_len {
|
||||
heap.push(heap_item);
|
||||
// TODO the threshold is suboptimal for heap.len == heap_len
|
||||
if heap.len() == heap_len {
|
||||
return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
||||
} else {
|
||||
return Score::MIN;
|
||||
}
|
||||
}
|
||||
*heap.peek_mut().unwrap() = heap_item;
|
||||
heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
|
||||
})?;
|
||||
}
|
||||
|
||||
let fruit = heap
|
||||
.into_sorted_vec()
|
||||
.into_iter()
|
||||
.map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))
|
||||
.collect();
|
||||
Ok(fruit)
|
||||
}
|
||||
}
|
||||
|
||||
/// Segment Collector associated to `TopDocs`.
|
||||
@@ -685,7 +432,7 @@ impl SegmentCollector for TopScoreSegmentCollector {
|
||||
type Fruit = Vec<(Score, DocAddress)>;
|
||||
|
||||
fn collect(&mut self, doc: DocId, score: Score) {
|
||||
self.0.collect(doc, score);
|
||||
self.0.collect(doc, score)
|
||||
}
|
||||
|
||||
fn harvest(self) -> Vec<(Score, DocAddress)> {
|
||||
@@ -699,10 +446,10 @@ mod tests {
|
||||
use crate::collector::Collector;
|
||||
use crate::query::{AllQuery, Query, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
||||
use crate::DocAddress;
|
||||
use crate::Index;
|
||||
use crate::IndexWriter;
|
||||
use crate::Score;
|
||||
use crate::{DocAddress, DocId, SegmentReader};
|
||||
|
||||
fn make_index() -> Index {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -711,7 +458,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
|
||||
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
|
||||
index_writer.add_document(doc!(text_field=>"I like Droopy"));
|
||||
@@ -720,13 +467,6 @@ mod tests {
|
||||
index
|
||||
}
|
||||
|
||||
fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
|
||||
for (result, expected) in results.iter().zip(expected.iter()) {
|
||||
assert_eq!(result.1, expected.1);
|
||||
crate::assert_nearly_equals!(result.0, expected.0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_not_at_capacity() {
|
||||
let index = make_index();
|
||||
@@ -739,31 +479,16 @@ mod tests {
|
||||
.searcher()
|
||||
.search(&text_query, &TopDocs::with_limit(4))
|
||||
.unwrap();
|
||||
assert_results_equals(
|
||||
&score_docs,
|
||||
&[
|
||||
assert_eq!(
|
||||
score_docs,
|
||||
vec![
|
||||
(0.81221175, DocAddress(0u32, 1)),
|
||||
(0.5376842, DocAddress(0u32, 2)),
|
||||
(0.48527452, DocAddress(0, 0)),
|
||||
],
|
||||
(0.48527452, DocAddress(0, 0))
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_not_at_capacity_with_offset() {
|
||||
let index = make_index();
|
||||
let field = index.schema().get_field("text").unwrap();
|
||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
||||
let score_docs: Vec<(Score, DocAddress)> = index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.search(&text_query, &TopDocs::with_limit(4).and_offset(2))
|
||||
.unwrap();
|
||||
assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_at_capacity() {
|
||||
let index = make_index();
|
||||
@@ -776,33 +501,12 @@ mod tests {
|
||||
.searcher()
|
||||
.search(&text_query, &TopDocs::with_limit(2))
|
||||
.unwrap();
|
||||
assert_results_equals(
|
||||
&score_docs,
|
||||
&[
|
||||
assert_eq!(
|
||||
score_docs,
|
||||
vec![
|
||||
(0.81221175, DocAddress(0u32, 1)),
|
||||
(0.5376842, DocAddress(0u32, 2)),
|
||||
],
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_at_capacity_with_offset() {
|
||||
let index = make_index();
|
||||
let field = index.schema().get_field("text").unwrap();
|
||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
||||
let score_docs: Vec<(Score, DocAddress)> = index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.search(&text_query, &TopDocs::with_limit(2).and_offset(1))
|
||||
.unwrap();
|
||||
assert_results_equals(
|
||||
&score_docs[..],
|
||||
&[
|
||||
(0.5376842, DocAddress(0u32, 2)),
|
||||
(0.48527452, DocAddress(0, 0)),
|
||||
],
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
@@ -863,8 +567,8 @@ mod tests {
|
||||
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
||||
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[
|
||||
top_docs,
|
||||
vec![
|
||||
(64, DocAddress(0, 1)),
|
||||
(16, DocAddress(0, 2)),
|
||||
(12, DocAddress(0, 0))
|
||||
@@ -872,94 +576,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_datetime() -> crate::Result<()> {
|
||||
use std::str::FromStr;
|
||||
let mut schema_builder = Schema::builder();
|
||||
let name = schema_builder.add_text_field("name", TEXT);
|
||||
let birthday = schema_builder.add_date_field("birthday", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
|
||||
index_writer.add_document(doc!(
|
||||
name => "Paul Robeson",
|
||||
birthday => pr_birthday
|
||||
));
|
||||
let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
|
||||
index_writer.add_document(doc!(
|
||||
name => "Minnie Riperton",
|
||||
birthday => mr_birthday
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
|
||||
let top_docs: Vec<(crate::DateTime, DocAddress)> =
|
||||
searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[
|
||||
(mr_birthday, DocAddress(0, 1)),
|
||||
(pr_birthday, DocAddress(0, 0)),
|
||||
]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_i64() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let city = schema_builder.add_text_field("city", TEXT);
|
||||
let altitude = schema_builder.add_i64_field("altitude", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(
|
||||
city => "georgetown",
|
||||
altitude => -1i64,
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
city => "tokyo",
|
||||
altitude => 40i64,
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
|
||||
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_f64() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let city = schema_builder.add_text_field("city", TEXT);
|
||||
let altitude = schema_builder.add_f64_field("altitude", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(
|
||||
city => "georgetown",
|
||||
altitude => -1.0f64,
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
city => "tokyo",
|
||||
altitude => 40f64,
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
|
||||
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_field_does_not_exist() {
|
||||
@@ -982,85 +598,29 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_field_not_fast_field() -> crate::Result<()> {
|
||||
fn test_field_not_fast_field() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let title = schema_builder.add_text_field(TITLE, TEXT);
|
||||
let size = schema_builder.add_u64_field(SIZE, STORED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(size=>1u64));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let (index, _) = index("beer", title, schema, |index_writer| {
|
||||
index_writer.add_document(doc!(
|
||||
title => "bottle of beer",
|
||||
size => 12u64,
|
||||
));
|
||||
});
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment = searcher.segment_reader(0);
|
||||
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
||||
let err = top_collector.for_segment(0, segment).err().unwrap();
|
||||
assert!(
|
||||
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field requested (Field(0)) is not a fast field.")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_field_wrong_type() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let size = schema_builder.add_u64_field(SIZE, STORED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(size=>1u64));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment = searcher.segment_reader(0);
|
||||
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
|
||||
let err = top_collector.for_segment(0, segment).err().unwrap();
|
||||
assert!(
|
||||
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tweak_score_top_collector_with_offset() {
|
||||
let index = make_index();
|
||||
let field = index.schema().get_field("text").unwrap();
|
||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
||||
let collector = TopDocs::with_limit(2).and_offset(1).tweak_score(
|
||||
move |_segment_reader: &SegmentReader| move |doc: DocId, _original_score: Score| doc,
|
||||
);
|
||||
let score_docs: Vec<(u32, DocAddress)> = index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.search(&text_query, &collector)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
score_docs,
|
||||
vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_custom_score_top_collector_with_offset() {
|
||||
let index = make_index();
|
||||
let field = index.schema().get_field("text").unwrap();
|
||||
let query_parser = QueryParser::for_index(&index, vec![field]);
|
||||
let text_query = query_parser.parse_query("droopy tax").unwrap();
|
||||
let collector = TopDocs::with_limit(2)
|
||||
.and_offset(1)
|
||||
.custom_score(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
|
||||
let score_docs: Vec<(u32, DocAddress)> = index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.search(&text_query, &collector)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
score_docs,
|
||||
vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
|
||||
);
|
||||
let err = top_collector.for_segment(0, segment);
|
||||
if let Err(crate::TantivyError::SchemaError(msg)) = err {
|
||||
assert_eq!(
|
||||
msg,
|
||||
"Field requested (Field(1)) is not a i64/u64 fast field."
|
||||
);
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
}
|
||||
|
||||
fn index(
|
||||
@@ -1070,7 +630,8 @@ mod tests {
|
||||
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
||||
) -> (Index, Box<dyn Query>) {
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
||||
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
doc_adder(&mut index_writer);
|
||||
index_writer.commit().unwrap();
|
||||
let query_parser = QueryParser::for_index(&index, vec![query_field]);
|
||||
|
||||
@@ -14,11 +14,11 @@ where
|
||||
{
|
||||
pub fn new(
|
||||
score_tweaker: TScoreTweaker,
|
||||
collector: TopCollector<TScore>,
|
||||
limit: usize,
|
||||
) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
|
||||
TweakedScoreTopCollector {
|
||||
score_tweaker,
|
||||
collector,
|
||||
collector: TopCollector::with_limit(limit),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -29,7 +29,7 @@ where
|
||||
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
|
||||
pub trait ScoreSegmentTweaker<TScore>: 'static {
|
||||
/// Tweak the given `score` for the document `doc`.
|
||||
fn score(&mut self, doc: DocId, score: Score) -> TScore;
|
||||
fn score(&self, doc: DocId, score: Score) -> TScore;
|
||||
}
|
||||
|
||||
/// `ScoreTweaker` makes it possible to tweak the score
|
||||
@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
|
||||
|
||||
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
|
||||
where
|
||||
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
|
||||
TScoreTweaker: ScoreTweaker<TScore>,
|
||||
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
||||
{
|
||||
type Fruit = Vec<(TScore, DocAddress)>;
|
||||
@@ -121,9 +121,9 @@ where
|
||||
|
||||
impl<F, TScore> ScoreSegmentTweaker<TScore> for F
|
||||
where
|
||||
F: 'static + FnMut(DocId, Score) -> TScore,
|
||||
F: 'static + Sync + Send + Fn(DocId, Score) -> TScore,
|
||||
{
|
||||
fn score(&mut self, doc: DocId, score: Score) -> TScore {
|
||||
fn score(&self, doc: DocId, score: Score) -> TScore {
|
||||
(self)(doc, score)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use std::io;
|
||||
|
||||
use crate::directory::OwnedBytes;
|
||||
use std::ops::Deref;
|
||||
|
||||
pub(crate) struct BitPacker {
|
||||
mini_buffer: u64,
|
||||
@@ -61,14 +60,20 @@ impl BitPacker {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BitUnpacker {
|
||||
pub struct BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
num_bits: u64,
|
||||
mask: u64,
|
||||
data: OwnedBytes,
|
||||
data: Data,
|
||||
}
|
||||
|
||||
impl BitUnpacker {
|
||||
pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker {
|
||||
impl<Data> BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
|
||||
let mask: u64 = if num_bits == 64 {
|
||||
!0u64
|
||||
} else {
|
||||
@@ -85,7 +90,7 @@ impl BitUnpacker {
|
||||
if self.num_bits == 0 {
|
||||
return 0u64;
|
||||
}
|
||||
let data: &[u8] = self.data.as_slice();
|
||||
let data: &[u8] = &*self.data;
|
||||
let num_bits = self.num_bits;
|
||||
let mask = self.mask;
|
||||
let addr_in_bits = idx * num_bits;
|
||||
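Only the start of `BitUnpacker::get` is visible in this hunk (`addr_in_bits = idx * num_bits`). For intuition, here is a minimal sketch of fixed-width bit unpacking over a plain byte slice, under the assumption that a value never spans more than eight bytes; it mirrors the idea, not the crate's exact code:

```rust
// Minimal sketch of fixed-width bit unpacking, assuming num_bits <= 56.
fn get_bits(data: &[u8], idx: usize, num_bits: u64) -> u64 {
    if num_bits == 0 {
        return 0;
    }
    let mask = if num_bits == 64 { !0u64 } else { (1u64 << num_bits) - 1 };
    let addr_in_bits = idx as u64 * num_bits;
    let byte_addr = (addr_in_bits / 8) as usize;
    let bit_shift = addr_in_bits % 8;
    // Read up to 8 bytes little-endian, zero-padding past the end of the slice.
    let mut buf = [0u8; 8];
    let available = data.len().saturating_sub(byte_addr).min(8);
    buf[..available].copy_from_slice(&data[byte_addr..byte_addr + available]);
    (u64::from_le_bytes(buf) >> bit_shift) & mask
}

fn main() {
    // Two 12-bit values, 0xABC and 0x123, packed little-endian.
    let data = [0xBCu8, 0x3A, 0x12];
    assert_eq!(get_bits(&data, 0, 12), 0xABC);
    assert_eq!(get_bits(&data, 1, 12), 0x123);
}
```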
@@ -104,9 +109,8 @@ impl BitUnpacker {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{BitPacker, BitUnpacker};
|
||||
use crate::directory::OwnedBytes;
|
||||
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) {
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
||||
let mut data = Vec::new();
|
||||
let mut bitpacker = BitPacker::new();
|
||||
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
||||
@@ -118,7 +122,7 @@ mod test {
|
||||
}
|
||||
bitpacker.close(&mut data).unwrap();
|
||||
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
||||
let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits);
|
||||
let bitunpacker = BitUnpacker::new(data, num_bits);
|
||||
(bitunpacker, vals)
|
||||
}
|
||||
|
||||
|
||||
@@ -33,10 +33,6 @@ impl TinySet {
|
||||
TinySet(0u64)
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.0 = 0u64;
|
||||
}
|
||||
|
||||
/// Returns the complement of the set in `[0, 64[`.
|
||||
fn complement(self) -> TinySet {
|
||||
TinySet(!self.0)
|
||||
@@ -47,11 +43,6 @@ impl TinySet {
|
||||
!self.intersect(TinySet::singleton(el)).is_empty()
|
||||
}
|
||||
|
||||
/// Returns the number of elements in the TinySet.
|
||||
pub fn len(self) -> u32 {
|
||||
self.0.count_ones()
|
||||
}
|
||||
|
||||
/// Returns the intersection of `self` and `other`
|
||||
pub fn intersect(self, other: TinySet) -> TinySet {
|
||||
TinySet(self.0 & other.0)
|
||||
@@ -118,12 +109,22 @@ impl TinySet {
|
||||
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
|
||||
TinySet::range_lower(from_included).complement()
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.0 = 0u64;
|
||||
}
|
||||
|
||||
pub fn len(self) -> u32 {
|
||||
self.0.count_ones()
|
||||
}
|
||||
}
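The `TinySet` methods moved around in this hunk are all thin wrappers over `u64` bit tricks. A self-contained sketch of the same operations on a toy type (not the tantivy struct itself):

```rust
// A minimal u64-backed "tiny set" over the domain [0, 64), sketching the
// operations visible in the TinySet diff above.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Tiny(u64);

impl Tiny {
    fn empty() -> Tiny { Tiny(0) }
    fn singleton(el: u32) -> Tiny { Tiny(1u64 << el) }
    fn insert(self, el: u32) -> Tiny { Tiny(self.0 | (1u64 << el)) }
    fn intersect(self, other: Tiny) -> Tiny { Tiny(self.0 & other.0) }
    fn complement(self) -> Tiny { Tiny(!self.0) }
    fn contains(self, el: u32) -> bool { self.intersect(Tiny::singleton(el)).0 != 0 }
    fn len(self) -> u32 { self.0.count_ones() }
}

fn main() {
    let set = Tiny::empty().insert(3).insert(10);
    assert!(set.contains(3) && !set.contains(4));
    assert_eq!(set.len(), 2);
    assert_eq!(set.complement().len(), 62);
}
```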
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BitSet {
|
||||
tinysets: Box<[TinySet]>,
|
||||
len: usize,
|
||||
len: usize, //< Technically it should be u32, but we
|
||||
// count multiple inserts.
|
||||
// `usize` guards us from overflow.
|
||||
max_value: u32,
|
||||
}
|
||||
|
||||
@@ -203,7 +204,7 @@ mod tests {
|
||||
|
||||
use super::BitSet;
|
||||
use super::TinySet;
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::docset::DocSet;
|
||||
use crate::query::BitSetDocSet;
|
||||
use crate::tests;
|
||||
use crate::tests::generate_nonunique_unsorted;
|
||||
@@ -277,13 +278,11 @@ mod tests {
|
||||
}
|
||||
assert_eq!(btreeset.len(), bitset.len());
|
||||
let mut bitset_docset = BitSetDocSet::from(bitset);
|
||||
let mut remaining = true;
|
||||
for el in btreeset.into_iter() {
|
||||
assert!(remaining);
|
||||
bitset_docset.advance();
|
||||
assert_eq!(bitset_docset.doc(), el);
|
||||
remaining = bitset_docset.advance() != TERMINATED;
|
||||
}
|
||||
assert!(!remaining);
|
||||
assert!(!bitset_docset.advance());
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,15 +1,14 @@
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::common::CountingWriter;
|
||||
use crate::common::VInt;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::{TerminatingWrite, WritePtr};
|
||||
use crate::schema::Field;
|
||||
use crate::space_usage::FieldUsage;
|
||||
use crate::space_usage::PerFieldSpaceUsage;
|
||||
use std::collections::HashMap;
|
||||
use std::io::{self, Read, Write};
|
||||
|
||||
use super::HasLen;
|
||||
use std::io::Write;
|
||||
use std::io::{self, Read};
|
||||
|
||||
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
||||
pub struct FileAddr {
|
||||
@@ -104,26 +103,25 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
/// for each field.
|
||||
#[derive(Clone)]
|
||||
pub struct CompositeFile {
|
||||
data: FileSlice,
|
||||
data: ReadOnlySource,
|
||||
offsets_index: HashMap<FileAddr, (usize, usize)>,
|
||||
}
|
||||
|
||||
impl CompositeFile {
|
||||
/// Opens a composite file stored in a given
|
||||
/// `FileSlice`.
|
||||
pub fn open(data: &FileSlice) -> io::Result<CompositeFile> {
|
||||
/// `ReadOnlySource`.
|
||||
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
|
||||
let end = data.len();
|
||||
let footer_len_data = data.slice_from(end - 4).read_bytes()?;
|
||||
let footer_len_data = data.slice_from(end - 4);
|
||||
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
||||
let footer_start = end - 4 - footer_len;
|
||||
let footer_data = data
|
||||
.slice(footer_start, footer_start + footer_len)
|
||||
.read_bytes()?;
|
||||
let footer_data = data.slice(footer_start, footer_start + footer_len);
|
||||
let mut footer_buffer = footer_data.as_slice();
|
||||
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
||||
|
||||
let mut file_addrs = vec![];
|
||||
let mut offsets = vec![];
|
||||
|
||||
let mut field_index = HashMap::new();
|
||||
|
||||
let mut offset = 0;
|
||||
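`CompositeFile::open` above reads the final four bytes as the footer length, then slices the footer out of the same byte range. A simplified sketch of that trailer convention, with the footer reduced to a plain list of little-endian `u32` offsets (the real footer stores a VInt field count and per-field entries):

```rust
use std::convert::TryInto;

// File layout assumed here: [ field data ... | footer | footer_len: u32 LE ].
fn read_footer(data: &[u8]) -> (Vec<u32>, &[u8]) {
    let end = data.len();
    let footer_len = u32::from_le_bytes(data[end - 4..end].try_into().unwrap()) as usize;
    let footer_start = end - 4 - footer_len;
    let footer = &data[footer_start..footer_start + footer_len];
    let offsets = footer
        .chunks_exact(4)
        .map(|c| u32::from_le_bytes(c.try_into().unwrap()))
        .collect();
    (offsets, &data[..footer_start]) // per-field offsets + the payload section
}

fn main() {
    // payload = b"abcdef", footer = offsets [0, 3], footer_len = 8.
    let mut file = b"abcdef".to_vec();
    file.extend_from_slice(&0u32.to_le_bytes());
    file.extend_from_slice(&3u32.to_le_bytes());
    file.extend_from_slice(&8u32.to_le_bytes());
    let (offsets, payload) = read_footer(&file);
    assert_eq!(offsets, vec![0, 3]);
    assert_eq!(payload, &b"abcdef"[..]);
}
```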
@@ -152,19 +150,19 @@ impl CompositeFile {
|
||||
pub fn empty() -> CompositeFile {
|
||||
CompositeFile {
|
||||
offsets_index: HashMap::new(),
|
||||
data: FileSlice::empty(),
|
||||
data: ReadOnlySource::empty(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the `FileSlice` associated
|
||||
/// Returns the `ReadOnlySource` associated
|
||||
/// to a given `Field` and stored in a `CompositeFile`.
|
||||
pub fn open_read(&self, field: Field) -> Option<FileSlice> {
|
||||
pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
|
||||
self.open_read_with_idx(field, 0)
|
||||
}
|
||||
|
||||
/// Returns the `FileSlice` associated
|
||||
/// Returns the `ReadOnlySource` associated
|
||||
/// to a given `Field` and stored in a `CompositeFile`.
|
||||
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
|
||||
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
|
||||
self.offsets_index
|
||||
.get(&FileAddr { field, idx })
|
||||
.map(|&(from, to)| self.data.slice(from, to))
|
||||
@@ -192,46 +190,49 @@ mod test {
|
||||
use crate::schema::Field;
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_composite_file() -> crate::Result<()> {
|
||||
fn test_composite_file() {
|
||||
let path = Path::new("test_path");
|
||||
let directory = RAMDirectory::create();
|
||||
let mut directory = RAMDirectory::default();
|
||||
{
|
||||
let w = directory.open_write(path).unwrap();
|
||||
let mut composite_write = CompositeWrite::wrap(w);
|
||||
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
|
||||
VInt(32431123u64).serialize(&mut write_0)?;
|
||||
write_0.flush()?;
|
||||
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
|
||||
VInt(2).serialize(&mut write_4)?;
|
||||
write_4.flush()?;
|
||||
composite_write.close()?;
|
||||
{
|
||||
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
|
||||
VInt(32431123u64).serialize(&mut write_0).unwrap();
|
||||
write_0.flush().unwrap();
|
||||
}
|
||||
|
||||
{
|
||||
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
|
||||
VInt(2).serialize(&mut write_4).unwrap();
|
||||
write_4.flush().unwrap();
|
||||
}
|
||||
composite_write.close().unwrap();
|
||||
}
|
||||
{
|
||||
let r = directory.open_read(path)?;
|
||||
let composite_file = CompositeFile::open(&r)?;
|
||||
let r = directory.open_read(path).unwrap();
|
||||
let composite_file = CompositeFile::open(&r).unwrap();
|
||||
{
|
||||
let file0 = composite_file
|
||||
.open_read(Field::from_field_id(0u32))
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
.unwrap();
|
||||
let mut file0_buf = file0.as_slice();
|
||||
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
|
||||
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
|
||||
assert_eq!(file0_buf.len(), 0);
|
||||
assert_eq!(payload_0, 32431123u64);
|
||||
}
|
||||
{
|
||||
let file4 = composite_file
|
||||
.open_read(Field::from_field_id(4u32))
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
.unwrap();
|
||||
let mut file4_buf = file4.as_slice();
|
||||
let payload_4 = VInt::deserialize(&mut file4_buf)?.0;
|
||||
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
|
||||
assert_eq!(file4_buf.len(), 0);
|
||||
assert_eq!(payload_4, 2u64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ pub mod bitpacker;
|
||||
mod bitset;
|
||||
mod composite_file;
|
||||
mod counting_writer;
|
||||
mod mutable_enum;
|
||||
mod serialize;
|
||||
mod vint;
|
||||
|
||||
@@ -9,10 +10,9 @@ pub use self::bitset::BitSet;
|
||||
pub(crate) use self::bitset::TinySet;
|
||||
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
||||
pub use self::counting_writer::CountingWriter;
|
||||
pub(crate) use self::mutable_enum::MutableEnum;
|
||||
pub use self::serialize::{BinarySerializable, FixedSize};
|
||||
pub use self::vint::{
|
||||
read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
|
||||
};
|
||||
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
|
||||
pub use byteorder::LittleEndian as Endianness;
|
||||
|
||||
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
|
||||
|
||||
37
src/common/mutable_enum.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
pub(crate) struct MutableEnum<T>(Option<T>);
|
||||
|
||||
impl<T> MutableEnum<T> {
|
||||
pub fn wrap(val: T) -> Self {
|
||||
MutableEnum(Some(val))
|
||||
}
|
||||
|
||||
pub fn into(self) -> T {
|
||||
self.0.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> MutableEnum<T> {
|
||||
pub fn map_mutate<E, F>(&mut self, transformation: F) -> Result<(), E>
|
||||
where
|
||||
F: FnOnce(T) -> Result<T, E>,
|
||||
{
|
||||
self.0 = self.0.take().map(transformation).transpose()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for MutableEnum<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
self.0.as_ref().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for MutableEnum<T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
self.0.as_mut().unwrap()
|
||||
}
|
||||
}
|
||||
@@ -89,19 +89,6 @@ impl FixedSize for u64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for f32 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_f32::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
reader.read_f32::<Endianness>()
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for f32 {
|
||||
const SIZE_IN_BYTES: usize = 4;
|
||||
}
|
||||
|
||||
impl BinarySerializable for i64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_i64::<Endianness>(*self)
|
||||
|
||||
@@ -5,12 +5,12 @@ use std::io::Read;
|
||||
use std::io::Write;
|
||||
|
||||
/// Wrapper over a `u64` that serializes as a variable int.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub struct VInt(pub u64);
|
||||
|
||||
const STOP_BIT: u8 = 128;
|
||||
|
||||
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
||||
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
||||
const START_2: u64 = 1 << 7;
|
||||
const START_3: u64 = 1 << 14;
|
||||
const START_4: u64 = 1 << 21;
|
||||
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
||||
|
||||
let val = u64::from(val);
|
||||
const STOP_BIT: u64 = 128u64;
|
||||
let (res, num_bytes) = match val {
|
||||
match val {
|
||||
0..=STOP_1 => (val | STOP_BIT, 1),
|
||||
START_2..=STOP_2 => (
|
||||
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
|
||||
@@ -56,9 +56,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
||||
| (STOP_BIT << (8 * 4)),
|
||||
5,
|
||||
),
|
||||
};
|
||||
LittleEndian::write_u64(&mut buf[..], res);
|
||||
&buf[0..num_bytes]
|
||||
}
|
||||
}
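Both versions of `serialize_vint_u32` above implement the same convention: seven payload bits per byte, least-significant group first, with the stop bit (`0x80`) set only on the final byte. A standalone, unoptimized sketch of that encoding and the matching decoder (not the crate's word-at-a-time implementation):

```rust
// 7 payload bits per byte, little-endian group order, stop bit on the last byte.
fn write_vint_u32(mut val: u32, buf: &mut Vec<u8>) {
    loop {
        let byte = (val & 127) as u8;
        val >>= 7;
        if val == 0 {
            buf.push(byte | 128); // stop bit marks the final byte
            return;
        }
        buf.push(byte);
    }
}

fn read_vint_u32(data: &[u8]) -> (u32, usize) {
    let mut result = 0u32;
    for (i, &b) in data.iter().enumerate() {
        result |= u32::from(b & 127) << (7 * i as u32);
        if b & 128 != 0 {
            return (result, i + 1); // decoded value + number of bytes consumed
        }
    }
    panic!("buffer does not contain a terminated vint");
}

fn main() {
    for &val in &[0u32, 127, 128, 1_000_000, u32::MAX] {
        let mut buf = Vec::new();
        write_vint_u32(val, &mut buf);
        assert_eq!(read_vint_u32(&buf), (val, buf.len()));
    }
}
```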
|
||||
|
||||
/// Returns the number of bytes covered by a
|
||||
@@ -87,26 +85,23 @@ fn vint_len(data: &[u8]) -> usize {
|
||||
/// If the buffer does not start with a valid
/// vint payload
|
||||
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
|
||||
let (result, vlen) = read_u32_vint_no_advance(*data);
|
||||
*data = &data[vlen..];
|
||||
result
|
||||
}
|
||||
|
||||
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
|
||||
let vlen = vint_len(data);
|
||||
let vlen = vint_len(*data);
|
||||
let mut result = 0u32;
|
||||
let mut shift = 0u64;
|
||||
for &b in &data[..vlen] {
|
||||
result |= u32::from(b & 127u8) << shift;
|
||||
shift += 7;
|
||||
}
|
||||
(result, vlen)
|
||||
*data = &data[vlen..];
|
||||
result
|
||||
}
|
||||
|
||||
/// Write a `u32` as a vint payload.
|
||||
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
|
||||
let mut buf = [0u8; 8];
|
||||
let data = serialize_vint_u32(val, &mut buf);
|
||||
writer.write_all(&data)
|
||||
let (val, num_bytes) = serialize_vint_u32(val);
|
||||
let mut buffer = [0u8; 8];
|
||||
LittleEndian::write_u64(&mut buffer, val);
|
||||
writer.write_all(&buffer[..num_bytes])
|
||||
}
|
||||
|
||||
impl VInt {
|
||||
@@ -177,6 +172,7 @@ mod tests {
|
||||
use super::serialize_vint_u32;
|
||||
use super::VInt;
|
||||
use crate::common::BinarySerializable;
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
|
||||
fn aux_test_vint(val: u64) {
|
||||
let mut v = [14u8; 10];
|
||||
@@ -212,10 +208,12 @@ mod tests {
|
||||
|
||||
fn aux_test_serialize_vint_u32(val: u32) {
|
||||
let mut buffer = [0u8; 10];
|
||||
let mut buffer2 = [0u8; 8];
|
||||
let mut buffer2 = [0u8; 10];
|
||||
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
|
||||
let res2 = serialize_vint_u32(val, &mut buffer2);
|
||||
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
|
||||
let (vint, len) = serialize_vint_u32(val);
|
||||
assert_eq!(len, len_vint, "len wrong for val {}", val);
|
||||
LittleEndian::write_u64(&mut buffer2, vint);
|
||||
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -12,8 +12,8 @@ use crate::directory::INDEX_WRITER_LOCK;
|
||||
use crate::directory::{Directory, RAMDirectory};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::error::TantivyError;
|
||||
use crate::indexer::index_writer::HEAP_SIZE_MIN;
|
||||
use crate::indexer::segment_updater::save_new_metas;
|
||||
use crate::indexer::IndexWriterConfig;
|
||||
use crate::reader::IndexReader;
|
||||
use crate::reader::IndexReaderBuilder;
|
||||
use crate::schema::Field;
|
||||
@@ -21,12 +21,12 @@ use crate::schema::FieldType;
|
||||
use crate::schema::Schema;
|
||||
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
|
||||
use crate::IndexWriter;
|
||||
use num_cpus;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
|
||||
#[cfg(feature = "mmap")]
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
fn load_metas(
|
||||
@@ -56,9 +56,7 @@ pub struct Index {
|
||||
}
|
||||
|
||||
impl Index {
|
||||
/// Examines the directory to see if it contains an index.
|
||||
///
|
||||
/// Effectively, it only checks for the presence of the `meta.json` file.
|
||||
/// Examines the directory to see if it contains an index
|
||||
pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
|
||||
dir.exists(&META_FILEPATH)
|
||||
}
|
||||
@@ -141,9 +139,7 @@ impl Index {
|
||||
Index::create(mmap_directory, schema)
|
||||
}
|
||||
|
||||
/// Creates a new index given an implementation of the trait `Directory`.
|
||||
///
|
||||
/// If a directory previously existed, it will be erased.
|
||||
/// Creates a new index given an implementation of the trait `Directory`
|
||||
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
|
||||
let directory = ManagedDirectory::wrap(dir)?;
|
||||
Index::from_directory(directory, schema)
|
||||
@@ -152,8 +148,8 @@ impl Index {
|
||||
/// Create a new index from a directory.
|
||||
///
|
||||
/// This will overwrite existing meta.json
|
||||
fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
|
||||
save_new_metas(schema.clone(), &directory)?;
|
||||
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
|
||||
save_new_metas(schema.clone(), directory.borrow_mut())?;
|
||||
let metas = IndexMeta::with_schema(schema);
|
||||
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
|
||||
}
|
||||
@@ -277,8 +273,40 @@ impl Index {
|
||||
pub fn writer_with_num_threads(
|
||||
&self,
|
||||
num_threads: usize,
|
||||
overall_heap_size_in_bytes: usize,
|
||||
overall_heap_size_in_bytes: u64,
|
||||
) -> crate::Result<IndexWriter> {
|
||||
let config = IndexWriterConfig {
|
||||
max_indexing_threads: num_threads,
|
||||
memory_budget: overall_heap_size_in_bytes,
|
||||
..Default::default()
|
||||
};
|
||||
self.writer_from_config(config)
|
||||
}
|
||||
|
||||
    /// Creates a multithreaded writer
    ///
    /// Tantivy will automatically define the number of threads to use.
    /// `overall_heap_size_in_bytes` is the total target memory usage that will be split
    /// between a given number of threads.
    ///
    /// # Errors
    /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
    /// # Panics
    /// If the heap size per thread is too small, panics.
    pub fn writer(&self, overall_heap_size_in_bytes: u64) -> crate::Result<IndexWriter> {
        let config = IndexWriterConfig {
            max_indexing_threads: num_cpus::get(),
            memory_budget: overall_heap_size_in_bytes,
            ..Default::default()
        };
        self.writer_from_config(config)
    }
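For reference, this is how the two writer entry points above are exercised elsewhere in this changeset: `writer` with an overall memory budget, or `writer_with_num_threads` to pin the thread count. A minimal sketch; note the budget parameter is `usize` on one side of this diff and `u64` on the other, and `Index::create_from_tempdir` (which appears in the tests below) is only used here to get a throwaway index:

```rust
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let name = schema_builder.add_text_field("name", TEXT | STORED);
    let index = Index::create_from_tempdir(schema_builder.build())?;

    // Roughly 50 MB, split across however many indexing threads tantivy picks.
    let mut index_writer = index.writer(50_000_000)?;
    // Or pin it to a single thread with a 3 MB budget, as the tests here do:
    // let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;

    index_writer.add_document(doc!(name => "tantivy"));
    index_writer.commit()?;
    Ok(())
}
```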
|
||||
|
||||
/// Creates a new writer with a given configuration.
|
||||
///
|
||||
/// See [`IndexWriterConfig`](./struct.IndexWriterConfig.html) for more information.
|
||||
pub fn writer_from_config(&self, mut config: IndexWriterConfig) -> crate::Result<IndexWriter> {
|
||||
config.validate()?;
|
||||
let directory_lock = self
|
||||
.directory
|
||||
.acquire_lock(&INDEX_WRITER_LOCK)
|
||||
@@ -294,41 +322,7 @@ impl Index {
|
||||
),
|
||||
)
|
||||
})?;
|
||||
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
|
||||
IndexWriter::new(
|
||||
self,
|
||||
num_threads,
|
||||
heap_size_in_bytes_per_thread,
|
||||
directory_lock,
|
||||
)
|
||||
}
|
||||
|
||||
/// Helper to create an index writer for tests.
|
||||
///
|
||||
/// That index writer only simply has a single thread and a heap of 5 MB.
|
||||
/// Using a single thread gives us a deterministic allocation of DocId.
|
||||
#[cfg(test)]
|
||||
pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
|
||||
self.writer_with_num_threads(1, 10_000_000)
|
||||
}
|
||||
|
||||
/// Creates a multithreaded writer
|
||||
///
|
||||
/// Tantivy will automatically define the number of threads to use.
|
||||
/// `overall_heap_size_in_bytes` is the total target memory usage that will be split
|
||||
/// between a given number of threads.
|
||||
///
|
||||
/// # Errors
|
||||
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
|
||||
/// # Panics
|
||||
/// If the heap size per thread is too small, panics.
|
||||
pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> {
|
||||
let mut num_threads = num_cpus::get();
|
||||
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
|
||||
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
|
||||
num_threads = (overall_heap_size_in_bytes / HEAP_SIZE_MIN).max(1);
|
||||
}
|
||||
self.writer_with_num_threads(num_threads, overall_heap_size_in_bytes)
|
||||
IndexWriter::new(self, config, directory_lock)
|
||||
}
|
||||
|
||||
/// Accessor to the index schema
|
||||
@@ -482,7 +476,7 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
test_index_on_commit_reload_policy_aux(field, &index, &reader);
|
||||
test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
|
||||
}
|
||||
|
||||
#[cfg(feature = "mmap")]
|
||||
@@ -506,7 +500,7 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
test_index_on_commit_reload_policy_aux(field, &index, &reader);
|
||||
test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -514,7 +508,7 @@ mod tests {
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let mut index = Index::create_from_tempdir(schema).unwrap();
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
writer.commit().unwrap();
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
@@ -548,36 +542,30 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
|
||||
test_index_on_commit_reload_policy_aux(field, read_index, &write_index, &reader);
|
||||
}
|
||||
}
|
||||
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
    let mut reader_index = reader.index();

fn test_index_on_commit_reload_policy_aux(
    field: Field,
    mut reader_index: Index,
    index: &Index,
    reader: &IndexReader,
) {
    let (sender, receiver) = crossbeam::channel::unbounded();
    let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
        let _ = sender.send(());
    }));
    let mut writer = index.writer_for_tests().unwrap();
    let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
writer.add_document(doc!(field=>1u64));
|
||||
writer.commit().unwrap();
|
||||
// We need a loop here because it is possible for notify to send more than
|
||||
// one modify event. It was observed on CI on MacOS.
|
||||
loop {
|
||||
assert!(receiver.recv().is_ok());
|
||||
if reader.searcher().num_docs() == 1 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert!(receiver.recv().is_ok());
|
||||
assert_eq!(reader.searcher().num_docs(), 1);
|
||||
writer.add_document(doc!(field=>2u64));
|
||||
writer.commit().unwrap();
|
||||
// ... Same as above
|
||||
loop {
|
||||
assert!(receiver.recv().is_ok());
|
||||
if reader.searcher().num_docs() == 2 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert!(receiver.recv().is_ok());
|
||||
assert_eq!(reader.searcher().num_docs(), 2);
|
||||
}
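The helper above waits for a commit to become visible to an `OnCommit` reader by watching the index directory. A condensed sketch of that pattern, assuming an `index` with a u64 `field` and a `reader` built with the `OnCommit` reload policy as in the surrounding tests:

```rust
// Sketch only: mirrors the loop in the helper above.
let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = index.directory_mut().watch(Box::new(move || {
    let _ = sender.send(());
}));

let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.add_document(doc!(field => 1u64));
writer.commit().unwrap();

// notify may fire more than once per commit (observed on macOS CI),
// so poll until the searcher actually reflects the new document.
loop {
    assert!(receiver.recv().is_ok());
    if reader.searcher().num_docs() == 1 {
        break;
    }
}
```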
|
||||
|
||||
// This test will not pass on windows, because windows
|
||||
|
||||
@@ -3,7 +3,9 @@ use crate::core::SegmentId;
|
||||
use crate::schema::Schema;
|
||||
use crate::Opstamp;
|
||||
use census::{Inventory, TrackedObject};
|
||||
use serde;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
@@ -213,7 +215,7 @@ pub struct IndexMeta {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// Payload associated to the last commit.
|
||||
///
|
||||
/// Upon commit, clients can optionally add a small `String` payload to their commit
|
||||
/// Upon commit, clients can optionally add a small `Striing` payload to their commit
|
||||
/// to help identify this commit.
|
||||
/// This payload is entirely unused by tantivy.
|
||||
pub payload: Option<String>,
|
||||
@@ -301,7 +303,7 @@ mod tests {
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default","fieldnorms":true},"stored":false}}],"opstamp":0}"#
|
||||
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
use std::io;
|
||||
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::positions::PositionReader;
|
||||
use crate::postings::TermInfo;
|
||||
use crate::postings::{BlockSegmentPostings, SegmentPostings};
|
||||
use crate::schema::FieldType;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::schema::Term;
|
||||
use crate::termdict::TermDictionary;
|
||||
use owned_read::OwnedRead;
|
||||
|
||||
/// The inverted index reader is in charge of accessing
|
||||
/// the inverted index associated to a specific field.
|
||||
@@ -16,7 +16,7 @@ use crate::termdict::TermDictionary;
|
||||
///
|
||||
/// It is safe to delete the segment associated to
|
||||
/// an `InvertedIndexReader`. As long as it is open,
|
||||
/// the `FileSlice` it is relying on should
|
||||
/// the `ReadOnlySource` it is relying on should
|
||||
/// stay available.
|
||||
///
|
||||
///
|
||||
@@ -24,9 +24,9 @@ use crate::termdict::TermDictionary;
|
||||
/// the `SegmentReader`'s [`.inverted_index(...)`] method
|
||||
pub struct InvertedIndexReader {
|
||||
termdict: TermDictionary,
|
||||
postings_file_slice: FileSlice,
|
||||
positions_file_slice: FileSlice,
|
||||
positions_idx_file_slice: FileSlice,
|
||||
postings_source: ReadOnlySource,
|
||||
positions_source: ReadOnlySource,
|
||||
positions_idx_source: ReadOnlySource,
|
||||
record_option: IndexRecordOption,
|
||||
total_num_tokens: u64,
|
||||
}
|
||||
@@ -35,31 +35,35 @@ impl InvertedIndexReader {
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
|
||||
pub(crate) fn new(
|
||||
termdict: TermDictionary,
|
||||
postings_file_slice: FileSlice,
|
||||
positions_file_slice: FileSlice,
|
||||
positions_idx_file_slice: FileSlice,
|
||||
postings_source: ReadOnlySource,
|
||||
positions_source: ReadOnlySource,
|
||||
positions_idx_source: ReadOnlySource,
|
||||
record_option: IndexRecordOption,
|
||||
) -> io::Result<InvertedIndexReader> {
|
||||
let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
|
||||
let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
|
||||
Ok(InvertedIndexReader {
|
||||
) -> InvertedIndexReader {
|
||||
let total_num_tokens_data = postings_source.slice(0, 8);
|
||||
let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
|
||||
let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
|
||||
InvertedIndexReader {
|
||||
termdict,
|
||||
postings_file_slice: postings_body,
|
||||
positions_file_slice,
|
||||
positions_idx_file_slice,
|
||||
postings_source: postings_source.slice_from(8),
|
||||
positions_source,
|
||||
positions_idx_source,
|
||||
record_option,
|
||||
total_num_tokens,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an empty `InvertedIndexReader` object, which
|
||||
/// contains no terms at all.
|
||||
pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
|
||||
pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
|
||||
let record_option = field_type
|
||||
.get_index_record_option()
|
||||
.unwrap_or(IndexRecordOption::Basic);
|
||||
InvertedIndexReader {
|
||||
termdict: TermDictionary::empty(),
|
||||
postings_file_slice: FileSlice::empty(),
|
||||
positions_file_slice: FileSlice::empty(),
|
||||
positions_idx_file_slice: FileSlice::empty(),
|
||||
postings_source: ReadOnlySource::empty(),
|
||||
positions_source: ReadOnlySource::empty(),
|
||||
positions_idx_source: ReadOnlySource::empty(),
|
||||
record_option,
|
||||
total_num_tokens: 0u64,
|
||||
}
|
||||
@@ -89,12 +93,12 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
block_postings: &mut BlockSegmentPostings,
|
||||
) -> io::Result<()> {
|
||||
let postings_slice = self
|
||||
.postings_file_slice
|
||||
.slice_from(term_info.postings_offset as usize);
|
||||
block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
|
||||
Ok(())
|
||||
) {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let end_source = self.postings_source.len();
|
||||
let postings_slice = self.postings_source.slice(offset, end_source);
|
||||
let postings_reader = OwnedRead::new(postings_slice);
|
||||
block_postings.reset(term_info.doc_freq, postings_reader);
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `Term`.
|
||||
@@ -105,11 +109,9 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term: &Term,
|
||||
option: IndexRecordOption,
|
||||
) -> io::Result<Option<BlockSegmentPostings>> {
|
||||
Ok(self
|
||||
.get_term_info(term)
|
||||
) -> Option<BlockSegmentPostings> {
|
||||
self.get_term_info(term)
|
||||
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
|
||||
.transpose()?)
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `term_info`.
|
||||
@@ -120,12 +122,12 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
requested_option: IndexRecordOption,
|
||||
) -> io::Result<BlockSegmentPostings> {
|
||||
) -> BlockSegmentPostings {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let postings_data = self.postings_file_slice.slice_from(offset);
|
||||
BlockSegmentPostings::open(
|
||||
let postings_data = self.postings_source.slice_from(offset);
|
||||
BlockSegmentPostings::from_data(
|
||||
term_info.doc_freq,
|
||||
postings_data,
|
||||
OwnedRead::new(postings_data),
|
||||
self.record_option,
|
||||
requested_option,
|
||||
)
|
||||
@@ -139,23 +141,20 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
option: IndexRecordOption,
|
||||
) -> io::Result<SegmentPostings> {
|
||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
|
||||
) -> SegmentPostings {
|
||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option);
|
||||
let position_stream = {
|
||||
if option.has_positions() {
|
||||
let position_reader = self.positions_file_slice.clone();
|
||||
let skip_reader = self.positions_idx_file_slice.clone();
|
||||
let position_reader = self.positions_source.clone();
|
||||
let skip_reader = self.positions_idx_source.clone();
|
||||
let position_reader =
|
||||
PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?;
|
||||
PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
|
||||
Some(position_reader)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
Ok(SegmentPostings::from_block_postings(
|
||||
block_postings,
|
||||
position_stream,
|
||||
))
|
||||
SegmentPostings::from_block_postings(block_postings, position_stream)
|
||||
}
|
||||
|
||||
/// Returns the total number of tokens recorded for all documents
|
||||
@@ -174,31 +173,24 @@ impl InvertedIndexReader {
|
||||
/// For instance, requesting `IndexRecordOption::Freq` for a
|
||||
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
||||
/// with `DocId`s and frequencies.
|
||||
pub fn read_postings(
|
||||
&self,
|
||||
term: &Term,
|
||||
option: IndexRecordOption,
|
||||
) -> io::Result<Option<SegmentPostings>> {
|
||||
pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
|
||||
self.get_term_info(term)
|
||||
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub(crate) fn read_postings_no_deletes(
|
||||
&self,
|
||||
term: &Term,
|
||||
option: IndexRecordOption,
|
||||
) -> io::Result<Option<SegmentPostings>> {
|
||||
) -> Option<SegmentPostings> {
|
||||
self.get_term_info(term)
|
||||
.map(|term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
    /// Returns the number of documents containing the term.
    pub fn doc_freq(&self, term: &Term) -> io::Result<u32> {
        Ok(self
            .get_term_info(term)
    pub fn doc_freq(&self, term: &Term) -> u32 {
        self.get_term_info(term)
            .map(|term_info| term_info.doc_freq)
            .unwrap_or(0u32))
            .unwrap_or(0u32)
    }
}
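A hedged usage sketch for the reader-side API above, written against the non-`Result` side of this diff (on the other side each call returns an `io::Result` that has to be handled); `searcher` and `field` are assumed to come from the usual index setup:

```rust
let term = Term::from_field_text(field, "tantivy");
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(term.field());

// Number of documents in this segment that contain the term.
let doc_freq: u32 = inverted_index.doc_freq(&term);

// Postings exist only if the term occurs in this segment.
if let Some(_postings) = inverted_index.read_postings(&term, IndexRecordOption::Basic) {
    // walk the posting list here
}
```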
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
use crate::collector::Collector;
|
||||
use crate::collector::SegmentCollector;
|
||||
use crate::core::Executor;
|
||||
use crate::core::InvertedIndexReader;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::query::Query;
|
||||
use crate::query::Scorer;
|
||||
use crate::query::Weight;
|
||||
use crate::schema::Document;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::{Field, Term};
|
||||
@@ -11,8 +14,28 @@ use crate::store::StoreReader;
|
||||
use crate::termdict::TermMerger;
|
||||
use crate::DocAddress;
|
||||
use crate::Index;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
fn collect_segment<C: Collector>(
|
||||
collector: &C,
|
||||
weight: &dyn Weight,
|
||||
segment_ord: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<C::Fruit> {
|
||||
let mut scorer = weight.scorer(segment_reader, 1.0f32)?;
|
||||
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
|
||||
if let Some(delete_bitset) = segment_reader.delete_bitset() {
|
||||
scorer.for_each(&mut |doc, score| {
|
||||
if delete_bitset.is_alive(doc) {
|
||||
segment_collector.collect(doc, score);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
|
||||
}
|
||||
Ok(segment_collector.harvest())
|
||||
}
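`collect_segment` is the per-segment half of a search: the weight builds a scorer, and live documents are fed to the collector, skipping anything flagged in the delete bitset. From the caller's side this is driven through `Searcher::search`; a small usage fragment (the `Count` collector and `TermQuery` are standard tantivy items assumed here, not shown in this diff):

```rust
use tantivy::collector::Count;
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Term};

let query = TermQuery::new(
    Term::from_field_text(name, "tantivy"),
    IndexRecordOption::Basic,
);
// Runs the weight / scorer / collector pipeline over every segment,
// honoring each segment's delete bitset.
let num_hits = searcher.search(&query, &Count).unwrap();
```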
|
||||
|
||||
/// Holds a list of `SegmentReader`s ready for search.
|
||||
///
|
||||
@@ -32,17 +55,17 @@ impl Searcher {
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
) -> io::Result<Searcher> {
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
) -> Searcher {
|
||||
let store_readers = segment_readers
|
||||
.iter()
|
||||
.map(SegmentReader::get_store_reader)
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
Ok(Searcher {
|
||||
.collect();
|
||||
Searcher {
|
||||
schema,
|
||||
index,
|
||||
segment_readers,
|
||||
store_readers,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the `Index` associated to the `Searcher`
|
||||
@@ -75,14 +98,13 @@ impl Searcher {
|
||||
|
||||
    /// Return the overall number of documents containing
    /// the given term.
    pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
        let mut total_doc_freq = 0;
        for segment_reader in &self.segment_readers {
            let inverted_index = segment_reader.inverted_index(term.field())?;
            let doc_freq = inverted_index.doc_freq(term)?;
            total_doc_freq += u64::from(doc_freq);
        }
        Ok(total_doc_freq)
    pub fn doc_freq(&self, term: &Term) -> u64 {
        self.segment_readers
            .iter()
            .map(|segment_reader| {
                u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
            })
            .sum::<u64>()
    }
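At the `Searcher` level the per-segment counts above are simply summed. A short usage fragment, with the same hedge as before: on the fallible side of this diff the call returns `crate::Result<u64>` instead of a plain `u64`:

```rust
let searcher = index.reader().unwrap().searcher();
let term = Term::from_field_text(field, "tantivy");
// Total number of documents containing `term`, summed over all segments.
let total_doc_freq: u64 = searcher.doc_freq(&term);
```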
|
||||
|
||||
/// Return the list of segment readers
|
||||
@@ -141,7 +163,12 @@ impl Searcher {
|
||||
let segment_readers = self.segment_readers();
|
||||
let fruits = executor.map(
|
||||
|(segment_ord, segment_reader)| {
|
||||
collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
|
||||
collect_segment(
|
||||
collector,
|
||||
weight.as_ref(),
|
||||
segment_ord as u32,
|
||||
segment_reader,
|
||||
)
|
||||
},
|
||||
segment_readers.iter().enumerate(),
|
||||
)?;
|
||||
@@ -149,22 +176,22 @@ impl Searcher {
|
||||
}
|
||||
|
||||
/// Return the field searcher associated to a `Field`.
|
||||
pub fn field(&self, field: Field) -> crate::Result<FieldSearcher> {
|
||||
let inv_index_readers: Vec<Arc<InvertedIndexReader>> = self
|
||||
pub fn field(&self, field: Field) -> FieldSearcher {
|
||||
let inv_index_readers = self
|
||||
.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.inverted_index(field))
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
Ok(FieldSearcher::new(inv_index_readers))
|
||||
.collect::<Vec<_>>();
|
||||
FieldSearcher::new(inv_index_readers)
|
||||
}
|
||||
|
||||
/// Summarize total space usage of this searcher.
|
||||
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
|
||||
pub fn space_usage(&self) -> SearcherSpaceUsage {
|
||||
let mut space_usage = SearcherSpaceUsage::new();
|
||||
for segment_reader in &self.segment_readers {
|
||||
space_usage.add_segment(segment_reader.space_usage()?);
|
||||
for segment_reader in self.segment_readers.iter() {
|
||||
space_usage.add_segment(segment_reader.space_usage());
|
||||
}
|
||||
Ok(space_usage)
|
||||
space_usage
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,41 +3,140 @@ use crate::core::Index;
|
||||
use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::directory::error::{OpenReadError, OpenWriteError};
|
||||
use crate::directory::Directory;
|
||||
use crate::directory::{FileSlice, WritePtr};
|
||||
use crate::directory::{Directory, ManagedDirectory, RAMDirectory};
|
||||
use crate::directory::{ReadOnlySource, WritePtr};
|
||||
use crate::indexer::segment_serializer::SegmentSerializer;
|
||||
use crate::schema::Schema;
|
||||
use crate::Opstamp;
|
||||
use std::fmt;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::PathBuf;
|
||||
use crate::indexer::{ResourceManager};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) enum SegmentDirectory {
|
||||
Persisted(ManagedDirectory),
|
||||
Volatile {
|
||||
directory: RAMDirectory,
|
||||
},
|
||||
}
|
||||
|
||||
impl SegmentDirectory {
|
||||
fn new_volatile(memory_manager: ResourceManager) -> SegmentDirectory {
|
||||
SegmentDirectory::Volatile {
|
||||
directory: RAMDirectory::create_with_memory_manager(memory_manager),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SegmentDirectory {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
SegmentDirectory::Volatile { .. } => write!(f, "volatile")?,
|
||||
SegmentDirectory::Persisted(dir) => write!(f, "Persisted({:?})", dir)?,
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ManagedDirectory> for SegmentDirectory {
|
||||
fn from(directory: ManagedDirectory) -> Self {
|
||||
SegmentDirectory::Persisted(directory)
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for SegmentDirectory {
|
||||
type Target = dyn Directory;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
match self {
|
||||
SegmentDirectory::Volatile {
|
||||
directory, ..
|
||||
} => directory,
|
||||
SegmentDirectory::Persisted(dir) => dir,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for SegmentDirectory {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
match self {
|
||||
SegmentDirectory::Volatile { directory, .. } => directory,
|
||||
SegmentDirectory::Persisted(dir) => dir,
|
||||
}
|
||||
}
|
||||
}
|
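The `Deref`/`DerefMut` impls above are what let the rest of `Segment` treat either variant as a plain `dyn Directory`. A minimal, self-contained sketch of that dispatch pattern with toy types (not tantivy's):

```rust
use std::ops::Deref;

trait Storage {
    fn name(&self) -> &'static str;
}

struct Ram;
struct Disk;
impl Storage for Ram { fn name(&self) -> &'static str { "ram" } }
impl Storage for Disk { fn name(&self) -> &'static str { "disk" } }

// Mirrors the Volatile / Persisted split of SegmentDirectory.
enum Backend {
    Volatile(Ram),
    Persisted(Disk),
}

impl Deref for Backend {
    type Target = dyn Storage;
    fn deref(&self) -> &Self::Target {
        match self {
            Backend::Volatile(ram) => ram,
            Backend::Persisted(disk) => disk,
        }
    }
}

fn main() {
    let backend = Backend::Volatile(Ram);
    // Callers just use the trait; the enum decides which impl backs it.
    println!("{}", backend.name());
}
```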
||||
|
||||
/// A segment is a piece of the index.
|
||||
#[derive(Clone)]
|
||||
pub struct Segment {
|
||||
index: Index,
|
||||
schema: Schema,
|
||||
meta: SegmentMeta,
|
||||
directory: SegmentDirectory,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Segment {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Segment({:?})", self.id().uuid_string())
|
||||
write!(
|
||||
f,
|
||||
"Segment(id={:?}, directory={:?})",
|
||||
self.id().uuid_string(),
|
||||
self.directory
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Segment {
|
||||
/// Creates a new segment given an `Index` and a `SegmentId`
|
||||
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
|
||||
Segment { index, meta }
|
||||
pub(crate) fn new_persisted(
|
||||
meta: SegmentMeta,
|
||||
directory: ManagedDirectory,
|
||||
schema: Schema,
|
||||
) -> Segment {
|
||||
Segment {
|
||||
meta,
|
||||
schema,
|
||||
directory: SegmentDirectory::from(directory),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the index the segment belongs to.
|
||||
pub fn index(&self) -> &Index {
|
||||
&self.index
|
||||
/// Creates a new segment that embeds its own `RAMDirectory`.
|
||||
///
|
||||
/// That segment is entirely dissociated from the index directory.
|
||||
/// It will be persisted by a background thread in charge of IO.
|
||||
pub fn new_volatile(meta: SegmentMeta, schema: Schema, resource_manager: ResourceManager) -> Segment {
|
||||
Segment {
|
||||
schema,
|
||||
meta,
|
||||
directory: SegmentDirectory::new_volatile(resource_manager),
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new segment given an `Index` and a `SegmentId`
|
||||
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
|
||||
let segment_directory = index.directory().clone();
|
||||
Segment::new_persisted(meta, segment_directory, index.schema())
|
||||
}
|
||||
|
||||
/// Persists a given `Segment` to a directory.
|
||||
pub fn persist(&mut self, mut dest_directory: ManagedDirectory) -> crate::Result<()> {
|
||||
if let SegmentDirectory::Persisted(_) = self.directory {
|
||||
// this segment is already persisted.
|
||||
return Ok(());
|
||||
}
|
||||
if let SegmentDirectory::Volatile { directory, ..} = &self.directory {
|
||||
directory.persist(&mut dest_directory)?;
|
||||
}
|
||||
self.directory = SegmentDirectory::Persisted(dest_directory);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn into_volatile(&self, memory_manager: ResourceManager) -> Segment {
|
||||
Segment::new_volatile(self.meta.clone(), self.schema.clone(), memory_manager)
|
||||
}
|
||||
|
||||
/// Returns our index's schema.
|
||||
pub fn schema(&self) -> Schema {
|
||||
self.index.schema()
|
||||
self.schema.clone()
|
||||
}
|
||||
|
||||
/// Returns the segment meta-information
|
||||
@@ -51,16 +150,18 @@ impl Segment {
|
||||
/// as we finalize a fresh new segment.
|
||||
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
|
||||
Segment {
|
||||
index: self.index,
|
||||
schema: self.schema,
|
||||
meta: self.meta.with_max_doc(max_doc),
|
||||
directory: self.directory,
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
|
||||
Segment {
|
||||
index: self.index,
|
||||
schema: self.schema,
|
||||
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
|
||||
directory: self.directory,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -78,16 +179,17 @@ impl Segment {
|
||||
}
|
||||
|
||||
/// Open one of the component file for a *regular* read.
|
||||
pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> {
|
||||
pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
|
||||
let path = self.relative_path(component);
|
||||
self.index.directory().open_read(&path)
|
||||
let source = self.directory.open_read(&path)?;
|
||||
Ok(source)
|
||||
}
|
||||
|
||||
/// Open one of the component file for *regular* write.
|
||||
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
|
||||
let path = self.relative_path(component);
|
||||
let write = self.index.directory_mut().open_write(&path)?;
|
||||
Ok(write)
|
||||
let wrt = self.directory.open_write(&path)?;
|
||||
Ok(wrt)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ pub enum SegmentComponent {
|
||||
/// Dictionary associating `Term`s to `TermInfo`s which is
|
||||
/// simply an address into the `postings` file and the `positions` file.
|
||||
TERMS,
|
||||
/// Row-oriented, compressed storage of the documents.
|
||||
/// Row-oriented, LZ4-compressed storage of the documents.
|
||||
/// Accessing a document from the store is relatively slow, as it
|
||||
/// requires to decompress the entire block it belongs to.
|
||||
STORE,
|
||||
|
||||
@@ -1,26 +1,26 @@
|
||||
use crate::common::CompositeFile;
|
||||
use crate::common::HasLen;
|
||||
use crate::core::InvertedIndexReader;
|
||||
use crate::core::Segment;
|
||||
use crate::core::SegmentComponent;
|
||||
use crate::core::SegmentId;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::fastfield::FacetReader;
|
||||
use crate::fastfield::FastFieldReaders;
|
||||
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::schema::Field;
|
||||
use crate::schema::FieldType;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::{Field, IndexRecordOption};
|
||||
use crate::space_usage::SegmentSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
use crate::termdict::TermDictionary;
|
||||
use crate::DocId;
|
||||
use crate::{common::CompositeFile, error::DataCorruption};
|
||||
use fail::fail_point;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use std::{collections::HashMap, io};
|
||||
|
||||
/// Entry point to access all of the datastructures of the `Segment`
|
||||
///
|
||||
@@ -48,9 +48,9 @@ pub struct SegmentReader {
|
||||
positions_composite: CompositeFile,
|
||||
positions_idx_composite: CompositeFile,
|
||||
fast_fields_readers: Arc<FastFieldReaders>,
|
||||
fieldnorm_readers: FieldNormReaders,
|
||||
fieldnorms_composite: CompositeFile,
|
||||
|
||||
store_file: FileSlice,
|
||||
store_source: ReadOnlySource,
|
||||
delete_bitset_opt: Option<DeleteBitSet>,
|
||||
schema: Schema,
|
||||
}
|
||||
@@ -106,26 +106,16 @@ impl SegmentReader {
|
||||
}
|
||||
|
||||
/// Accessor to the `FacetReader` associated to a given `Field`.
|
||||
pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
|
||||
pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
if field_entry.field_type() != &FieldType::HierarchicalFacet {
|
||||
return Err(crate::TantivyError::InvalidArgument(format!(
|
||||
"Field {:?} is not a facet field.",
|
||||
field_entry.name()
|
||||
)));
|
||||
return None;
|
||||
}
|
||||
let term_ords_reader = self.fast_fields().u64s(field).ok_or_else(|| {
|
||||
DataCorruption::comment_only(format!(
|
||||
"Cannot find data for hierarchical facet {:?}",
|
||||
field_entry.name()
|
||||
))
|
||||
})?;
|
||||
let termdict = self
|
||||
.termdict_composite
|
||||
.open_read(field)
|
||||
.map(TermDictionary::open)
|
||||
.unwrap_or_else(|| Ok(TermDictionary::empty()))?;
|
||||
Ok(FacetReader::new(term_ords_reader, termdict))
|
||||
let term_ords_reader = self.fast_fields().u64s(field)?;
|
||||
let termdict_source = self.termdict_composite.open_read(field)?;
|
||||
let termdict = TermDictionary::from_source(&termdict_source);
|
||||
let facet_reader = FacetReader::new(term_ords_reader, termdict);
|
||||
Some(facet_reader)
|
||||
}
|
||||
|
||||
/// Accessor to the segment's `Field norms`'s reader.
|
||||
@@ -135,45 +125,47 @@ impl SegmentReader {
|
||||
///
|
||||
/// They are simply stored as a fast field, serialized in
|
||||
/// the `.fieldnorm` file of the segment.
|
||||
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
|
||||
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
|
||||
pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
|
||||
if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
|
||||
FieldNormReader::open(fieldnorm_source)
|
||||
} else {
|
||||
let field_name = self.schema.get_field_name(field);
|
||||
let err_msg = format!(
|
||||
"Field norm not found for field {:?}. Was it marked as indexed during indexing?",
|
||||
"Field norm not found for field {:?}. Was it market as indexed during indexing.",
|
||||
field_name
|
||||
);
|
||||
crate::TantivyError::SchemaError(err_msg)
|
||||
})
|
||||
panic!(err_msg);
|
||||
}
|
||||
}
|
||||
|
||||
/// Accessor to the segment's `StoreReader`.
|
||||
pub fn get_store_reader(&self) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone())
|
||||
pub fn get_store_reader(&self) -> StoreReader {
|
||||
StoreReader::from_source(self.store_source.clone())
|
||||
}
|
||||
|
||||
/// Open a new segment for reading.
|
||||
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
|
||||
let termdict_file = segment.open_read(SegmentComponent::TERMS)?;
|
||||
let termdict_composite = CompositeFile::open(&termdict_file)?;
|
||||
let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
|
||||
let termdict_composite = CompositeFile::open(&termdict_source)?;
|
||||
|
||||
let store_file = segment.open_read(SegmentComponent::STORE)?;
|
||||
let store_source = segment.open_read(SegmentComponent::STORE)?;
|
||||
|
||||
fail_point!("SegmentReader::open#middle");
|
||||
|
||||
let postings_file = segment.open_read(SegmentComponent::POSTINGS)?;
|
||||
let postings_composite = CompositeFile::open(&postings_file)?;
|
||||
let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
|
||||
let postings_composite = CompositeFile::open(&postings_source)?;
|
||||
|
||||
let positions_composite = {
|
||||
if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) {
|
||||
CompositeFile::open(&positions_file)?
|
||||
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
|
||||
CompositeFile::open(&source)?
|
||||
} else {
|
||||
CompositeFile::empty()
|
||||
}
|
||||
};
|
||||
|
||||
let positions_idx_composite = {
|
||||
if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
|
||||
CompositeFile::open(&positions_skip_file)?
|
||||
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
|
||||
CompositeFile::open(&source)?
|
||||
} else {
|
||||
CompositeFile::empty()
|
||||
}
|
||||
@@ -186,27 +178,26 @@ impl SegmentReader {
|
||||
let fast_field_readers =
|
||||
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
|
||||
|
||||
let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
|
||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
|
||||
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
|
||||
|
||||
let delete_bitset_opt = if segment.meta().has_deletes() {
|
||||
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
|
||||
let delete_bitset = DeleteBitSet::open(delete_data)?;
|
||||
Some(delete_bitset)
|
||||
Some(DeleteBitSet::open(delete_data))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(SegmentReader {
|
||||
inv_idx_reader_cache: Default::default(),
|
||||
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_doc: segment.meta().max_doc(),
|
||||
num_docs: segment.meta().num_docs(),
|
||||
termdict_composite,
|
||||
postings_composite,
|
||||
fast_fields_readers: fast_field_readers,
|
||||
fieldnorm_readers,
|
||||
fieldnorms_composite,
|
||||
segment_id: segment.id(),
|
||||
store_file,
|
||||
store_source,
|
||||
delete_bitset_opt,
|
||||
positions_composite,
|
||||
positions_idx_composite,
|
||||
@@ -221,64 +212,58 @@ impl SegmentReader {
|
||||
/// The field reader is in charge of iterating through the
|
||||
/// term dictionary associated to a specific field,
|
||||
/// and opening the posting list associated to any term.
|
||||
///
|
||||
/// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
|
||||
/// is returned.
|
||||
/// Similarly if the field is marked as indexed but no term has been indexed for the given
|
||||
/// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
|
||||
pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
|
||||
pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
|
||||
if let Some(inv_idx_reader) = self
|
||||
.inv_idx_reader_cache
|
||||
.read()
|
||||
.expect("Lock poisoned. This should never happen")
|
||||
.get(&field)
|
||||
{
|
||||
return Ok(Arc::clone(inv_idx_reader));
|
||||
return Arc::clone(inv_idx_reader);
|
||||
}
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let field_type = field_entry.field_type();
|
||||
let record_option_opt = field_type.get_index_record_option();
|
||||
|
||||
if record_option_opt.is_none() {
|
||||
warn!("Field {:?} does not seem indexed.", field_entry.name());
|
||||
panic!("Field {:?} does not seem indexed.", field_entry.name());
|
||||
}
|
||||
|
||||
let postings_file_opt = self.postings_composite.open_read(field);
|
||||
let record_option = record_option_opt.unwrap();
|
||||
|
||||
if postings_file_opt.is_none() || record_option_opt.is_none() {
|
||||
let postings_source_opt = self.postings_composite.open_read(field);
|
||||
|
||||
if postings_source_opt.is_none() {
|
||||
// no documents in the segment contained this field.
|
||||
// As a result, no data is associated to the inverted index.
|
||||
//
|
||||
// Returns an empty inverted index.
|
||||
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
|
||||
return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
|
||||
return Arc::new(InvertedIndexReader::empty(field_type));
|
||||
}
|
||||
|
||||
let record_option = record_option_opt.unwrap();
|
||||
let postings_file = postings_file_opt.unwrap();
|
||||
let postings_source = postings_source_opt.unwrap();
|
||||
|
||||
let termdict_file: FileSlice = self.termdict_composite.open_read(field)
|
||||
.ok_or_else(||
|
||||
DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name()))
|
||||
)?;
|
||||
let termdict_source = self.termdict_composite.open_read(field).expect(
|
||||
"Failed to open field term dictionary in composite file. Is the field indexed?",
|
||||
);
|
||||
|
||||
let positions_file = self
|
||||
let positions_source = self
|
||||
.positions_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field positions in composite file.");
|
||||
|
||||
let positions_idx_file = self
|
||||
let positions_idx_source = self
|
||||
.positions_idx_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field positions in composite file.");
|
||||
|
||||
let inv_idx_reader = Arc::new(InvertedIndexReader::new(
|
||||
TermDictionary::open(termdict_file)?,
|
||||
postings_file,
|
||||
positions_file,
|
||||
positions_idx_file,
|
||||
TermDictionary::from_source(&termdict_source),
|
||||
postings_source,
|
||||
positions_source,
|
||||
positions_idx_source,
|
||||
record_option,
|
||||
)?);
|
||||
));
|
||||
|
||||
// by releasing the lock in between, we may end up opening the inverting index
|
||||
// twice, but this is fine.
|
||||
@@ -287,7 +272,7 @@ impl SegmentReader {
|
||||
.expect("Field reader cache lock poisoned. This should never happen.")
|
||||
.insert(field, Arc::clone(&inv_idx_reader));
|
||||
|
||||
Ok(inv_idx_reader)
|
||||
inv_idx_reader
|
||||
}
|
||||
|
||||
/// Returns the segment id
|
||||
@@ -310,26 +295,26 @@ impl SegmentReader {
|
||||
}
|
||||
|
||||
    /// Returns an iterator that will iterate over the alive document ids
    pub fn doc_ids_alive<'a>(&'a self) -> impl Iterator<Item = DocId> + 'a {
        (0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
    pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
        SegmentReaderAliveDocsIterator::new(&self)
    }
|
||||
|
||||
/// Summarize total space usage of this segment.
|
||||
pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
|
||||
Ok(SegmentSpaceUsage::new(
|
||||
pub fn space_usage(&self) -> SegmentSpaceUsage {
|
||||
SegmentSpaceUsage::new(
|
||||
self.num_docs(),
|
||||
self.termdict_composite.space_usage(),
|
||||
self.postings_composite.space_usage(),
|
||||
self.positions_composite.space_usage(),
|
||||
self.positions_idx_composite.space_usage(),
|
||||
self.fast_fields_readers.space_usage(),
|
||||
self.fieldnorm_readers.space_usage(),
|
||||
self.get_store_reader()?.space_usage(),
|
||||
self.fieldnorms_composite.space_usage(),
|
||||
self.get_store_reader().space_usage(),
|
||||
self.delete_bitset_opt
|
||||
.as_ref()
|
||||
.map(DeleteBitSet::space_usage)
|
||||
.unwrap_or(0),
|
||||
))
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -339,6 +324,52 @@ impl fmt::Debug for SegmentReader {
|
||||
}
|
||||
}
|
||||
|
||||
/// Implements the iterator trait to allow easy iteration
|
||||
/// over non-deleted ("alive") DocIds in a SegmentReader
|
||||
pub struct SegmentReaderAliveDocsIterator<'a> {
|
||||
reader: &'a SegmentReader,
|
||||
max_doc: DocId,
|
||||
current: DocId,
|
||||
}
|
||||
|
||||
impl<'a> SegmentReaderAliveDocsIterator<'a> {
|
||||
pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
|
||||
SegmentReaderAliveDocsIterator {
|
||||
reader,
|
||||
max_doc: reader.max_doc(),
|
||||
current: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
|
||||
type Item = DocId;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
// TODO: Use TinySet (like in BitSetDocSet) to speed this process up
|
||||
if self.current >= self.max_doc {
|
||||
return None;
|
||||
}
|
||||
|
||||
// find the next alive doc id
|
||||
while self.reader.is_deleted(self.current) {
|
||||
self.current += 1;
|
||||
|
||||
if self.current >= self.max_doc {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
// capture the current alive DocId
|
||||
let result = Some(self.current);
|
||||
|
||||
// move down the chain
|
||||
self.current += 1;
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
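A self-contained sketch of the same skip-the-deleted-ids pattern, with a plain `Vec<bool>` standing in for the segment's delete bitset (toy types, not tantivy's):

```rust
struct AliveIds<'a> {
    deleted: &'a [bool], // true = deleted; index = doc id
    current: u32,
}

impl<'a> Iterator for AliveIds<'a> {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        // Walk forward, skipping ids flagged as deleted, until max_doc.
        while (self.current as usize) < self.deleted.len() {
            let doc = self.current;
            self.current += 1;
            if !self.deleted[doc as usize] {
                return Some(doc);
            }
        }
        None
    }
}

fn main() {
    let deleted = vec![false, true, false, true, false];
    let alive: Vec<u32> = AliveIds { deleted: &deleted[..], current: 0 }.collect();
    assert_eq!(alive, vec![0, 2, 4]);
    println!("{:?}", alive);
}
```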
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::core::Index;
|
||||
@@ -346,7 +377,7 @@ mod test {
|
||||
use crate::DocId;
|
||||
|
||||
#[test]
|
||||
fn test_alive_docs_iterator() -> crate::Result<()> {
|
||||
fn test_alive_docs_iterator() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_text_field("name", TEXT | STORED);
|
||||
let schema = schema_builder.build();
|
||||
@@ -354,26 +385,26 @@ mod test {
|
||||
let name = schema.get_field("name").unwrap();
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(name => "tantivy"));
|
||||
index_writer.add_document(doc!(name => "horse"));
|
||||
index_writer.add_document(doc!(name => "jockey"));
|
||||
index_writer.add_document(doc!(name => "cap"));
|
||||
|
||||
// we should now have one segment with two docs
|
||||
index_writer.commit()?;
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
|
||||
{
|
||||
let mut index_writer2 = index.writer(50_000_000)?;
|
||||
let mut index_writer2 = index.writer(50_000_000).unwrap();
|
||||
index_writer2.delete_term(Term::from_field_text(name, "horse"));
|
||||
index_writer2.delete_term(Term::from_field_text(name, "cap"));
|
||||
|
||||
// ok, now we should have a deleted doc
|
||||
index_writer2.commit()?;
|
||||
index_writer2.commit().unwrap();
|
||||
}
|
||||
let searcher = index.reader()?.searcher();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
|
||||
assert_eq!(vec![0u32, 2u32], docs);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ use crate::directory::error::LockError;
|
||||
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::WatchCallback;
|
||||
use crate::directory::WatchHandle;
|
||||
use crate::directory::{FileSlice, WritePtr};
|
||||
use crate::directory::{ReadOnlySource, WritePtr};
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -11,6 +11,7 @@ use std::marker::Send;
|
||||
use std::marker::Sync;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::result;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -79,7 +80,7 @@ fn try_acquire_lock(
|
||||
) -> Result<DirectoryLock, TryAcquireLockError> {
|
||||
let mut write = directory.open_write(filepath).map_err(|e| match e {
|
||||
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
|
||||
OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error),
|
||||
OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
|
||||
})?;
|
||||
write.flush().map_err(TryAcquireLockError::IOError)?;
|
||||
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
|
||||
@@ -116,19 +117,19 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// change.
|
||||
///
|
||||
/// Specifically, subsequent writes or flushes should
|
||||
/// have no effect on the returned `FileSlice` object.
|
||||
/// have no effect on the returned `ReadOnlySource` object.
|
||||
///
|
||||
/// You should only use this to read files create with [Directory::open_write].
|
||||
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError>;
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
|
||||
|
||||
/// Removes a file
|
||||
///
|
||||
/// Removing a file will not affect an eventual
|
||||
/// existing FileSlice pointing to it.
|
||||
/// existing ReadOnlySource pointing to it.
|
||||
///
|
||||
/// Removing a nonexistent file, yields a
|
||||
/// `DeleteError::DoesNotExist`.
|
||||
fn delete(&self, path: &Path) -> Result<(), DeleteError>;
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
|
||||
|
||||
/// Returns true iff the file exists
|
||||
fn exists(&self, path: &Path) -> bool;
|
||||
@@ -138,7 +139,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
///
|
||||
/// Right after this call, the file should be created
|
||||
/// and any subsequent call to `open_read` for the
|
||||
/// same path should return a `FileSlice`.
|
||||
/// same path should return a `ReadOnlySource`.
|
||||
///
|
||||
/// Write operations may be aggressively buffered.
|
||||
/// The client of this trait is responsible for calling flush
|
||||
@@ -152,7 +153,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// was not called.
|
||||
///
|
||||
/// The file may not previously exist.
|
||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
|
||||
|
||||
/// Reads the full content file that has been written using
|
||||
/// atomic_write.
|
||||
@@ -168,7 +169,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// a partially written file.
|
||||
///
|
||||
/// The file may or may not previously exist.
|
||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>;
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
|
||||
|
||||
/// Acquire a lock in the given directory.
|
||||
///
|
||||
|
||||
@@ -1,67 +1,162 @@
|
||||
use crate::Version;
|
||||
use std::error::Error as StdError;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Error while trying to acquire a directory lock.
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Fail)]
|
||||
pub enum LockError {
|
||||
/// Failed to acquired a lock as it is already held by another
|
||||
/// client.
|
||||
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
|
||||
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
|
||||
#[error("Could not acquire lock as it is already held, possibly by a different process.")]
|
||||
#[fail(
|
||||
display = "Could not acquire lock as it is already held, possibly by a different process."
|
||||
)]
|
||||
LockBusy,
|
||||
/// Trying to acquire a lock failed with an `IOError`
|
||||
#[error("Failed to acquire the lock due to an io:Error.")]
|
||||
#[fail(display = "Failed to acquire the lock due to an io:Error.")]
|
||||
IOError(io::Error),
|
||||
}
|
||||
|
||||
/// General IO error with an optional path to the offending file.
|
||||
#[derive(Debug)]
|
||||
pub struct IOError {
|
||||
path: Option<PathBuf>,
|
||||
err: io::Error,
|
||||
}
|
||||
|
||||
impl Into<io::Error> for IOError {
|
||||
fn into(self) -> io::Error {
|
||||
self.err
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for IOError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.path {
|
||||
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
|
||||
None => write!(f, "io error occurred: '{}'", self.err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for IOError {
|
||||
fn description(&self) -> &str {
|
||||
"io error occurred"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
Some(&self.err)
|
||||
}
|
||||
}
|
||||
|
||||
impl IOError {
|
||||
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
|
||||
IOError {
|
||||
path: Some(path),
|
||||
err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for IOError {
|
||||
fn from(err: io::Error) -> IOError {
|
||||
IOError { path: None, err }
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when opening a directory
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug)]
|
||||
pub enum OpenDirectoryError {
|
||||
/// The underlying directory does not exists.
|
||||
#[error("Directory does not exist: '{0}'.")]
|
||||
DoesNotExist(PathBuf),
|
||||
/// The path exists but is not a directory.
|
||||
#[error("Path exists but is not a directory: '{0}'.")]
|
||||
NotADirectory(PathBuf),
|
||||
/// Failed to create a temp directory.
|
||||
#[error("Failed to create a temporary directory: '{0}'.")]
|
||||
FailedToCreateTempDir(io::Error),
|
||||
/// IoError
|
||||
#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
|
||||
IoError {
|
||||
/// underlying io Error.
|
||||
io_error: io::Error,
|
||||
/// directory we tried to open.
|
||||
directory_path: PathBuf,
|
||||
},
|
||||
IoError(io::Error),
|
||||
}
|
||||
|
||||
impl From<io::Error> for OpenDirectoryError {
|
||||
fn from(io_err: io::Error) -> Self {
|
||||
OpenDirectoryError::IoError(io_err)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for OpenDirectoryError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
OpenDirectoryError::DoesNotExist(ref path) => {
|
||||
write!(f, "the underlying directory '{:?}' does not exist", path)
|
||||
}
|
||||
OpenDirectoryError::NotADirectory(ref path) => {
|
||||
write!(f, "the path '{:?}' exists but is not a directory", path)
|
||||
}
|
||||
OpenDirectoryError::IoError(ref err) => write!(
|
||||
f,
|
||||
"IOError while trying to open/create the directory. {:?}",
|
||||
err
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for OpenDirectoryError {
|
||||
fn description(&self) -> &str {
|
||||
"error occurred while opening a directory"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when starting to write in a file
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug)]
|
||||
pub enum OpenWriteError {
|
||||
/// Our directory is WORM, writing an existing file is forbidden.
|
||||
/// Checkout the `Directory` documentation.
|
||||
#[error("File already exists: '{0}'")]
|
||||
FileAlreadyExists(PathBuf),
|
||||
/// Any kind of IO error that happens when
|
||||
/// writing in the underlying IO device.
|
||||
#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")]
|
||||
IOError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
/// File path of the file that tantivy failed to open for write.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
IOError(IOError),
|
||||
}
|
||||
|
||||
impl OpenWriteError {
|
||||
pub(crate) fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||
Self::IOError { io_error, filepath }
|
||||
impl From<IOError> for OpenWriteError {
|
||||
fn from(err: IOError) -> OpenWriteError {
|
||||
OpenWriteError::IOError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for OpenWriteError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
OpenWriteError::FileAlreadyExists(ref path) => {
|
||||
write!(f, "the file '{:?}' already exists", path)
|
||||
}
|
||||
OpenWriteError::IOError(ref err) => write!(
|
||||
f,
|
||||
"an io error occurred while opening a file for writing: '{}'",
|
||||
err
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for OpenWriteError {
|
||||
fn description(&self) -> &str {
|
||||
"error occurred while opening a file for writing"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
match *self {
|
||||
OpenWriteError::FileAlreadyExists(_) => None,
|
||||
OpenWriteError::IOError(ref err) => Some(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Type of index incompatibility between the library and the index found on disk
|
||||
/// Used to catch and provide a hint to solve this incompatibility issue
|
||||
pub enum Incompatibility {
|
||||
@@ -122,46 +217,55 @@ impl fmt::Debug for Incompatibility {
|
||||
}
|
||||
|
||||
/// Error that may occur when accessing a file read
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug)]
|
||||
pub enum OpenReadError {
|
||||
/// The file does not exists.
|
||||
#[error("Files does not exists: {0:?}")]
|
||||
FileDoesNotExist(PathBuf),
|
||||
/// Any kind of io::Error.
|
||||
#[error(
|
||||
"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
|
||||
)]
|
||||
IOError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
/// File path of the file that tantivy failed to open for read.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
/// This library does not support the index version found in file footer.
|
||||
#[error("Index version unsupported: {0:?}")]
|
||||
IncompatibleIndex(Incompatibility),
|
||||
}
|
||||
|
||||
impl OpenReadError {
|
||||
pub(crate) fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||
Self::IOError { io_error, filepath }
|
||||
}
|
||||
}
|
||||
/// Error that may occur when trying to delete a file
|
||||
#[derive(Debug, Error)]
|
||||
pub enum DeleteError {
|
||||
/// The file does not exists.
|
||||
#[error("File does not exists: '{0}'.")]
|
||||
FileDoesNotExist(PathBuf),
|
||||
/// Any kind of IO error that happens when
|
||||
/// interacting with the underlying IO device.
|
||||
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
|
||||
IOError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
/// File path of the file that tantivy failed to delete.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
IOError(IOError),
|
||||
/// This library doesn't support the index version found on disk
|
||||
IncompatibleIndex(Incompatibility),
|
||||
}
|
||||
|
||||
impl From<IOError> for OpenReadError {
|
||||
fn from(err: IOError) -> OpenReadError {
|
||||
OpenReadError::IOError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for OpenReadError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
OpenReadError::FileDoesNotExist(ref path) => {
|
||||
write!(f, "the file '{:?}' does not exist", path)
|
||||
}
|
||||
OpenReadError::IOError(ref err) => write!(
|
||||
f,
|
||||
"an io error occurred while opening a file for reading: '{}'",
|
||||
err
|
||||
),
|
||||
OpenReadError::IncompatibleIndex(ref footer) => {
|
||||
write!(f, "Incompatible index format: {:?}", footer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when trying to delete a file
|
||||
#[derive(Debug)]
|
||||
pub enum DeleteError {
|
||||
/// The file does not exists.
|
||||
FileDoesNotExist(PathBuf),
|
||||
/// Any kind of IO error that happens when
|
||||
/// interacting with the underlying IO device.
|
||||
IOError(IOError),
|
||||
}
|
||||
|
||||
impl From<IOError> for DeleteError {
|
||||
fn from(err: IOError) -> DeleteError {
|
||||
DeleteError::IOError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Incompatibility> for OpenReadError {
|
||||
@@ -169,3 +273,29 @@ impl From<Incompatibility> for OpenReadError {
|
||||
OpenReadError::IncompatibleIndex(incompatibility)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DeleteError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
DeleteError::FileDoesNotExist(ref path) => {
|
||||
write!(f, "the file '{:?}' does not exist", path)
|
||||
}
|
||||
DeleteError::IOError(ref err) => {
|
||||
write!(f, "an io error occurred while deleting a file: '{}'", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for DeleteError {
|
||||
fn description(&self) -> &str {
|
||||
"error occurred while deleting a file"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
match *self {
|
||||
DeleteError::FileDoesNotExist(_) => None,
|
||||
DeleteError::IOError(ref err) => Some(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,237 +0,0 @@
|
||||
use stable_deref_trait::StableDeref;
|
||||
|
||||
use crate::common::HasLen;
|
||||
use crate::directory::OwnedBytes;
|
||||
use std::sync::Arc;
|
||||
use std::{io, ops::Deref};
|
||||
|
||||
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||
|
||||
/// Objects that represent file sections in tantivy.
|
||||
///
|
||||
/// By contract, whatever happens to the directory file, as long as a FileHandle
|
||||
/// is alive, the data associated with it cannot be altered or destroyed.
|
||||
///
|
||||
/// The underlying behavior is therefore specific to the `Directory` that created it.
|
||||
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
|
||||
/// on the filesystem.
|
||||
pub trait FileHandle: 'static + Send + Sync + HasLen {
|
||||
/// Reads a slice of bytes.
|
||||
///
|
||||
/// This method may panic if the range requested is invalid.
|
||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes>;
|
||||
}
|
||||
|
||||
impl FileHandle for &'static [u8] {
|
||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
||||
let bytes = &self[from..to];
|
||||
Ok(OwnedBytes::new(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Deref<Target = [u8]>> HasLen for T {
|
||||
fn len(&self) -> usize {
|
||||
self.as_ref().len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> From<B> for FileSlice
|
||||
where
|
||||
B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
|
||||
{
|
||||
fn from(bytes: B) -> FileSlice {
|
||||
FileSlice::new(OwnedBytes::new(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
/// Logical slice of a read-only file in tantivy.
|
||||
///
|
||||
/// It can be cloned and sliced cheaply.
|
||||
///
|
||||
#[derive(Clone)]
|
||||
pub struct FileSlice {
|
||||
data: Arc<Box<dyn FileHandle>>,
|
||||
start: usize,
|
||||
stop: usize,
|
||||
}
|
||||
|
||||
impl FileSlice {
|
||||
/// Wraps a FileHandle.
|
||||
pub fn new<D>(data: D) -> Self
|
||||
where
|
||||
D: FileHandle,
|
||||
{
|
||||
let len = data.len();
|
||||
FileSlice {
|
||||
data: Arc::new(Box::new(data)),
|
||||
start: 0,
|
||||
stop: len,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a `FileSlice` that is just a view over a slice of the data.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `to < from` or if `to` exceeds the filesize.
|
||||
pub fn slice(&self, from: usize, to: usize) -> FileSlice {
|
||||
assert!(to <= self.len());
|
||||
assert!(to >= from);
|
||||
FileSlice {
|
||||
data: self.data.clone(),
|
||||
start: self.start + from,
|
||||
stop: self.start + to,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an empty FileSlice
|
||||
pub fn empty() -> FileSlice {
|
||||
const EMPTY_SLICE: &[u8] = &[];
|
||||
FileSlice::from(EMPTY_SLICE)
|
||||
}
|
||||
|
||||
/// Returns an `OwnedBytes` with all of the data in the `FileSlice`.
|
||||
///
|
||||
/// The behavior is strongly dependent on the implementation of the underlying
|
||||
/// `Directory` and the `FileSliceTrait` it creates.
|
||||
/// In particular, it is up to the `Directory` implementation
|
||||
/// to handle caching if needed.
|
||||
pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
|
||||
self.data.read_bytes(self.start, self.stop)
|
||||
}
|
||||
|
||||
/// Reads a specific slice of data.
|
||||
///
|
||||
/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
|
||||
pub fn read_bytes_slice(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
||||
assert!(from <= to);
|
||||
assert!(
|
||||
self.start + to <= self.stop,
|
||||
"`to` exceeds the fileslice length"
|
||||
);
|
||||
self.data.read_bytes(self.start + from, self.start + to)
|
||||
}
|
||||
|
||||
/// Splits the FileSlice at the given offset and returns two file slices.
|
||||
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||
///
|
||||
/// This operation is cheap and must not copy any underlying data.
|
||||
pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
|
||||
let left = self.slice_to(left_len);
|
||||
let right = self.slice_from(left_len);
|
||||
(left, right)
|
||||
}
|
||||
|
||||
/// Splits the file slice at the given offset and returns two file slices.
|
||||
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||
pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
|
||||
let left_len = self.len() - right_len;
|
||||
self.split(left_len)
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `from`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(from_offset, self.len())`
|
||||
pub fn slice_from(&self, from_offset: usize) -> FileSlice {
|
||||
self.slice(from_offset, self.len())
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `to`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(0, to_offset)`
|
||||
pub fn slice_to(&self, to_offset: usize) -> FileSlice {
|
||||
self.slice(0, to_offset)
|
||||
}
|
||||
}
|
||||
|
||||
impl HasLen for FileSlice {
|
||||
fn len(&self) -> usize {
|
||||
self.stop - self.start
|
||||
}
|
||||
}
|
||||
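Before the tests, a small hedged sketch of the slicing contract described above: slices and splits are cheap views that share the same underlying `FileHandle`. It assumes this module's `FileSlice` is in scope and relies only on the methods defined above:

```rust
use std::io;

fn demo_slicing() -> io::Result<()> {
    // A &'static [u8] implements FileHandle, so it can back a FileSlice directly.
    let file_slice = FileSlice::new(&b"hello world"[..]);
    // `split` consumes the slice, but both halves keep the data alive.
    let (left, right) = file_slice.split(5);
    assert_eq!(left.read_bytes()?.as_slice(), b"hello");
    assert_eq!(right.slice_from(1).read_bytes()?.as_slice(), b"world");
    Ok(())
}
```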
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{FileHandle, FileSlice};
|
||||
use crate::common::HasLen;
|
||||
use std::io;
|
||||
|
||||
#[test]
|
||||
fn test_file_slice() -> io::Result<()> {
|
||||
let file_slice = FileSlice::new(b"abcdef".as_ref());
|
||||
assert_eq!(file_slice.len(), 6);
|
||||
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
|
||||
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
|
||||
assert_eq!(
|
||||
file_slice
|
||||
.slice_from(1)
|
||||
.slice_to(2)
|
||||
.read_bytes()?
|
||||
.as_slice(),
|
||||
b"bc"
|
||||
);
|
||||
{
|
||||
let (left, right) = file_slice.clone().split(0);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split(2);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split_from_end(0);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split_from_end(2);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_slice_trait_slice_len() {
|
||||
let blop: &'static [u8] = b"abc";
|
||||
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
|
||||
assert_eq!(owned_bytes.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slice_simple_read() -> io::Result<()> {
|
||||
let slice = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(slice.len(), 6);
|
||||
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
|
||||
assert_eq!(slice.slice(1, 4).read_bytes()?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slice_read_slice() -> io::Result<()> {
|
||||
let slice_deref = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(slice_deref.read_bytes_slice(1, 4)?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "assertion failed: from <= to")]
|
||||
fn test_slice_read_slice_invalid_range() {
|
||||
let slice_deref = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(slice_deref.read_bytes_slice(1, 0).unwrap().as_ref(), b"bcd");
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "`to` exceeds the fileslice length")]
|
||||
fn test_slice_read_slice_invalid_range_exceeds() {
|
||||
let slice_deref = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(
|
||||
slice_deref.read_bytes_slice(0, 10).unwrap().as_ref(),
|
||||
b"bcd"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,9 @@
|
||||
use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt};
|
||||
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
|
||||
use crate::directory::error::Incompatibility;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::read_only_source::ReadOnlySource;
|
||||
use crate::directory::{AntiCallToken, TerminatingWrite};
|
||||
use crate::Version;
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use crc32fast::Hasher;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -63,26 +64,26 @@ impl Footer {
|
||||
let mut counting_write = CountingWriter::wrap(&mut write);
|
||||
self.serialize(&mut counting_write)?;
|
||||
let written_len = counting_write.written_bytes();
|
||||
(written_len as u32).serialize(write)?;
|
||||
write.write_u32::<LittleEndian>(written_len as u32)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> {
|
||||
if file.len() < 4 {
|
||||
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
||||
if source.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
format!(
|
||||
"File corrupted. The file is smaller than 4 bytes (len={}).",
|
||||
file.len()
|
||||
source.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let mut footer_len_bytes = footer_len_file.read_bytes()?;
|
||||
let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize;
|
||||
let (body, footer) = body_footer.split_from_end(footer_len);
|
||||
let mut footer_bytes = footer.read_bytes()?;
|
||||
let footer = Footer::deserialize(&mut footer_bytes)?;
|
||||
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
|
||||
let body_len = body_footer.len() - footer_len;
|
||||
let (body, footer_data) = body_footer.split(body_len);
|
||||
let mut cursor = footer_data.as_slice();
|
||||
let footer = Footer::deserialize(&mut cursor)?;
|
||||
Ok((footer, body))
|
||||
}
|
||||
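A hedged sketch of how the extracted footer can be used to verify a file, mirroring what `ManagedDirectory::validate_checksum` does further down. It assumes the `FileSlice`-based signature of `extract_footer` from this diff and that the `versioned_footer` field is accessible from this module:

```rust
use crc32fast::Hasher;
use std::io;

fn crc_matches(file: FileSlice) -> io::Result<bool> {
    let (footer, body) = Footer::extract_footer(file)?;
    let mut hasher = Hasher::new();
    hasher.update(body.read_bytes()?.as_slice());
    // UnknownVersion footers expose no crc; treat them as non-matching here.
    Ok(footer.versioned_footer.crc() == Some(hasher.finalize()))
}
```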
|
||||
@@ -93,24 +94,12 @@ impl Footer {
|
||||
match &self.versioned_footer {
|
||||
VersionedFooter::V1 {
|
||||
crc32: _crc,
|
||||
store_compression,
|
||||
store_compression: compression,
|
||||
} => {
|
||||
if &library_version.store_compression != store_compression {
|
||||
if &library_version.store_compression != compression {
|
||||
return Err(Incompatibility::CompressionMismatch {
|
||||
library_compression_format: library_version.store_compression.to_string(),
|
||||
index_compression_format: store_compression.to_string(),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
VersionedFooter::V2 {
|
||||
crc32: _crc,
|
||||
store_compression,
|
||||
} => {
|
||||
if &library_version.store_compression != store_compression {
|
||||
return Err(Incompatibility::CompressionMismatch {
|
||||
library_compression_format: library_version.store_compression.to_string(),
|
||||
index_compression_format: store_compression.to_string(),
|
||||
index_compression_format: compression.to_string(),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
@@ -131,29 +120,24 @@ pub enum VersionedFooter {
|
||||
crc32: CrcHashU32,
|
||||
store_compression: String,
|
||||
},
|
||||
// Introduction of the Block WAND information.
|
||||
V2 {
|
||||
crc32: CrcHashU32,
|
||||
store_compression: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl BinarySerializable for VersionedFooter {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut buf = Vec::new();
|
||||
match self {
|
||||
VersionedFooter::V2 {
|
||||
VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression: compression,
|
||||
} => {
|
||||
// Serializes a valid `VersionedFooter`, or returns an error if the version is unknown
|
||||
// [ version | crc_hash | compression_mode ]
|
||||
// [ 0..4 | 4..8 | variable ]
|
||||
BinarySerializable::serialize(&2u32, &mut buf)?;
|
||||
BinarySerializable::serialize(&1u32, &mut buf)?;
|
||||
BinarySerializable::serialize(crc32, &mut buf)?;
|
||||
BinarySerializable::serialize(compression, &mut buf)?;
|
||||
}
|
||||
VersionedFooter::V1 { .. } | VersionedFooter::UnknownVersion => {
|
||||
VersionedFooter::UnknownVersion => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Cannot serialize an unknown versioned footer ",
|
||||
@@ -182,30 +166,22 @@ impl BinarySerializable for VersionedFooter {
|
||||
reader.read_exact(&mut buf[..])?;
|
||||
let mut cursor = &buf[..];
|
||||
let version = u32::deserialize(&mut cursor)?;
|
||||
if version != 1 && version != 2 {
|
||||
return Ok(VersionedFooter::UnknownVersion);
|
||||
}
|
||||
let crc32 = u32::deserialize(&mut cursor)?;
|
||||
let store_compression = String::deserialize(&mut cursor)?;
|
||||
Ok(if version == 1 {
|
||||
VersionedFooter::V1 {
|
||||
if version == 1 {
|
||||
let crc32 = u32::deserialize(&mut cursor)?;
|
||||
let compression = String::deserialize(&mut cursor)?;
|
||||
Ok(VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression,
|
||||
}
|
||||
store_compression: compression,
|
||||
})
|
||||
} else {
|
||||
assert_eq!(version, 2);
|
||||
VersionedFooter::V2 {
|
||||
crc32,
|
||||
store_compression,
|
||||
}
|
||||
})
|
||||
Ok(VersionedFooter::UnknownVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl VersionedFooter {
|
||||
pub fn crc(&self) -> Option<CrcHashU32> {
|
||||
match self {
|
||||
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
|
||||
VersionedFooter::V1 { crc32, .. } => Some(*crc32),
|
||||
VersionedFooter::UnknownVersion { .. } => None,
|
||||
}
|
||||
@@ -243,7 +219,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
|
||||
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
|
||||
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
|
||||
let crc32 = self.hasher.take().unwrap().finalize();
|
||||
let footer = Footer::new(VersionedFooter::V2 {
|
||||
let footer = Footer::new(VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression: crate::store::COMPRESSION.to_string(),
|
||||
});
|
||||
@@ -270,17 +246,17 @@ mod tests {
|
||||
let mut vec = Vec::new();
|
||||
let footer_proxy = FooterProxy::new(&mut vec);
|
||||
assert!(footer_proxy.terminate().is_ok());
|
||||
if crate::store::COMPRESSION == "lz4" {
|
||||
assert_eq!(vec.len(), 158);
|
||||
} else {
|
||||
assert_eq!(vec.len(), 167);
|
||||
}
|
||||
assert_eq!(vec.len(), 167);
|
||||
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
|
||||
assert!(matches!(
|
||||
footer.versioned_footer,
|
||||
VersionedFooter::V2 { store_compression, .. }
|
||||
if store_compression == crate::store::COMPRESSION
|
||||
));
|
||||
if let VersionedFooter::V1 {
|
||||
crc32: _,
|
||||
store_compression,
|
||||
} = footer.versioned_footer
|
||||
{
|
||||
assert_eq!(store_compression, crate::store::COMPRESSION);
|
||||
} else {
|
||||
panic!("Versioned footer should be V1.");
|
||||
}
|
||||
assert_eq!(&footer.version, crate::version());
|
||||
}
|
||||
|
||||
@@ -288,7 +264,7 @@ mod tests {
|
||||
fn test_serialize_deserialize_footer() {
|
||||
let mut buffer = Vec::new();
|
||||
let crc32 = 123456u32;
|
||||
let footer: Footer = Footer::new(VersionedFooter::V2 {
|
||||
let footer: Footer = Footer::new(VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression: "lz4".to_string(),
|
||||
});
|
||||
@@ -300,7 +276,7 @@ mod tests {
|
||||
#[test]
|
||||
fn footer_length() {
|
||||
let crc32 = 1111111u32;
|
||||
let versioned_footer = VersionedFooter::V2 {
|
||||
let versioned_footer = VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression: "lz4".to_string(),
|
||||
};
|
||||
@@ -321,7 +297,7 @@ mod tests {
|
||||
// versioned footer length
|
||||
12 | 128,
|
||||
// index format version
|
||||
2,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
@@ -340,7 +316,7 @@ mod tests {
|
||||
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
|
||||
assert!(cursor.is_empty());
|
||||
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
|
||||
let expected_versioned_footer: VersionedFooter = VersionedFooter::V2 {
|
||||
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
|
||||
crc32: expected_crc,
|
||||
store_compression: "lz4".to_string(),
|
||||
};
|
||||
|
||||
@@ -1,16 +1,17 @@
|
||||
use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
|
||||
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
|
||||
use crate::core::MANAGED_FILEPATH;
|
||||
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::footer::{Footer, FooterProxy};
|
||||
use crate::directory::DirectoryLock;
|
||||
use crate::directory::GarbageCollectionResult;
|
||||
use crate::directory::Lock;
|
||||
use crate::directory::META_LOCK;
|
||||
use crate::directory::{FileSlice, WritePtr};
|
||||
use crate::directory::{ReadOnlySource, WritePtr};
|
||||
use crate::directory::{WatchCallback, WatchHandle};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::Directory;
|
||||
|
||||
use crc32fast::Hasher;
|
||||
use serde_json;
|
||||
use std::collections::HashSet;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -53,7 +54,7 @@ struct MetaInformation {
|
||||
/// Saves the file containing the list of existing files
|
||||
/// that were created by tantivy.
|
||||
fn save_managed_paths(
|
||||
directory: &dyn Directory,
|
||||
directory: &mut dyn Directory,
|
||||
wlock: &RwLockWriteGuard<'_, MetaInformation>,
|
||||
) -> io::Result<()> {
|
||||
let mut w = serde_json::to_vec(&wlock.managed_paths)?;
|
||||
@@ -86,7 +87,7 @@ impl ManagedDirectory {
|
||||
directory: Box::new(directory),
|
||||
meta_informations: Arc::default(),
|
||||
}),
|
||||
io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()),
|
||||
Err(OpenReadError::IOError(e)) => Err(From::from(e)),
|
||||
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
|
||||
// For the moment, this should never happen `meta.json`
|
||||
// do not have any footer and cannot detect incompatibility.
|
||||
@@ -168,7 +169,7 @@ impl ManagedDirectory {
|
||||
DeleteError::FileDoesNotExist(_) => {
|
||||
deleted_files.push(file_to_delete.clone());
|
||||
}
|
||||
DeleteError::IOError { .. } => {
|
||||
DeleteError::IOError(_) => {
|
||||
failed_to_delete_files.push(file_to_delete.clone());
|
||||
if !cfg!(target_os = "windows") {
|
||||
// On windows, delete is expected to fail if the file
|
||||
@@ -212,7 +213,7 @@ impl ManagedDirectory {
|
||||
/// Files starting with "." are reserved for locks.
|
||||
/// They are not managed and cannot be subjected
|
||||
/// to garbage collection.
|
||||
fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> {
|
||||
fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
|
||||
// Files starting with "." (e.g. lock files) are not managed.
|
||||
if !is_managed(filepath) {
|
||||
return Ok(());
|
||||
@@ -223,7 +224,7 @@ impl ManagedDirectory {
|
||||
.expect("Managed file lock poisoned");
|
||||
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
|
||||
if has_changed {
|
||||
save_managed_paths(self.directory.as_ref(), &meta_wlock)?;
|
||||
save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -231,19 +232,10 @@ impl ManagedDirectory {
|
||||
/// Verifies the checksum of a managed file.
|
||||
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
|
||||
let reader = self.directory.open_read(path)?;
|
||||
let (footer, data) =
|
||||
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError {
|
||||
io_error,
|
||||
filepath: path.to_path_buf(),
|
||||
})?;
|
||||
let bytes = data
|
||||
.read_bytes()
|
||||
.map_err(|io_error| OpenReadError::IOError {
|
||||
filepath: path.to_path_buf(),
|
||||
io_error,
|
||||
})?;
|
||||
let (footer, data) = Footer::extract_footer(reader)
|
||||
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
|
||||
let mut hasher = Hasher::new();
|
||||
hasher.update(bytes.as_slice());
|
||||
hasher.update(data.as_slice());
|
||||
let crc = hasher.finalize();
|
||||
Ok(footer
|
||||
.versioned_footer
|
||||
@@ -254,37 +246,35 @@ impl ManagedDirectory {
|
||||
|
||||
/// Lists the files whose checksum does not match their content.
|
||||
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
|
||||
let mut managed_paths = self
|
||||
let mut hashset = HashSet::new();
|
||||
let managed_paths = self
|
||||
.meta_informations
|
||||
.read()
|
||||
.expect("Managed directory rlock poisoned in list damaged.")
|
||||
.managed_paths
|
||||
.clone();
|
||||
|
||||
managed_paths.remove(*META_FILEPATH);
|
||||
|
||||
let mut damaged_files = HashSet::new();
|
||||
for path in managed_paths {
|
||||
for path in managed_paths.into_iter() {
|
||||
if !self.validate_checksum(&path)? {
|
||||
damaged_files.insert(path);
|
||||
hashset.insert(path);
|
||||
}
|
||||
}
|
||||
Ok(damaged_files)
|
||||
Ok(hashset)
|
||||
}
|
||||
}
|
||||
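For illustration, a hedged sketch of a caller reporting damaged files with the two helpers above; `managed_directory` is assumed to be an already wrapped `ManagedDirectory`:

```rust
fn report_damaged(managed_directory: &ManagedDirectory) -> Result<(), OpenReadError> {
    // `list_damaged` re-validates the checksum of every managed file except meta.json.
    for damaged_path in managed_directory.list_damaged()? {
        eprintln!("checksum mismatch detected for {:?}", damaged_path);
    }
    Ok(())
}
```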
|
||||
impl Directory for ManagedDirectory {
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
let file_slice = self.directory.open_read(path)?;
|
||||
let (footer, reader) = Footer::extract_footer(file_slice)
|
||||
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
let read_only_source = self.directory.open_read(path)?;
|
||||
let (footer, reader) = Footer::extract_footer(read_only_source)
|
||||
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
|
||||
footer.is_compatible()?;
|
||||
Ok(reader)
|
||||
}
|
||||
|
||||
fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
|
||||
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
|
||||
self.register_file_as_managed(path)
|
||||
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
|
||||
self.directory
|
||||
.open_write(path)?
|
||||
@@ -294,7 +284,7 @@ impl Directory for ManagedDirectory {
|
||||
))))
|
||||
}
|
||||
|
||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
self.register_file_as_managed(path)?;
|
||||
self.directory.atomic_write(path, data)
|
||||
}
|
||||
@@ -408,37 +398,39 @@ mod tests_mmap_specific {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_checksum() -> crate::Result<()> {
|
||||
fn test_checksum() {
|
||||
let test_path1: &'static Path = Path::new("some_path_for_test");
|
||||
let test_path2: &'static Path = Path::new("other_test_path");
|
||||
|
||||
let tempdir = TempDir::new().unwrap();
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
|
||||
let mmap_directory = MmapDirectory::open(&tempdir_path)?;
|
||||
let managed_directory = ManagedDirectory::wrap(mmap_directory)?;
|
||||
let mut write = managed_directory.open_write(test_path1)?;
|
||||
write.write_all(&[0u8, 1u8])?;
|
||||
write.terminate()?;
|
||||
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
||||
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
||||
let mut write = managed_directory.open_write(test_path1).unwrap();
|
||||
write.write_all(&[0u8, 1u8]).unwrap();
|
||||
write.terminate().unwrap();
|
||||
|
||||
let mut write = managed_directory.open_write(test_path2)?;
|
||||
write.write_all(&[3u8, 4u8, 5u8])?;
|
||||
write.terminate()?;
|
||||
let mut write = managed_directory.open_write(test_path2).unwrap();
|
||||
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
|
||||
write.terminate().unwrap();
|
||||
|
||||
let read_file = managed_directory.open_read(test_path2)?.read_bytes()?;
|
||||
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]);
|
||||
let read_source = managed_directory.open_read(test_path2).unwrap();
|
||||
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
|
||||
assert!(managed_directory.list_damaged().unwrap().is_empty());
|
||||
|
||||
let mut corrupted_path = tempdir_path.clone();
|
||||
corrupted_path.push(test_path2);
|
||||
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?;
|
||||
file.write_all(&[255u8])?;
|
||||
file.flush()?;
|
||||
let mut file = OpenOptions::new()
|
||||
.write(true)
|
||||
.open(&corrupted_path)
|
||||
.unwrap();
|
||||
file.write_all(&[255u8]).unwrap();
|
||||
file.flush().unwrap();
|
||||
drop(file);
|
||||
|
||||
let damaged = managed_directory.list_damaged()?;
|
||||
let damaged = managed_directory.list_damaged().unwrap();
|
||||
assert_eq!(damaged.len(), 1);
|
||||
assert!(damaged.contains(test_path2));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,23 +1,29 @@
|
||||
use fs2;
|
||||
use notify;
|
||||
|
||||
use self::fs2::FileExt;
|
||||
use self::notify::RawEvent;
|
||||
use self::notify::RecursiveMode;
|
||||
use self::notify::Watcher;
|
||||
use crate::core::META_FILEPATH;
|
||||
use crate::directory::error::LockError;
|
||||
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::error::{
|
||||
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
|
||||
};
|
||||
use crate::directory::read_only_source::BoxedData;
|
||||
use crate::directory::AntiCallToken;
|
||||
use crate::directory::BoxedData;
|
||||
use crate::directory::Directory;
|
||||
use crate::directory::DirectoryLock;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::Lock;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::WatchCallback;
|
||||
use crate::directory::WatchCallbackList;
|
||||
use crate::directory::WatchHandle;
|
||||
use crate::directory::{TerminatingWrite, WritePtr};
|
||||
use fs2::FileExt;
|
||||
use atomicwrites;
|
||||
use memmap::Mmap;
|
||||
use notify::RawEvent;
|
||||
use notify::RecursiveMode;
|
||||
use notify::Watcher;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use stable_deref_trait::StableDeref;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::From;
|
||||
use std::fmt;
|
||||
use std::fs::OpenOptions;
|
||||
@@ -32,7 +38,6 @@ use std::sync::Mutex;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::Weak;
|
||||
use std::thread;
|
||||
use std::{collections::HashMap, ops::Deref};
|
||||
use tempfile::TempDir;
|
||||
|
||||
/// Create a default io error given a string.
|
||||
@@ -43,17 +48,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
|
||||
/// Returns None iff the file exists, can be read, but is empty (and hence
|
||||
/// cannot be mmapped)
|
||||
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||
let file = File::open(full_path).map_err(|io_err| {
|
||||
if io_err.kind() == io::ErrorKind::NotFound {
|
||||
OpenReadError::FileDoesNotExist(full_path.to_path_buf())
|
||||
let file = File::open(full_path).map_err(|e| {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
OpenReadError::FileDoesNotExist(full_path.to_owned())
|
||||
} else {
|
||||
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())
|
||||
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
||||
}
|
||||
})?;
|
||||
|
||||
let meta_data = file
|
||||
.metadata()
|
||||
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?;
|
||||
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
|
||||
if meta_data.len() == 0 {
|
||||
// if the file size is 0, it will not be possible
|
||||
// to mmap the file, so we return None
|
||||
@@ -63,7 +68,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||
unsafe {
|
||||
memmap::Mmap::map(&file)
|
||||
.map(Some)
|
||||
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()))
|
||||
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,10 +187,6 @@ impl WatcherWrapper {
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.map_err(|io_error| OpenDirectoryError::IoError {
|
||||
io_error,
|
||||
directory_path: path.to_path_buf(),
|
||||
})?;
|
||||
Ok(WatcherWrapper {
|
||||
_watcher: Mutex::new(watcher),
|
||||
@@ -223,13 +224,17 @@ struct MmapDirectoryInner {
|
||||
}
|
||||
|
||||
impl MmapDirectoryInner {
|
||||
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
|
||||
MmapDirectoryInner {
|
||||
fn new(
|
||||
root_path: PathBuf,
|
||||
temp_directory: Option<TempDir>,
|
||||
) -> Result<MmapDirectoryInner, OpenDirectoryError> {
|
||||
let mmap_directory_inner = MmapDirectoryInner {
|
||||
root_path,
|
||||
mmap_cache: Default::default(),
|
||||
_temp_directory: temp_directory,
|
||||
watcher: RwLock::new(None),
|
||||
}
|
||||
};
|
||||
Ok(mmap_directory_inner)
|
||||
}
|
||||
|
||||
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
|
||||
@@ -263,11 +268,14 @@ impl fmt::Debug for MmapDirectory {
|
||||
}
|
||||
|
||||
impl MmapDirectory {
|
||||
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectory {
|
||||
let inner = MmapDirectoryInner::new(root_path, temp_directory);
|
||||
MmapDirectory {
|
||||
fn new(
|
||||
root_path: PathBuf,
|
||||
temp_directory: Option<TempDir>,
|
||||
) -> Result<MmapDirectory, OpenDirectoryError> {
|
||||
let inner = MmapDirectoryInner::new(root_path, temp_directory)?;
|
||||
Ok(MmapDirectory {
|
||||
inner: Arc::new(inner),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new MmapDirectory in a temporary directory.
|
||||
@@ -275,11 +283,9 @@ impl MmapDirectory {
|
||||
/// This is mostly useful to test the MmapDirectory itself.
|
||||
/// For your unit tests, prefer the RAMDirectory.
|
||||
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
|
||||
Ok(MmapDirectory::new(
|
||||
tempdir.path().to_path_buf(),
|
||||
Some(tempdir),
|
||||
))
|
||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
MmapDirectory::new(tempdir_path, Some(tempdir))
|
||||
}
|
||||
|
||||
/// Opens a MmapDirectory in a directory.
|
||||
@@ -297,7 +303,7 @@ impl MmapDirectory {
|
||||
directory_path,
|
||||
)))
|
||||
} else {
|
||||
Ok(MmapDirectory::new(PathBuf::from(directory_path), None))
|
||||
Ok(MmapDirectory::new(PathBuf::from(directory_path), None)?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -401,20 +407,8 @@ impl TerminatingWrite for SafeFileWriter {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct MmapArc(Arc<Box<dyn Deref<Target = [u8]> + Send + Sync>>);
|
||||
|
||||
impl Deref for MmapArc {
|
||||
type Target = [u8];
|
||||
|
||||
fn deref(&self) -> &[u8] {
|
||||
self.0.deref()
|
||||
}
|
||||
}
|
||||
unsafe impl StableDeref for MmapArc {}
|
||||
|
||||
impl Directory for MmapDirectory {
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
debug!("Open Read {:?}", path);
|
||||
let full_path = self.resolve_path(path);
|
||||
|
||||
@@ -424,14 +418,12 @@ impl Directory for MmapDirectory {
|
||||
on mmap cache while reading {:?}",
|
||||
path
|
||||
);
|
||||
let io_err = make_io_err(msg);
|
||||
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
|
||||
IOError::with_path(path.to_owned(), make_io_err(msg))
|
||||
})?;
|
||||
if let Some(mmap_arc) = mmap_cache.get_mmap(&full_path)? {
|
||||
Ok(FileSlice::from(MmapArc(mmap_arc)))
|
||||
} else {
|
||||
Ok(FileSlice::empty())
|
||||
}
|
||||
Ok(mmap_cache
|
||||
.get_mmap(&full_path)?
|
||||
.map(ReadOnlySource::from)
|
||||
.unwrap_or_else(ReadOnlySource::empty))
|
||||
}
|
||||
|
||||
/// Any entry associated to the path in the mmap will be
|
||||
@@ -439,18 +431,14 @@ impl Directory for MmapDirectory {
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
let full_path = self.resolve_path(path);
|
||||
match fs::remove_file(&full_path) {
|
||||
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError {
|
||||
io_error: e,
|
||||
filepath: path.to_path_buf(),
|
||||
}),
|
||||
Ok(_) => self
|
||||
.sync_directory()
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
|
||||
Err(e) => {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
Err(DeleteError::FileDoesNotExist(path.to_owned()))
|
||||
} else {
|
||||
Err(DeleteError::IOError {
|
||||
io_error: e,
|
||||
filepath: path.to_path_buf(),
|
||||
})
|
||||
Err(IOError::with_path(path.to_owned(), e).into())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -461,7 +449,7 @@ impl Directory for MmapDirectory {
|
||||
full_path.exists()
|
||||
}
|
||||
|
||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
debug!("Open Write {:?}", path);
|
||||
let full_path = self.resolve_path(path);
|
||||
|
||||
@@ -470,22 +458,22 @@ impl Directory for MmapDirectory {
|
||||
.create_new(true)
|
||||
.open(full_path);
|
||||
|
||||
let mut file = open_res.map_err(|io_err| {
|
||||
if io_err.kind() == io::ErrorKind::AlreadyExists {
|
||||
OpenWriteError::FileAlreadyExists(path.to_path_buf())
|
||||
let mut file = open_res.map_err(|err| {
|
||||
if err.kind() == io::ErrorKind::AlreadyExists {
|
||||
OpenWriteError::FileAlreadyExists(path.to_owned())
|
||||
} else {
|
||||
OpenWriteError::wrap_io_error(io_err, path.to_path_buf())
|
||||
IOError::with_path(path.to_owned(), err).into()
|
||||
}
|
||||
})?;
|
||||
|
||||
// making sure the file is created.
|
||||
file.flush()
|
||||
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
|
||||
// Apparently, on some filesystems, syncing the parent
|
||||
// directory is required.
|
||||
self.sync_directory()
|
||||
.map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?;
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
|
||||
let writer = SafeFileWriter::new(file);
|
||||
Ok(BufWriter::new(Box::new(writer)))
|
||||
@@ -496,28 +484,25 @@ impl Directory for MmapDirectory {
|
||||
let mut buffer = Vec::new();
|
||||
match File::open(&full_path) {
|
||||
Ok(mut file) => {
|
||||
file.read_to_end(&mut buffer).map_err(|io_error| {
|
||||
OpenReadError::wrap_io_error(io_error, path.to_path_buf())
|
||||
})?;
|
||||
file.read_to_end(&mut buffer)
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
Ok(buffer)
|
||||
}
|
||||
Err(io_error) => {
|
||||
if io_error.kind() == io::ErrorKind::NotFound {
|
||||
Err(e) => {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
|
||||
} else {
|
||||
Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf()))
|
||||
Err(IOError::with_path(path.to_owned(), e).into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> {
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
debug!("Atomic Write {:?}", path);
|
||||
let mut tempfile = tempfile::Builder::new().tempfile_in(&self.inner.root_path)?;
|
||||
tempfile.write_all(content)?;
|
||||
tempfile.flush()?;
|
||||
let full_path = self.resolve_path(path);
|
||||
tempfile.into_temp_path().persist(full_path)?;
|
||||
let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
|
||||
meta_file.write(|f| f.write_all(data))?;
|
||||
Ok(())
|
||||
}
|
||||
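A hedged round-trip sketch through the `Directory` trait's atomic API. It assumes the `&mut self` signatures from this diff and that the usual `From` conversions into tantivy's `crate::Result` error type exist; the file name and payload are arbitrary:

```rust
use std::path::Path;

// Works with any Directory implementation (Mmap or RAM) under the assumptions above.
fn atomic_round_trip(dir: &mut dyn Directory) -> crate::Result<()> {
    let path = Path::new("demo.json");
    dir.atomic_write(path, br#"{"ok": true}"#)?;
    let bytes = dir.atomic_read(path)?;
    assert_eq!(bytes.as_slice(), &br#"{"ok": true}"#[..]);
    Ok(())
}
```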
|
||||
@@ -553,10 +538,10 @@ mod tests {
|
||||
// The following tests are specific to the MmapDirectory
|
||||
|
||||
use super::*;
|
||||
use crate::indexer::LogMergePolicy;
|
||||
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
||||
use crate::Index;
|
||||
use crate::ReloadPolicy;
|
||||
use crate::{common::HasLen, indexer::LogMergePolicy};
|
||||
use std::fs;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
@@ -571,7 +556,7 @@ mod tests {
|
||||
// cannot be mmapped.
|
||||
//
|
||||
// In that case the directory returns a SharedVecSlice.
|
||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let path = PathBuf::from("test");
|
||||
{
|
||||
let mut w = mmap_directory.open_write(&path).unwrap();
|
||||
@@ -587,7 +572,7 @@ mod tests {
|
||||
|
||||
// here we test if the cache releases
|
||||
// mmaps correctly.
|
||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let num_paths = 10;
|
||||
let paths: Vec<PathBuf> = (0..num_paths)
|
||||
.map(|i| PathBuf::from(&*format!("file_{}", i)))
|
||||
@@ -678,7 +663,7 @@ mod tests {
|
||||
{
|
||||
let index = Index::create(mmap_directory.clone(), schema).unwrap();
|
||||
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let mut log_merge_policy = LogMergePolicy::default();
|
||||
log_merge_policy.set_min_merge_size(3);
|
||||
index_writer.set_merge_policy(Box::new(log_merge_policy));
|
||||
|
||||
@@ -9,12 +9,14 @@ mod mmap_directory;
|
||||
|
||||
mod directory;
|
||||
mod directory_lock;
|
||||
mod file_slice;
|
||||
mod footer;
|
||||
mod managed_directory;
|
||||
mod owned_bytes;
|
||||
mod ram_directory;
|
||||
mod read_only_source;
|
||||
mod spilling_writer;
|
||||
mod watch_event_router;
|
||||
mod persistor;
|
||||
|
||||
|
||||
/// Errors specific to the directory module.
|
||||
pub mod error;
|
||||
@@ -22,14 +24,12 @@ pub mod error;
|
||||
pub use self::directory::DirectoryLock;
|
||||
pub use self::directory::{Directory, DirectoryClone};
|
||||
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
|
||||
pub(crate) use self::file_slice::BoxedData;
|
||||
pub use self::file_slice::{FileHandle, FileSlice};
|
||||
pub use self::owned_bytes::OwnedBytes;
|
||||
pub use self::ram_directory::RAMDirectory;
|
||||
pub use self::read_only_source::ReadOnlySource;
|
||||
pub(crate) use self::spilling_writer::{SpillingResult, SpillingWriter};
|
||||
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
|
||||
use std::io::{self, BufWriter, Write};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Outcome of the Garbage collection
|
||||
pub struct GarbageCollectionResult {
|
||||
/// List of files that were deleted in this cycle
|
||||
@@ -83,10 +83,16 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
|
||||
}
|
||||
}
|
||||
|
||||
impl TerminatingWrite for Vec<u8> {
|
||||
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
|
||||
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
|
||||
self.flush()
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,255 +0,0 @@
|
||||
use crate::directory::FileHandle;
|
||||
use stable_deref_trait::StableDeref;
|
||||
use std::mem;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
|
||||
/// this data as a static slice.
|
||||
///
|
||||
/// The backing object is required to be `StableDeref`.
|
||||
#[derive(Clone)]
|
||||
pub struct OwnedBytes {
|
||||
data: &'static [u8],
|
||||
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
|
||||
}
|
||||
|
||||
impl FileHandle for OwnedBytes {
|
||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
||||
Ok(self.slice(from, to))
|
||||
}
|
||||
}
|
||||
|
||||
impl OwnedBytes {
|
||||
/// Creates an empty `OwnedBytes`.
|
||||
pub fn empty() -> OwnedBytes {
|
||||
OwnedBytes::new(&[][..])
|
||||
}
|
||||
|
||||
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
|
||||
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
|
||||
data_holder: T,
|
||||
) -> OwnedBytes {
|
||||
let box_stable_deref = Arc::new(data_holder);
|
||||
let bytes: &[u8] = box_stable_deref.as_ref();
|
||||
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
|
||||
OwnedBytes {
|
||||
box_stable_deref,
|
||||
data,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an `OwnedBytes` that is just a view over a slice of the data.
|
||||
pub fn slice(&self, from: usize, to: usize) -> Self {
|
||||
OwnedBytes {
|
||||
data: &self.data[from..to],
|
||||
box_stable_deref: self.box_stable_deref.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the underlying slice of data.
|
||||
/// `Deref` and `AsRef` are also available.
|
||||
#[inline(always)]
|
||||
pub fn as_slice(&self) -> &[u8] {
|
||||
self.data
|
||||
}
|
||||
|
||||
/// Returns the length of the slice.
|
||||
#[inline(always)]
|
||||
pub fn len(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
|
||||
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
|
||||
///
|
||||
/// Left will hold `split_len` bytes.
|
||||
///
|
||||
/// This operation is cheap and does not copy any memory.
|
||||
/// On the other hand, both `left` and `right` retain a handle over
|
||||
/// the entire slice of memory. In other words, the memory will only
|
||||
/// be released when both left and right are dropped.
|
||||
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
|
||||
let right_box_stable_deref = self.box_stable_deref.clone();
|
||||
let left = OwnedBytes {
|
||||
data: &self.data[..split_len],
|
||||
box_stable_deref: self.box_stable_deref,
|
||||
};
|
||||
let right = OwnedBytes {
|
||||
data: &self.data[split_len..],
|
||||
box_stable_deref: right_box_stable_deref,
|
||||
};
|
||||
(left, right)
|
||||
}
|
||||
|
||||
/// Returns true iff this `OwnedBytes` is empty.
|
||||
#[inline(always)]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.as_slice().is_empty()
|
||||
}
|
||||
|
||||
/// Drops the leftmost `advance_len` bytes.
|
||||
///
|
||||
/// See also [.clip(clip_len: usize))](#method.clip).
|
||||
#[inline(always)]
|
||||
pub fn advance(&mut self, advance_len: usize) {
|
||||
self.data = &self.data[advance_len..]
|
||||
}
|
||||
}
|
||||
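A short hedged sketch of the API above; a `Vec<u8>` satisfies the `StableDeref` requirement, so it can back an `OwnedBytes` directly. The buffer contents are arbitrary:

```rust
fn demo_owned_bytes() {
    let bytes = OwnedBytes::new(b"abcde".to_vec());
    // Slicing is a cheap view over the same allocation.
    assert_eq!(bytes.slice(1, 4).as_slice(), b"bcd");
    // Splitting hands out two handles; the memory is freed once both are dropped.
    let (left, right) = bytes.split(2);
    assert_eq!(left.as_slice(), b"ab");
    assert_eq!(right.as_slice(), b"cde");
}
```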
|
||||
impl fmt::Debug for OwnedBytes {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
// We truncate the bytes in order to make sure the debug string
|
||||
// is not too long.
|
||||
let bytes_truncated: &[u8] = if self.len() > 8 {
|
||||
&self.as_slice()[..10]
|
||||
} else {
|
||||
self.as_slice()
|
||||
};
|
||||
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for OwnedBytes {
|
||||
type Target = [u8];
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
self.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for OwnedBytes {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let read_len = {
|
||||
let data = self.as_slice();
|
||||
if data.len() >= buf.len() {
|
||||
let buf_len = buf.len();
|
||||
buf.copy_from_slice(&data[..buf_len]);
|
||||
buf.len()
|
||||
} else {
|
||||
let data_len = data.len();
|
||||
buf[..data_len].copy_from_slice(data);
|
||||
data_len
|
||||
}
|
||||
};
|
||||
self.advance(read_len);
|
||||
Ok(read_len)
|
||||
}
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
let read_len = {
|
||||
let data = self.as_slice();
|
||||
buf.extend(data);
|
||||
data.len()
|
||||
};
|
||||
self.advance(read_len);
|
||||
Ok(read_len)
|
||||
}
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
let read_len = self.read(buf)?;
|
||||
if read_len != buf.len() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"failed to fill whole buffer",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for OwnedBytes {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
self.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::{self, Read};
|
||||
|
||||
use super::OwnedBytes;
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_debug() {
|
||||
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
|
||||
assert_eq!(
|
||||
format!("{:?}", short_bytes),
|
||||
"OwnedBytes([97, 98, 99, 100], len=4)"
|
||||
);
|
||||
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
|
||||
assert_eq!(
|
||||
format!("{:?}", long_bytes),
|
||||
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_read() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
|
||||
{
|
||||
let mut buf = [0u8; 5];
|
||||
bytes.read_exact(&mut buf[..]).unwrap();
|
||||
assert_eq!(&buf, b"abcde");
|
||||
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
|
||||
}
|
||||
{
|
||||
let mut buf = [0u8; 2];
|
||||
bytes.read_exact(&mut buf[..]).unwrap();
|
||||
assert_eq!(&buf, b"fg");
|
||||
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||
let mut buf = [0u8; 5];
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
||||
assert_eq!(&buf, b"abcde");
|
||||
assert_eq!(bytes.as_slice(), b"");
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
||||
assert_eq!(&buf, b"abcde");
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||
let mut buf = [0u8; 7];
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
||||
assert_eq!(&buf[..5], b"abcde");
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_read_to_end() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||
let mut buf = Vec::new();
|
||||
bytes.read_to_end(&mut buf)?;
|
||||
assert_eq!(buf.as_slice(), b"abcde".as_ref());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_split() {
|
||||
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
||||
let (left, right) = bytes.split(3);
|
||||
assert_eq!(left.as_slice(), b"abc");
|
||||
assert_eq!(right.as_slice(), b"defghi");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_split_boundary() {
|
||||
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
||||
{
|
||||
let (left, right) = bytes.clone().split(0);
|
||||
assert_eq!(left.as_slice(), b"");
|
||||
assert_eq!(right.as_slice(), b"abcdefghi");
|
||||
}
|
||||
{
|
||||
let (left, right) = bytes.split(9);
|
||||
assert_eq!(left.as_slice(), b"abcdefghi");
|
||||
assert_eq!(right.as_slice(), b"");
|
||||
}
|
||||
}
|
||||
}
|
||||
38 src/directory/persistor.rs (new file)
@@ -0,0 +1,38 @@
|
||||
use crate::indexer::{SegmentManager, ResourceManager, MergeOperationInventory};
|
||||
use std::thread::JoinHandle;
|
||||
use crate::{IndexWriterConfig, SegmentId};
|
||||
use std::collections::HashSet;
|
||||
|
||||
pub(crate) struct Persistor {
|
||||
memory_manager: ResourceManager,
|
||||
thread_handle: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl Persistor {
|
||||
pub(crate) fn create_and_start(segment_manager: SegmentManager,
|
||||
memory_manager: ResourceManager,
|
||||
config: IndexWriterConfig) -> crate::Result<Persistor> {
|
||||
let memory_manager_clone = memory_manager.clone();
|
||||
let thread_handle = std::thread::Builder::new()
|
||||
.name("persistor-thread".to_string())
|
||||
.spawn(move || {
|
||||
while let Ok(_) = memory_manager_clone.wait_until_in_range(config.persist_low..) {
|
||||
segment_manager.largest_segment_not_in_merge();
|
||||
}
|
||||
}).map_err(|_err| crate::TantivyError::ErrorInThread("Failed to start persistor thread.".to_string()))?;
|
||||
Ok(Persistor {
|
||||
memory_manager,
|
||||
thread_handle
|
||||
})
|
||||
}
|
||||
|
||||
/// Stops the persisting thread.
|
||||
///
|
||||
/// The memory manager will be terminated, which will unlock the thread from any waiting
|
||||
/// position.
|
||||
/// This method blocks for a short amount of time until the persistor thread has terminated.
|
||||
pub fn stop(self) {
|
||||
self.memory_manager.terminate();
|
||||
let _ = self.thread_handle.join();
|
||||
}
|
||||
}
|
||||
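A hedged sketch of the intended lifecycle only; how a `SegmentManager` or `IndexWriterConfig` is obtained is not shown in this diff, so they are simply taken as parameters here:

```rust
fn run_persistor(
    segment_manager: SegmentManager,
    memory_manager: ResourceManager,
    config: IndexWriterConfig,
) -> crate::Result<()> {
    let persistor = Persistor::create_and_start(segment_manager, memory_manager, config)?;
    // ... indexing proceeds on other threads, feeding the ResourceManager ...
    // Terminates the ResourceManager and joins the persistor thread.
    persistor.stop();
    Ok(())
}
```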
@@ -1,9 +1,9 @@
|
||||
use crate::core::META_FILEPATH;
|
||||
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::AntiCallToken;
|
||||
use crate::directory::WatchCallbackList;
|
||||
use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle};
|
||||
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
|
||||
use crate::directory::{TerminatingWrite, WritePtr};
|
||||
use crate::{common::HasLen, core::META_FILEPATH};
|
||||
use fail::fail_point;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
@@ -11,6 +11,7 @@ use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::result;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use crate::indexer::ResourceManager;
|
||||
|
||||
/// Writer associated with the `RAMDirectory`
|
||||
///
|
||||
@@ -80,17 +81,18 @@ impl TerminatingWrite for VecWriter {
|
||||
|
||||
#[derive(Default)]
|
||||
struct InnerDirectory {
|
||||
fs: HashMap<PathBuf, FileSlice>,
|
||||
fs: HashMap<PathBuf, ReadOnlySource>,
|
||||
watch_router: WatchCallbackList,
|
||||
memory_manager: ResourceManager,
|
||||
}
|
||||
|
||||
impl InnerDirectory {
|
||||
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
|
||||
let data = FileSlice::from(data.to_vec());
|
||||
let data = ReadOnlySource::new_with_allocation(Vec::from(data), &self.memory_manager);
|
||||
self.fs.insert(path, data).is_some()
|
||||
}
|
||||
|
||||
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
|
||||
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
|
||||
self.fs
|
||||
.get(path)
|
||||
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
|
||||
@@ -112,8 +114,8 @@ impl InnerDirectory {
|
||||
self.watch_router.subscribe(watch_handle)
|
||||
}
|
||||
|
||||
fn total_mem_usage(&self) -> usize {
|
||||
self.fs.values().map(|f| f.len()).sum()
|
||||
fn total_mem_usage(&self) -> u64 {
|
||||
self.fs.values().map(|source| source.len() as u64).sum()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -134,14 +136,30 @@ pub struct RAMDirectory {
|
||||
}
|
||||
|
||||
impl RAMDirectory {
|
||||
/// Constructor
|
||||
|
||||
/// Creates a new RAMDirectory.
|
||||
///
|
||||
/// Check `.create_with_memory_manager(..)` if you want to associate an external memory
|
||||
/// manager with your RAMDirectory.
|
||||
pub fn create() -> RAMDirectory {
|
||||
Self::default()
|
||||
RAMDirectory::default()
|
||||
}
|
||||
|
||||
/// Constructor
|
||||
pub fn create_with_memory_manager(memory_manager: ResourceManager) -> RAMDirectory {
|
||||
let inner_directory = InnerDirectory {
|
||||
fs: Default::default(),
|
||||
watch_router: Default::default(),
|
||||
memory_manager
|
||||
};
|
||||
RAMDirectory {
|
||||
fs: Arc::new(RwLock::new(inner_directory))
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the sum of the size of the different files
|
||||
/// in the RAMDirectory.
|
||||
pub fn total_mem_usage(&self) -> usize {
|
||||
pub fn total_mem_usage(&self) -> u64 {
|
||||
self.fs.read().unwrap().total_mem_usage()
|
||||
}
|
||||
|
||||
@@ -151,11 +169,11 @@ impl RAMDirectory {
|
||||
/// written using the `atomic_write` api.
|
||||
///
|
||||
/// If an error is encountered, files may be only partially persisted.
|
||||
pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> {
|
||||
pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
|
||||
let wlock = self.fs.write().unwrap();
|
||||
for (path, file) in wlock.fs.iter() {
|
||||
for (path, source) in wlock.fs.iter() {
|
||||
let mut dest_wrt = dest.open_write(path)?;
|
||||
dest_wrt.write_all(file.read_bytes()?.as_slice())?;
|
||||
dest_wrt.write_all(source.as_slice())?;
|
||||
dest_wrt.terminate()?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -163,16 +181,15 @@ impl RAMDirectory {
|
||||
}
|
||||
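For illustration, a minimal hedged sketch of `persist` as described above. The destination is another `RAMDirectory` here, but any `Directory` works; it assumes the `&mut dyn Directory` signature from this diff:

```rust
fn snapshot(source: &RAMDirectory) -> crate::Result<()> {
    let mut dest = RAMDirectory::create();
    // Copies every file currently stored in `source` into `dest`.
    source.persist(&mut dest)?;
    Ok(())
}
```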
|
||||
impl Directory for RAMDirectory {
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
self.fs.read().unwrap().open_read(path)
|
||||
}
|
||||
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
fail_point!("RAMDirectory::delete", |_| {
|
||||
Err(DeleteError::IOError {
|
||||
io_error: io::Error::from(io::ErrorKind::Other),
|
||||
filepath: path.to_path_buf(),
|
||||
})
|
||||
use crate::directory::error::IOError;
|
||||
let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
|
||||
Err(DeleteError::from(io_error))
|
||||
});
|
||||
self.fs.write().unwrap().delete(path)
|
||||
}
|
||||
@@ -181,7 +198,7 @@ impl Directory for RAMDirectory {
|
||||
self.fs.read().unwrap().exists(path)
|
||||
}
|
||||
|
||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
let mut fs = self.fs.write().unwrap();
|
||||
let path_buf = PathBuf::from(path);
|
||||
let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
|
||||
@@ -195,17 +212,10 @@ impl Directory for RAMDirectory {
|
||||
}
|
||||
|
||||
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
|
||||
let bytes =
|
||||
self.open_read(path)?
|
||||
.read_bytes()
|
||||
.map_err(|io_error| OpenReadError::IOError {
|
||||
io_error,
|
||||
filepath: path.to_path_buf(),
|
||||
})?;
|
||||
Ok(bytes.as_slice().to_owned())
|
||||
Ok(self.open_read(path)?.as_slice().to_owned())
|
||||
}
|
||||
|
||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
msg.unwrap_or_else(|| "Undefined".to_string())
|
||||
@@ -235,6 +245,9 @@ mod tests {
|
||||
use crate::Directory;
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use crate::indexer::ResourceManager;
|
||||
use crate::directory::TerminatingWrite;
|
||||
use std::mem;
|
||||
|
||||
#[test]
|
||||
fn test_persist() {
|
||||
@@ -242,14 +255,69 @@ mod tests {
|
||||
let msg_seq: &'static [u8] = b"sequential is the way";
|
||||
let path_atomic: &'static Path = Path::new("atomic");
|
||||
let path_seq: &'static Path = Path::new("seq");
|
||||
let directory = RAMDirectory::create();
|
||||
let mut directory = RAMDirectory::create();
|
||||
assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
|
||||
let mut wrt = directory.open_write(path_seq).unwrap();
|
||||
assert!(wrt.write_all(msg_seq).is_ok());
|
||||
assert!(wrt.flush().is_ok());
|
||||
let directory_copy = RAMDirectory::create();
|
||||
assert!(directory.persist(&directory_copy).is_ok());
|
||||
let mut directory_copy = RAMDirectory::create();
|
||||
assert!(directory.persist(&mut directory_copy).is_ok());
|
||||
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
|
||||
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory_manager_several_path() {
|
||||
let memory_manager = ResourceManager::default();
|
||||
let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
|
||||
assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcd").is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 4u64);
|
||||
assert!(ram_directory.atomic_write(Path::new("/toto"), b"abcde").is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 9u64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory_manager_override() {
|
||||
let memory_manager = ResourceManager::default();
|
||||
let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
|
||||
assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcde").is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 5u64);
|
||||
assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcdef").is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 6u64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory_manager_seq_wrt() {
|
||||
let memory_manager = ResourceManager::default();
|
||||
let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
|
||||
let mut wrt = ram_directory.open_write(Path::new("/titi")).unwrap();
|
||||
assert!(wrt.write_all(b"abcde").is_ok());
|
||||
assert!(wrt.terminate().is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 5u64);
|
||||
assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcdef").is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 6u64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_release_on_drop() {
|
||||
let memory_manager = ResourceManager::default();
|
||||
let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
|
||||
let mut wrt = ram_directory.open_write(Path::new("/titi")).unwrap();
|
||||
assert!(wrt.write_all(b"abcde").is_ok());
|
||||
assert!(wrt.terminate().is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 5u64);
|
||||
let mut wrt2 = ram_directory.open_write(Path::new("/toto")).unwrap();
|
||||
assert!(wrt2.write_all(b"abcdefghijkl").is_ok());
|
||||
assert!(wrt2.terminate().is_ok());
|
||||
assert_eq!(memory_manager.total_amount(), 17u64);
|
||||
let source = ram_directory.open_read(Path::new("/titi")).unwrap();
|
||||
let source_clone = source.clone();
|
||||
assert_eq!(memory_manager.total_amount(), 17u64);
|
||||
mem::drop(ram_directory);
|
||||
assert_eq!(memory_manager.total_amount(), 5u64);
|
||||
mem::drop(source);
|
||||
assert_eq!(memory_manager.total_amount(), 5u64);
|
||||
mem::drop(source_clone);
|
||||
assert_eq!(memory_manager.total_amount(), 0u64);
|
||||
}
|
||||
}
|
||||
|
||||
157
src/directory/read_only_source.rs
Normal file
@@ -0,0 +1,157 @@
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;
use crate::indexer::{Allocation, ResourceManager};

pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

/// Read object that represents files in tantivy.
///
/// These read objects are only in charge to deliver
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// hold by this object should never be altered or destroyed.
pub struct ReadOnlySource {
    data: Arc<BoxedData>,
    start: usize,
    stop: usize,
    allocation: Option<Arc<Allocation>>
}

unsafe impl StableDeref for ReadOnlySource {}
unsafe impl CloneStableDeref for ReadOnlySource {}

impl Deref for ReadOnlySource {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl From<Arc<BoxedData>> for ReadOnlySource {
    fn from(data: Arc<BoxedData>) -> Self {
        let len = data.len();
        ReadOnlySource {
            data,
            start: 0,
            stop: len,
            allocation: None
        }
    }
}

impl ReadOnlySource {

    pub(crate) fn new<D>(data: D) -> ReadOnlySource
    where
        D: Deref<Target = [u8]> + Send + Sync + 'static,
    {
        let len = data.len();
        ReadOnlySource {
            data: Arc::new(Box::new(data)),
            start: 0,
            stop: len,
            allocation: None
        }
    }

    pub(crate) fn new_with_allocation<D>(data: D, memory_manager: &ResourceManager) -> ReadOnlySource
    where
        D: Deref<Target = [u8]> + Send + Sync + 'static,
    {
        let len = data.len();
        ReadOnlySource {
            data: Arc::new(Box::new(data)),
            start: 0,
            stop: len,
            allocation: Some(Arc::new(memory_manager.allocate(len as u64)))
        }
    }


    /// Creates an empty ReadOnlySource
    pub fn empty() -> ReadOnlySource {
        ReadOnlySource::new(&[][..])
    }

    /// Returns the data underlying the ReadOnlySource object.
    pub fn as_slice(&self) -> &[u8] {
        &self.data[self.start..self.stop]
    }

    /// Splits into 2 `ReadOnlySource`, at the offset given
    /// as an argument.
    pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left = self.slice(0, addr);
        let right = self.slice_from(addr);
        (left, right)
    }

    /// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
    pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left_len = self.len() - right_len;
        self.split(left_len)
    }

    /// Creates a ReadOnlySource that is just a
    /// view over a slice of the data.
    ///
    /// Keep in mind that any living slice extends
    /// the lifetime of the original ReadOnlySource,
    ///
    /// For instance, if `ReadOnlySource` wraps 500MB
    /// worth of data in anonymous memory, and only a
    /// 1KB slice is remaining, the whole `500MBs`
    /// are retained in memory.
    pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
        assert!(
            start <= stop,
            "Requested negative slice [{}..{}]",
            start,
            stop
        );
        assert!(stop <= self.len());
        ReadOnlySource {
            data: self.data.clone(),
            start: self.start + start,
            stop: self.start + stop,
            allocation: self.allocation.clone()
        }
    }

    /// Like `.slice(...)` but enforcing only the `from`
    /// boundary.
    ///
    /// Equivalent to `.slice(from_offset, self.len())`
    pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
        self.slice(from_offset, self.len())
    }

    /// Like `.slice(...)` but enforcing only the `to`
    /// boundary.
    ///
    /// Equivalent to `.slice(0, to_offset)`
    pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
        self.slice(0, to_offset)
    }
}

impl HasLen for ReadOnlySource {
    fn len(&self) -> usize {
        self.stop - self.start
    }
}

impl Clone for ReadOnlySource {
    fn clone(&self) -> Self {
        self.slice_from(0)
    }
}

impl From<Vec<u8>> for ReadOnlySource {
    fn from(data: Vec<u8>) -> ReadOnlySource {
        ReadOnlySource::new(data)
    }
}
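The slicing semantics documented in this new file are worth spelling out with a tiny example. The sketch below only uses what the file above defines (`From<Vec<u8>>`, `slice`, `split`, `as_slice`); it assumes `ReadOnlySource` remains re-exported as `tantivy::directory::ReadOnlySource`, which this diff does not show.

```rust
use tantivy::directory::ReadOnlySource;

fn main() {
    // The bytes live behind a shared Arc; slicing adjusts offsets, never copies.
    let source = ReadOnlySource::from(vec![10u8, 11, 12, 13, 14]);
    let middle = source.slice(1, 4);
    assert_eq!(middle.as_slice(), &[11u8, 12, 13]);

    // `split` consumes the source and returns the two halves around the offset.
    let (left, right) = source.split(2);
    assert_eq!(left.as_slice(), &[10u8, 11]);
    assert_eq!(right.as_slice(), &[12u8, 13, 14]);

    // As the doc comment warns, every slice keeps the whole original
    // allocation alive: dropping `left` and `right` frees nothing while
    // `middle` is still around.
    drop((left, right));
    assert_eq!(middle.as_slice(), &[11u8, 12, 13]);
}
```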
186
src/directory/spilling_writer.rs
Normal file
@@ -0,0 +1,186 @@
use crate::common::MutableEnum;
use crate::directory::{TerminatingWrite, WritePtr};
use std::io::{self, Write};

/// Represents the state of the `SpillingWriter`.
enum SpillingState {
    Buffer {
        buffer: Vec<u8>,
        capacity: usize,
        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
    },
    Spilled(WritePtr),
}

impl SpillingState {
    fn new(
        limit: usize,
        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
    ) -> SpillingState {
        SpillingState::Buffer {
            buffer: Vec::with_capacity(limit),
            capacity: limit,
            write_factory,
        }
    }

    // Change the state in such a way that it is ready to accept
    // `extra_capacity` bytes.
    //
    fn reserve(self, extra_capacity: usize) -> io::Result<SpillingState> {
        match self {
            SpillingState::Buffer {
                buffer,
                capacity,
                write_factory,
            } => {
                if capacity >= extra_capacity {
                    Ok(SpillingState::Buffer {
                        buffer,
                        capacity: capacity - extra_capacity,
                        write_factory,
                    })
                } else {
                    let mut wrt = write_factory()?;
                    wrt.write_all(&buffer[..])?;
                    Ok(SpillingState::Spilled(wrt))
                }
            }
            SpillingState::Spilled(wrt) => Ok(SpillingState::Spilled(wrt)),
        }
    }
}

/// The `SpillingWriter` is a writer that start by writing in a
/// buffer.
///
/// Once a memory limit is reached, the spilling writer will
/// call a given `WritePtr` factory and start spilling into it.
///
/// Spilling here includes:
/// - writing all of the data that were written in the in-memory buffer so far
/// - writing subsequent data as well.
///
/// Once entering "spilling" mode, the `SpillingWriter` stays in this mode.
pub struct SpillingWriter {
    state: MutableEnum<SpillingState>,
}

impl SpillingWriter {
    //// Creates a new `Spilling Writer`.
    pub fn new(
        limit: usize,
        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
    ) -> SpillingWriter {
        let state = SpillingState::new(limit, write_factory);
        SpillingWriter {
            state: MutableEnum::wrap(state),
        }
    }

    /// Finalizes the `SpillingWriter`.
    ///
    /// The `SpillingResult` object is an enum specific
    /// to whether the `SpillingWriter` reached the spilling limit
    /// (In that case, the buffer is returned).
    ///
    /// If the writer reached the spilling mode, the underlying `WritePtr`
    /// is terminated and SpillingResult::Spilled is returned.
    pub fn finalize(self) -> io::Result<SpillingResult> {
        match self.state.into() {
            SpillingState::Spilled(wrt) => {
                wrt.terminate()?;
                Ok(SpillingResult::Spilled)
            }
            SpillingState::Buffer { buffer, .. } => Ok(SpillingResult::Buffer(buffer)),
        }
    }
}

/// enum used as the result of `.finalize()`.
pub enum SpillingResult {
    Spilled,
    Buffer(Vec<u8>),
}

impl io::Write for SpillingWriter {
    fn write(&mut self, payload: &[u8]) -> io::Result<usize> {
        self.write_all(payload)?;
        Ok(payload.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        if let SpillingState::Spilled(wrt) = &mut *self.state {
            wrt.flush()?;
        }
        Ok(())
    }

    fn write_all(&mut self, payload: &[u8]) -> io::Result<()> {
        self.state.map_mutate(|mut state| {
            state = state.reserve(payload.len())?;
            match &mut state {
                SpillingState::Buffer { buffer, .. } => {
                    buffer.extend_from_slice(payload);
                }
                SpillingState::Spilled(wrt) => {
                    wrt.write_all(payload)?;
                }
            }
            Ok(state)
        })
    }
}

#[cfg(test)]
mod tests {
    use super::SpillingWriter;
    use crate::directory::spilling_writer::SpillingResult;
    use crate::directory::RAMDirectory;
    use crate::Directory;
    use std::io::{self, Write};
    use std::path::Path;

    #[test]
    fn test_no_spilling() {
        let ram_directory = RAMDirectory::create();
        let mut ram_directory_clone = ram_directory.clone();
        let path = Path::new("test");
        let write_factory = Box::new(move || {
            ram_directory_clone
                .open_write(path)
                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
        });
        let mut spilling_wrt = SpillingWriter::new(10, write_factory);
        assert!(spilling_wrt.write_all(b"abcd").is_ok());
        if let SpillingResult::Buffer(buf) = spilling_wrt.finalize().unwrap() {
            assert_eq!(buf, b"abcd")
        } else {
            panic!("spill writer should not have spilled");
        }
        assert!(!ram_directory.exists(path));
    }

    #[test]
    fn test_spilling() {
        let ram_directory = RAMDirectory::create();
        let mut ram_directory_clone = ram_directory.clone();
        let path = Path::new("test");
        let write_factory = Box::new(move || {
            ram_directory_clone
                .open_write(path)
                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
        });
        let mut spilling_wrt = SpillingWriter::new(10, write_factory);
        assert!(spilling_wrt.write_all(b"abcd").is_ok());
        assert!(spilling_wrt.write_all(b"efghijklmnop").is_ok());
        if let SpillingResult::Spilled = spilling_wrt.finalize().unwrap() {
        } else {
            panic!("spill writer should have spilled");
        }
        assert_eq!(
            ram_directory.atomic_read(path).unwrap(),
            b"abcdefghijklmnop"
        );
    }
}
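The tests above exercise the writer end to end; the heart of it is the `reserve` state transition. Below is a self-contained sketch of that same buffer-then-spill policy, with a plain `Vec<u8>` standing in for the real `WritePtr`. All names in the sketch are illustrative, not tantivy's.

```rust
use std::io::{self, Write};

enum State {
    Buffer { buffer: Vec<u8>, capacity: usize },
    Spilled(Vec<u8>), // stand-in for the spilled-to writer
}

impl State {
    // Mirrors `SpillingState::reserve`: stay buffered while the remaining
    // capacity covers the payload, otherwise flush the buffer into the
    // backing writer and switch to `Spilled` for good.
    fn reserve(self, extra: usize) -> io::Result<State> {
        match self {
            State::Buffer { buffer, capacity } if capacity >= extra => Ok(State::Buffer {
                buffer,
                capacity: capacity - extra,
            }),
            State::Buffer { buffer, .. } => {
                let mut wrt = Vec::new();
                wrt.write_all(&buffer)?;
                Ok(State::Spilled(wrt))
            }
            spilled => Ok(spilled),
        }
    }
}

fn main() -> io::Result<()> {
    let mut state = State::Buffer { buffer: Vec::new(), capacity: 10 };
    for payload in [&b"abcd"[..], &b"efghijklmnop"[..]] {
        state = state.reserve(payload.len())?;
        match &mut state {
            State::Buffer { buffer, .. } => buffer.extend_from_slice(payload),
            State::Spilled(wrt) => wrt.write_all(payload)?,
        }
    }
    // The second write exceeded the 10-byte limit, so everything ended up
    // in the spilled writer, in order.
    match state {
        State::Spilled(wrt) => assert_eq!(wrt, b"abcdefghijklmnop".to_vec()),
        State::Buffer { .. } => unreachable!("the second write exceeds the limit"),
    }
    Ok(())
}
```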
@@ -20,47 +20,45 @@ mod mmap_directory_tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_simple(&directory)
|
||||
fn test_simple() {
|
||||
let mut directory = make_directory();
|
||||
super::test_simple(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_create_the_file() {
|
||||
let directory = make_directory();
|
||||
super::test_write_create_the_file(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_write_create_the_file(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rewrite_forbidden() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_rewrite_forbidden(&directory)?;
|
||||
Ok(())
|
||||
fn test_rewrite_forbidden() {
|
||||
let mut directory = make_directory();
|
||||
super::test_rewrite_forbidden(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_directory_delete() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_directory_delete(&directory)?;
|
||||
Ok(())
|
||||
fn test_directory_delete() {
|
||||
let mut directory = make_directory();
|
||||
super::test_directory_delete(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_non_blocking() {
|
||||
let directory = make_directory();
|
||||
super::test_lock_non_blocking(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_non_blocking(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_blocking() {
|
||||
let directory = make_directory();
|
||||
super::test_lock_blocking(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_blocking(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_watch() {
|
||||
let directory = make_directory();
|
||||
super::test_watch(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_watch(&mut directory);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,47 +72,45 @@ mod ram_directory_tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_simple(&directory)
|
||||
fn test_simple() {
|
||||
let mut directory = make_directory();
|
||||
super::test_simple(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_create_the_file() {
|
||||
let directory = make_directory();
|
||||
super::test_write_create_the_file(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_write_create_the_file(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rewrite_forbidden() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_rewrite_forbidden(&directory)?;
|
||||
Ok(())
|
||||
fn test_rewrite_forbidden() {
|
||||
let mut directory = make_directory();
|
||||
super::test_rewrite_forbidden(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_directory_delete() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_directory_delete(&directory)?;
|
||||
Ok(())
|
||||
fn test_directory_delete() {
|
||||
let mut directory = make_directory();
|
||||
super::test_directory_delete(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_non_blocking() {
|
||||
let directory = make_directory();
|
||||
super::test_lock_non_blocking(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_non_blocking(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_blocking() {
|
||||
let directory = make_directory();
|
||||
super::test_lock_blocking(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_blocking(&mut directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_watch() {
|
||||
let directory = make_directory();
|
||||
super::test_watch(&directory);
|
||||
let mut directory = make_directory();
|
||||
super::test_watch(&mut directory);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,37 +118,43 @@ mod ram_directory_tests {
|
||||
#[should_panic]
|
||||
fn ram_directory_panics_if_flush_forgotten() {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
let ram_directory = RAMDirectory::create();
|
||||
let mut ram_directory = RAMDirectory::create();
|
||||
let mut write_file = ram_directory.open_write(test_path).unwrap();
|
||||
assert!(write_file.write_all(&[4]).is_ok());
|
||||
}
|
||||
|
||||
fn test_simple(directory: &dyn Directory) -> crate::Result<()> {
|
||||
fn test_simple(directory: &mut dyn Directory) {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
let mut write_file = directory.open_write(test_path)?;
|
||||
assert!(directory.exists(test_path));
|
||||
write_file.write_all(&[4])?;
|
||||
write_file.write_all(&[3])?;
|
||||
write_file.write_all(&[7, 3, 5])?;
|
||||
write_file.flush()?;
|
||||
let read_file = directory.open_read(test_path)?.read_bytes()?;
|
||||
assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]);
|
||||
mem::drop(read_file);
|
||||
{
|
||||
let mut write_file = directory.open_write(test_path).unwrap();
|
||||
assert!(directory.exists(test_path));
|
||||
write_file.write_all(&[4]).unwrap();
|
||||
write_file.write_all(&[3]).unwrap();
|
||||
write_file.write_all(&[7, 3, 5]).unwrap();
|
||||
write_file.flush().unwrap();
|
||||
}
|
||||
{
|
||||
let read_file = directory.open_read(test_path).unwrap();
|
||||
let data: &[u8] = &*read_file;
|
||||
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
|
||||
}
|
||||
assert!(directory.delete(test_path).is_ok());
|
||||
assert!(!directory.exists(test_path));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> {
|
||||
fn test_rewrite_forbidden(directory: &mut dyn Directory) {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
directory.open_write(test_path)?;
|
||||
assert!(directory.exists(test_path));
|
||||
assert!(directory.open_write(test_path).is_err());
|
||||
{
|
||||
directory.open_write(test_path).unwrap();
|
||||
assert!(directory.exists(test_path));
|
||||
}
|
||||
{
|
||||
assert!(directory.open_write(test_path).is_err());
|
||||
}
|
||||
assert!(directory.delete(test_path).is_ok());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_write_create_the_file(directory: &dyn Directory) {
|
||||
fn test_write_create_the_file(directory: &mut dyn Directory) {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
{
|
||||
assert!(directory.open_read(test_path).is_err());
|
||||
@@ -163,20 +165,21 @@ fn test_write_create_the_file(directory: &dyn Directory) {
|
||||
}
|
||||
}
|
||||
|
||||
fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
|
||||
fn test_directory_delete(directory: &mut dyn Directory) {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
assert!(directory.open_read(test_path).is_err());
|
||||
let mut write_file = directory.open_write(&test_path)?;
|
||||
write_file.write_all(&[1, 2, 3, 4])?;
|
||||
write_file.flush()?;
|
||||
let mut write_file = directory.open_write(&test_path).unwrap();
|
||||
write_file.write_all(&[1, 2, 3, 4]).unwrap();
|
||||
write_file.flush().unwrap();
|
||||
{
|
||||
let read_handle = directory.open_read(&test_path)?.read_bytes()?;
|
||||
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
|
||||
let read_handle = directory.open_read(&test_path).unwrap();
|
||||
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
|
||||
// Mapped files can't be deleted on Windows
|
||||
if !cfg!(windows) {
|
||||
assert!(directory.delete(&test_path).is_ok());
|
||||
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
|
||||
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
|
||||
}
|
||||
|
||||
assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
|
||||
}
|
||||
|
||||
@@ -186,10 +189,9 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
|
||||
|
||||
assert!(directory.open_read(&test_path).is_err());
|
||||
assert!(directory.delete(&test_path).is_err());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_watch(directory: &dyn Directory) {
|
||||
fn test_watch(directory: &mut dyn Directory) {
|
||||
let num_progress: Arc<AtomicUsize> = Default::default();
|
||||
let counter: Arc<AtomicUsize> = Default::default();
|
||||
let counter_clone = counter.clone();
|
||||
@@ -209,22 +211,22 @@ fn test_watch(directory: &dyn Directory) {
|
||||
.unwrap();
|
||||
|
||||
for i in 0..10 {
|
||||
assert!(i <= counter.load(SeqCst));
|
||||
assert_eq!(i, counter.load(SeqCst));
|
||||
assert!(directory
|
||||
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
|
||||
.is_ok());
|
||||
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
|
||||
assert!(i + 1 <= counter.load(SeqCst)); // notify can trigger more than once.
|
||||
assert_eq!(i + 1, counter.load(SeqCst));
|
||||
}
|
||||
mem::drop(watch_handle);
|
||||
assert!(directory
|
||||
.atomic_write(Path::new("meta.json"), b"random_test_data")
|
||||
.is_ok());
|
||||
assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
|
||||
assert!(10 <= counter.load(SeqCst));
|
||||
assert_eq!(10, counter.load(SeqCst));
|
||||
}
|
||||
|
||||
fn test_lock_non_blocking(directory: &dyn Directory) {
|
||||
fn test_lock_non_blocking(directory: &mut dyn Directory) {
|
||||
{
|
||||
let lock_a_res = directory.acquire_lock(&Lock {
|
||||
filepath: PathBuf::from("a.lock"),
|
||||
@@ -249,7 +251,7 @@ fn test_lock_non_blocking(directory: &dyn Directory) {
|
||||
assert!(lock_a_res.is_ok());
|
||||
}
|
||||
|
||||
fn test_lock_blocking(directory: &dyn Directory) {
|
||||
fn test_lock_blocking(directory: &mut dyn Directory) {
|
||||
let lock_a_res = directory.acquire_lock(&Lock {
|
||||
filepath: PathBuf::from("a.lock"),
|
||||
is_blocking: true,
|
||||
|
||||
@@ -5,7 +5,7 @@ use std::sync::RwLock;
use std::sync::Weak;

/// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<dyn Fn() + Sync + Send>;
pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;

/// Helper struct to implement the watch method in `Directory` implementations.
///
@@ -29,17 +29,10 @@ impl WatchHandle {
    pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
        WatchHandle(watch_callback)
    }

    /// Returns an empty watch handle.
    ///
    /// This function is only useful when implementing a readonly directory.
    pub fn empty() -> WatchHandle {
        WatchHandle::new(Arc::new(Box::new(|| {})))
    }
}

impl WatchCallbackList {
    /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
    /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
    pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
        let watch_callback_arc = Arc::new(watch_callback);
        let watch_callback_weak = Arc::downgrade(&watch_callback_arc);

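The `subscribe` implementation above relies on a simple ownership trick: the callback list only stores a `Weak` reference, while the returned `WatchHandle` owns the `Arc`, so dropping the handle is what unregisters the callback. A minimal standalone illustration of that pattern (not tantivy's actual types):

```rust
use std::sync::{Arc, Weak};

type WatchCallback = Box<dyn Fn() + Sync + Send>;

fn main() {
    let callback: WatchCallback = Box::new(|| println!("meta.json changed"));
    let handle: Arc<WatchCallback> = Arc::new(callback);          // what the handle wraps
    let registered: Weak<WatchCallback> = Arc::downgrade(&handle); // what the list stores

    // While the handle is alive, the list can upgrade its Weak and fire the callback.
    assert!(registered.upgrade().is_some());

    // Dropping the handle is all it takes to unregister: upgrading now fails.
    drop(handle);
    assert!(registered.upgrade().is_none());
}
```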
131
src/docset.rs
@@ -1,48 +1,58 @@
|
||||
use crate::common::BitSet;
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::DocId;
|
||||
use std::borrow::Borrow;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
/// Sentinel value returned when a DocSet has been entirely consumed.
|
||||
///
|
||||
/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
|
||||
/// to compare [u32; 4].
|
||||
pub const TERMINATED: DocId = std::i32::MAX as u32;
|
||||
/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
|
||||
#[derive(PartialEq, Eq, Debug)]
|
||||
pub enum SkipResult {
|
||||
/// target was in the docset
|
||||
Reached,
|
||||
/// target was not in the docset, skipping stopped as a greater element was found
|
||||
OverStep,
|
||||
/// the docset was entirely consumed without finding the target, nor any
|
||||
/// element greater than the target.
|
||||
End,
|
||||
}
|
||||
|
||||
/// Represents an iterable set of sorted doc ids.
|
||||
pub trait DocSet {
|
||||
/// Goes to the next element.
|
||||
///
|
||||
/// The DocId of the next element is returned.
|
||||
/// In other words we should always have :
|
||||
/// ```ignore
|
||||
/// let doc = docset.advance();
|
||||
/// assert_eq!(doc, docset.doc());
|
||||
/// ```
|
||||
///
|
||||
/// If we reached the end of the DocSet, TERMINATED should be returned.
|
||||
///
|
||||
/// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
|
||||
/// be returned.
|
||||
/// TODO Test existing docsets.
|
||||
fn advance(&mut self) -> DocId;
|
||||
/// `.advance(...)` needs to be called a first time to point to the correct
|
||||
/// element.
|
||||
fn advance(&mut self) -> bool;
|
||||
|
||||
/// Advances the DocSet forward until reaching the target, or going to the
|
||||
/// lowest DocId greater than the target.
|
||||
/// After skipping, position the iterator in such a way that `.doc()`
|
||||
/// will return a value greater than or equal to target.
|
||||
///
|
||||
/// If the end of the DocSet is reached, TERMINATED is returned.
|
||||
/// SkipResult expresses whether the `target value` was reached, overstepped,
|
||||
/// or if the `DocSet` was entirely consumed without finding any value
|
||||
/// greater or equal to the `target`.
|
||||
///
|
||||
/// Calling `.seek(target)` on a terminated DocSet is legal. Implementation
|
||||
/// of DocSet should support it.
|
||||
/// WARNING: Calling skip always advances the docset.
|
||||
/// More specifically, if the docset is already positionned on the target
|
||||
/// skipping will advance to the next position and return SkipResult::Overstep.
|
||||
///
|
||||
/// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
|
||||
fn seek(&mut self, target: DocId) -> DocId {
|
||||
let mut doc = self.doc();
|
||||
debug_assert!(doc <= target);
|
||||
while doc < target {
|
||||
doc = self.advance();
|
||||
/// If `.skip_next()` oversteps, then the docset must be positionned correctly
|
||||
/// on an existing document. In other words, `.doc()` should return the first document
|
||||
/// greater than `DocId`.
|
||||
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
}
|
||||
loop {
|
||||
match self.doc().cmp(&target) {
|
||||
Ordering::Less => {
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
}
|
||||
}
|
||||
Ordering::Equal => return SkipResult::Reached,
|
||||
Ordering::Greater => return SkipResult::OverStep,
|
||||
}
|
||||
}
|
||||
doc
|
||||
}
|
||||
|
||||
/// Fills a given mutable buffer with the next doc ids from the
|
||||
@@ -61,38 +71,38 @@ pub trait DocSet {
|
||||
/// use case where batching. The normal way to
|
||||
/// go through the `DocId`'s is to call `.advance()`.
|
||||
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
|
||||
if self.doc() == TERMINATED {
|
||||
return 0;
|
||||
}
|
||||
for (i, buffer_val) in buffer.iter_mut().enumerate() {
|
||||
*buffer_val = self.doc();
|
||||
if self.advance() == TERMINATED {
|
||||
return i + 1;
|
||||
if self.advance() {
|
||||
*buffer_val = self.doc();
|
||||
} else {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
buffer.len()
|
||||
}
|
||||
|
||||
/// Returns the current document
|
||||
/// Right after creating a new DocSet, the docset points to the first document.
|
||||
///
|
||||
/// If the DocSet is empty, .doc() should return `TERMINATED`.
|
||||
fn doc(&self) -> DocId;
|
||||
|
||||
/// Returns a best-effort hint of the
|
||||
/// length of the docset.
|
||||
fn size_hint(&self) -> u32;
|
||||
|
||||
/// Appends all docs to a `bitset`.
|
||||
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||
while self.advance() {
|
||||
bitset.insert(self.doc());
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number documents matching.
|
||||
/// Calling this method consumes the `DocSet`.
|
||||
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
|
||||
let mut count = 0u32;
|
||||
let mut doc = self.doc();
|
||||
while doc != TERMINATED {
|
||||
if !delete_bitset.is_deleted(doc) {
|
||||
while self.advance() {
|
||||
if !delete_bitset.is_deleted(self.doc()) {
|
||||
count += 1u32;
|
||||
}
|
||||
doc = self.advance();
|
||||
}
|
||||
count
|
||||
}
|
||||
@@ -104,42 +114,22 @@ pub trait DocSet {
|
||||
/// given by `count()`.
|
||||
fn count_including_deleted(&mut self) -> u32 {
|
||||
let mut count = 0u32;
|
||||
let mut doc = self.doc();
|
||||
while doc != TERMINATED {
|
||||
while self.advance() {
|
||||
count += 1u32;
|
||||
doc = self.advance();
|
||||
}
|
||||
count
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> DocSet for &'a mut dyn DocSet {
|
||||
fn advance(&mut self) -> u32 {
|
||||
(**self).advance()
|
||||
}
|
||||
|
||||
fn seek(&mut self, target: DocId) -> DocId {
|
||||
(**self).seek(target)
|
||||
}
|
||||
|
||||
fn doc(&self) -> u32 {
|
||||
(**self).doc()
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> u32 {
|
||||
(**self).size_hint()
|
||||
}
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
|
||||
fn advance(&mut self) -> DocId {
|
||||
fn advance(&mut self) -> bool {
|
||||
let unboxed: &mut TDocSet = self.borrow_mut();
|
||||
unboxed.advance()
|
||||
}
|
||||
|
||||
fn seek(&mut self, target: DocId) -> DocId {
|
||||
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
||||
let unboxed: &mut TDocSet = self.borrow_mut();
|
||||
unboxed.seek(target)
|
||||
unboxed.skip_next(target)
|
||||
}
|
||||
|
||||
fn doc(&self) -> DocId {
|
||||
@@ -161,4 +151,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
|
||||
let unboxed: &mut TDocSet = self.borrow_mut();
|
||||
unboxed.count_including_deleted()
|
||||
}
|
||||
|
||||
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||
let unboxed: &mut TDocSet = self.borrow_mut();
|
||||
unboxed.append_to_bitset(bitset);
|
||||
}
|
||||
}
|
||||
|
||||
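The reworked `DocSet` above changes the iteration contract: a freshly created docset already points at its first document, `advance` returns the next `DocId` or `TERMINATED`, and the default `seek` walks forward until `doc() >= target`. A toy cursor over a sorted vector makes that contract concrete; this is an illustrative sketch, not tantivy's trait:

```rust
const TERMINATED: u32 = i32::MAX as u32;

/// Toy cursor over a sorted list of doc ids, following the contract above.
struct VecDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl VecDocSet {
    fn new(docs: Vec<u32>) -> VecDocSet {
        VecDocSet { docs, cursor: 0 } // already positioned on the first doc
    }

    fn doc(&self) -> u32 {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }

    fn advance(&mut self) -> u32 {
        self.cursor += 1;
        self.doc()
    }

    /// Same shape as the default `seek` in the diff: advance until doc() >= target.
    fn seek(&mut self, target: u32) -> u32 {
        let mut doc = self.doc();
        debug_assert!(doc <= target);
        while doc < target {
            doc = self.advance();
        }
        doc
    }
}

fn main() {
    let mut docset = VecDocSet::new(vec![2, 5, 9]);
    assert_eq!(docset.doc(), 2);   // points at the first doc on creation
    assert_eq!(docset.seek(5), 5); // lands exactly on a present doc
    assert_eq!(docset.seek(7), 9); // oversteps to the next greater doc
    assert_eq!(docset.advance(), TERMINATED);
    assert_eq!(docset.advance(), TERMINATED); // advancing a terminated set is legal
}
```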
111
src/error.rs
@@ -2,27 +2,22 @@
|
||||
|
||||
use std::io;
|
||||
|
||||
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::error::{Incompatibility, LockError};
|
||||
use crate::fastfield::FastFieldNotAvailableError;
|
||||
use crate::query;
|
||||
use crate::{
|
||||
directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
|
||||
schema,
|
||||
};
|
||||
use crate::schema;
|
||||
use serde_json;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::PoisonError;
|
||||
|
||||
/// Represents a `DataCorruption` error.
|
||||
///
|
||||
/// When facing data corruption, tantivy actually panic or return this error.
|
||||
pub struct DataCorruption {
|
||||
filepath: Option<PathBuf>,
|
||||
comment: String,
|
||||
}
|
||||
|
||||
impl DataCorruption {
|
||||
/// Creates a `DataCorruption` Error.
|
||||
pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
|
||||
DataCorruption {
|
||||
filepath: Some(filepath),
|
||||
@@ -30,11 +25,10 @@ impl DataCorruption {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a `DataCorruption` Error, when the filepath is irrelevant.
|
||||
pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
|
||||
pub fn comment_only(comment: String) -> DataCorruption {
|
||||
DataCorruption {
|
||||
filepath: None,
|
||||
comment: comment.to_string(),
|
||||
comment,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -50,47 +44,44 @@ impl fmt::Debug for DataCorruption {
|
||||
}
|
||||
}
|
||||
|
||||
/// The library's error enum
|
||||
#[derive(Debug, Error)]
|
||||
/// The library's failure based error enum
|
||||
#[derive(Debug, Fail)]
|
||||
pub enum TantivyError {
|
||||
/// Failed to open the directory.
|
||||
#[error("Failed to open the directory: '{0:?}'")]
|
||||
OpenDirectoryError(#[from] OpenDirectoryError),
|
||||
/// Failed to open a file for read.
|
||||
#[error("Failed to open file for read: '{0:?}'")]
|
||||
OpenReadError(#[from] OpenReadError),
|
||||
/// Failed to open a file for write.
|
||||
#[error("Failed to open file for write: '{0:?}'")]
|
||||
OpenWriteError(#[from] OpenWriteError),
|
||||
/// Path does not exist.
|
||||
#[fail(display = "Path does not exist: '{:?}'", _0)]
|
||||
PathDoesNotExist(PathBuf),
|
||||
/// File already exists, this is a problem when we try to write into a new file.
|
||||
#[fail(display = "File already exists: '{:?}'", _0)]
|
||||
FileAlreadyExists(PathBuf),
|
||||
/// Index already exists in this directory
|
||||
#[error("Index already exists")]
|
||||
#[fail(display = "Index already exists")]
|
||||
IndexAlreadyExists,
|
||||
/// Failed to acquire file lock
|
||||
#[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
|
||||
#[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
|
||||
LockFailure(LockError, Option<String>),
|
||||
/// IO Error.
|
||||
#[error("An IO error occurred: '{0}'")]
|
||||
IOError(#[from] io::Error),
|
||||
#[fail(display = "An IO error occurred: '{}'", _0)]
|
||||
IOError(#[cause] IOError),
|
||||
/// Data corruption.
|
||||
#[error("Data corrupted: '{0:?}'")]
|
||||
#[fail(display = "{:?}", _0)]
|
||||
DataCorruption(DataCorruption),
|
||||
/// A thread holding the locked panicked and poisoned the lock.
|
||||
#[error("A thread holding the locked panicked and poisoned the lock")]
|
||||
#[fail(display = "A thread holding the locked panicked and poisoned the lock")]
|
||||
Poisoned,
|
||||
/// Invalid argument was passed by the user.
|
||||
#[error("An invalid argument was passed: '{0}'")]
|
||||
#[fail(display = "An invalid argument was passed: '{}'", _0)]
|
||||
InvalidArgument(String),
|
||||
/// An Error happened in one of the thread.
|
||||
#[error("An error occurred in a thread: '{0}'")]
|
||||
#[fail(display = "An error occurred in a thread: '{}'", _0)]
|
||||
ErrorInThread(String),
|
||||
/// An Error appeared related to the schema.
|
||||
#[error("Schema error: '{0}'")]
|
||||
#[fail(display = "Schema error: '{}'", _0)]
|
||||
SchemaError(String),
|
||||
/// System error. (e.g.: We failed spawning a new thread)
|
||||
#[error("System error.'{0}'")]
|
||||
#[fail(display = "System error.'{}'", _0)]
|
||||
SystemError(String),
|
||||
/// Index incompatible with current version of tantivy
|
||||
#[error("{0:?}")]
|
||||
#[fail(display = "{:?}", _0)]
|
||||
IncompatibleIndex(Incompatibility),
|
||||
}
|
||||
|
||||
@@ -99,17 +90,31 @@ impl From<DataCorruption> for TantivyError {
|
||||
TantivyError::DataCorruption(data_corruption)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FastFieldNotAvailableError> for TantivyError {
|
||||
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
|
||||
TantivyError::SchemaError(format!("{}", fastfield_error))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LockError> for TantivyError {
|
||||
fn from(lock_error: LockError) -> TantivyError {
|
||||
TantivyError::LockFailure(lock_error, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<IOError> for TantivyError {
|
||||
fn from(io_error: IOError) -> TantivyError {
|
||||
TantivyError::IOError(io_error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for TantivyError {
|
||||
fn from(io_error: io::Error) -> TantivyError {
|
||||
TantivyError::IOError(io_error.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<query::QueryParserError> for TantivyError {
|
||||
fn from(parsing_error: query::QueryParserError) -> TantivyError {
|
||||
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
|
||||
@@ -122,9 +127,15 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<chrono::ParseError> for TantivyError {
|
||||
fn from(err: chrono::ParseError) -> TantivyError {
|
||||
TantivyError::InvalidArgument(err.to_string())
|
||||
impl From<OpenReadError> for TantivyError {
|
||||
fn from(error: OpenReadError) -> TantivyError {
|
||||
match error {
|
||||
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
|
||||
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
|
||||
OpenReadError::IncompatibleIndex(incompatibility) => {
|
||||
TantivyError::IncompatibleIndex(incompatibility)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -134,9 +145,35 @@ impl From<schema::DocParsingError> for TantivyError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OpenWriteError> for TantivyError {
|
||||
fn from(error: OpenWriteError) -> TantivyError {
|
||||
match error {
|
||||
OpenWriteError::FileAlreadyExists(filepath) => {
|
||||
TantivyError::FileAlreadyExists(filepath)
|
||||
}
|
||||
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OpenDirectoryError> for TantivyError {
|
||||
fn from(error: OpenDirectoryError) -> TantivyError {
|
||||
match error {
|
||||
OpenDirectoryError::DoesNotExist(directory_path) => {
|
||||
TantivyError::PathDoesNotExist(directory_path)
|
||||
}
|
||||
OpenDirectoryError::NotADirectory(directory_path) => {
|
||||
TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
|
||||
}
|
||||
OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for TantivyError {
|
||||
fn from(error: serde_json::Error) -> TantivyError {
|
||||
TantivyError::IOError(error.into())
|
||||
let io_err = io::Error::from(error);
|
||||
TantivyError::IOError(io_err.into())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
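Most of the error.rs hunk above is a mechanical migration from the `failure` crate's `#[fail(display = ...)]`/`#[cause]` attributes to `thiserror`'s `#[error(...)]`/`#[from]`. A condensed sketch of the pattern, with hypothetical variant names and the `thiserror` crate as a dependency:

```rust
use std::io;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ExampleError {
    // `#[error(...)]` replaces failure's `#[fail(display = ...)]`,
    // with `{0}` instead of the positional `_0`.
    #[error("An invalid argument was passed: '{0}'")]
    InvalidArgument(String),
    // `#[from]` marks the source error and derives `From<io::Error>`,
    // replacing the hand-written `impl From<...>` plus `#[cause]`.
    #[error("An IO error occurred: '{0}'")]
    Io(#[from] io::Error),
}

fn open() -> Result<(), ExampleError> {
    // The `?` operator relies on the derived `From<io::Error>` conversion.
    std::fs::read("does-not-exist")?;
    Ok(())
}

fn main() {
    match open() {
        Ok(()) => println!("read ok"),
        Err(err) => println!("{}", err), // Display comes from #[error(...)]
    }
}
```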
@@ -6,114 +6,31 @@ pub use self::writer::BytesFastFieldWriter;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value};
|
||||
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED};
|
||||
use crate::{DocAddress, DocSet, Index, Searcher, Term};
|
||||
use std::ops::Deref;
|
||||
use crate::schema::Schema;
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn test_bytes() -> crate::Result<()> {
|
||||
fn test_bytes() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST);
|
||||
let field = schema_builder.add_bytes_field("bytesfield");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![255u8]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000]));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
|
||||
index_writer.add_document(doc!(field=>vec![]));
|
||||
index_writer.add_document(doc!(field=>vec![255u8]));
|
||||
index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
|
||||
index_writer.add_document(doc!(field=>vec![0u8; 1000]));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap();
|
||||
let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
|
||||
|
||||
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
|
||||
assert!(bytes_reader.get_bytes(1).is_empty());
|
||||
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
|
||||
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
|
||||
let long = vec![0u8; 1000];
|
||||
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_index_for_test<T: Into<BytesOptions>>(
|
||||
byte_options: T,
|
||||
) -> crate::Result<impl Deref<Target = Searcher>> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(
|
||||
field => b"tantivy".as_ref(),
|
||||
field => b"lucene".as_ref()
|
||||
));
|
||||
index_writer.commit()?;
|
||||
Ok(index.reader()?.searcher())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stored_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(STORED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
|
||||
assert_eq!(values.len(), 2);
|
||||
let values_bytes: Vec<&[u8]> = values
|
||||
.into_iter()
|
||||
.flat_map(|value| value.bytes_value())
|
||||
.collect();
|
||||
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_stored_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(INDEXED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
assert!(retrieved_doc.get_first(field).is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(INDEXED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let term_weight = term_query.specialized_weight(&searcher, true)?;
|
||||
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0f32)?;
|
||||
assert_eq!(term_scorer.doc(), 0u32);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_index_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(STORED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let term_weight_res = term_query.specialized_weight(&searcher, false);
|
||||
assert!(matches!(
|
||||
term_weight_res,
|
||||
Err(crate::TantivyError::SchemaError(_))
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(FAST)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let fast_fields = searcher.segment_reader(0u32).fast_fields();
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let fast_field_reader = fast_fields.bytes(field).unwrap();
|
||||
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use owning_ref::OwningRef;
|
||||
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::DocId;
|
||||
|
||||
@@ -16,16 +17,16 @@ use crate::DocId;
|
||||
#[derive(Clone)]
|
||||
pub struct BytesFastFieldReader {
|
||||
idx_reader: FastFieldReader<u64>,
|
||||
values: OwnedBytes,
|
||||
values: OwningRef<ReadOnlySource, [u8]>,
|
||||
}
|
||||
|
||||
impl BytesFastFieldReader {
|
||||
pub(crate) fn open(
|
||||
idx_reader: FastFieldReader<u64>,
|
||||
values_file: FileSlice,
|
||||
) -> crate::Result<BytesFastFieldReader> {
|
||||
let values = values_file.read_bytes()?;
|
||||
Ok(BytesFastFieldReader { idx_reader, values })
|
||||
values_source: ReadOnlySource,
|
||||
) -> BytesFastFieldReader {
|
||||
let values = OwningRef::new(values_source).map(|source| &source[..]);
|
||||
BytesFastFieldReader { idx_reader, values }
|
||||
}
|
||||
|
||||
fn range(&self, doc: DocId) -> (usize, usize) {
|
||||
@@ -37,7 +38,7 @@ impl BytesFastFieldReader {
|
||||
/// Returns the bytes associated to the given `doc`
|
||||
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
|
||||
let (start, stop) = self.range(doc);
|
||||
&self.values.as_slice()[start..stop]
|
||||
&self.values[start..stop]
|
||||
}
|
||||
|
||||
/// Returns the overall number of bytes in this bytes fast field.
|
||||
|
||||
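The reader above resolves `get_bytes(doc)` with one lookup in an offsets fast field followed by a slice into a single contiguous values buffer; the writer hunks that follow serialize exactly those two columns, ending the offsets with `vals.len()`. A small model of that layout (illustrative only, not tantivy's API):

```rust
// Doc `i` owns `values[offsets[i]..offsets[i + 1]]`; the offsets column has
// one extra trailing entry so the last document also has an end bound.
fn get_bytes<'a>(offsets: &[u64], values: &'a [u8], doc: usize) -> &'a [u8] {
    let start = offsets[doc] as usize;
    let stop = offsets[doc + 1] as usize;
    &values[start..stop]
}

fn main() {
    // Three documents: b"ab", b"" (empty), b"cde".
    let values = b"abcde";
    let offsets = [0u64, 2, 2, 5]; // written by the serializer, ending with vals.len()
    assert_eq!(get_bytes(&offsets, values, 0), &b"ab"[..]);
    assert_eq!(get_bytes(&offsets, values, 1), &b""[..]);
    assert_eq!(get_bytes(&offsets, values, 2), &b"cde"[..]);
}
```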
@@ -49,10 +49,16 @@ impl BytesFastFieldWriter {
|
||||
/// matching field values present in the document.
|
||||
pub fn add_document(&mut self, doc: &Document) {
|
||||
self.next_doc();
|
||||
for field_value in doc.get_all(self.field) {
|
||||
if let Value::Bytes(ref bytes) = field_value {
|
||||
self.vals.extend_from_slice(bytes);
|
||||
return;
|
||||
for field_value in doc.field_values() {
|
||||
if field_value.field() == self.field {
|
||||
if let Value::Bytes(ref bytes) = *field_value.value() {
|
||||
self.vals.extend_from_slice(bytes);
|
||||
} else {
|
||||
panic!(
|
||||
"Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
|
||||
self.field, field_value
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -70,18 +76,21 @@ impl BytesFastFieldWriter {
|
||||
|
||||
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
|
||||
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
|
||||
// writing the offset index
|
||||
let mut doc_index_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
for &offset in &self.doc_index {
|
||||
doc_index_serializer.add_val(offset)?;
|
||||
{
|
||||
// writing the offset index
|
||||
let mut doc_index_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
for &offset in &self.doc_index {
|
||||
doc_index_serializer.add_val(offset)?;
|
||||
}
|
||||
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
||||
doc_index_serializer.close_field()?;
|
||||
}
|
||||
{
|
||||
// writing the values themselves
|
||||
let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
|
||||
value_serializer.write_all(&self.vals)?;
|
||||
}
|
||||
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
||||
doc_index_serializer.close_field()?;
|
||||
// writing the values themselves
|
||||
serializer
|
||||
.new_bytes_fast_field_with_idx(self.field, 1)?
|
||||
.write_all(&self.vals)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use crate::common::{BitSet, HasLen};
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::WritePtr;
|
||||
use crate::space_usage::ByteCount;
|
||||
use crate::DocId;
|
||||
@@ -10,8 +9,6 @@ use std::io::Write;
|
||||
/// Write a delete `BitSet`
|
||||
///
|
||||
/// where `delete_bitset` is the set of deleted `DocId`.
|
||||
/// Warning: this function does not call terminate. The caller is in charge of
|
||||
/// closing the writer properly.
|
||||
pub fn write_delete_bitset(
|
||||
delete_bitset: &BitSet,
|
||||
max_doc: u32,
|
||||
@@ -40,41 +37,22 @@ pub fn write_delete_bitset(
|
||||
/// Set of deleted `DocId`s.
|
||||
#[derive(Clone)]
|
||||
pub struct DeleteBitSet {
|
||||
data: OwnedBytes,
|
||||
data: ReadOnlySource,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl DeleteBitSet {
|
||||
#[cfg(test)]
|
||||
pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
|
||||
use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
|
||||
use std::path::Path;
|
||||
assert!(docs.iter().all(|&doc| doc < max_doc));
|
||||
let mut bitset = BitSet::with_max_value(max_doc);
|
||||
for &doc in docs {
|
||||
bitset.insert(doc);
|
||||
}
|
||||
let directory = RAMDirectory::create();
|
||||
let path = Path::new("dummydeletebitset");
|
||||
let mut wrt = directory.open_write(path).unwrap();
|
||||
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
|
||||
wrt.terminate().unwrap();
|
||||
let file = directory.open_read(path).unwrap();
|
||||
Self::open(file).unwrap()
|
||||
}
|
||||
|
||||
/// Opens a delete bitset given its file.
|
||||
pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> {
|
||||
let bytes = file.read_bytes()?;
|
||||
let num_deleted: usize = bytes
|
||||
/// Opens a delete bitset given its data source.
|
||||
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
|
||||
let num_deleted: usize = data
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|b| b.count_ones() as usize)
|
||||
.sum();
|
||||
Ok(DeleteBitSet {
|
||||
data: bytes,
|
||||
DeleteBitSet {
|
||||
data,
|
||||
len: num_deleted,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true iff the document is still "alive". In other words, if it has not been deleted.
|
||||
@@ -86,7 +64,7 @@ impl DeleteBitSet {
|
||||
#[inline(always)]
|
||||
pub fn is_deleted(&self, doc: DocId) -> bool {
|
||||
let byte_offset = doc / 8u32;
|
||||
let b: u8 = self.data.as_slice()[byte_offset as usize];
|
||||
let b: u8 = (*self.data)[byte_offset as usize];
|
||||
let shift = (doc & 7u32) as u8;
|
||||
b & (1u8 << shift) != 0
|
||||
}
|
||||
@@ -105,35 +83,42 @@ impl HasLen for DeleteBitSet {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::DeleteBitSet;
|
||||
use crate::common::HasLen;
|
||||
use super::*;
|
||||
use crate::directory::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn test_delete_bitset_empty() {
|
||||
let delete_bitset = DeleteBitSet::for_test(&[], 10);
|
||||
for doc in 0..10 {
|
||||
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
|
||||
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
|
||||
let test_path = PathBuf::from("test");
|
||||
let mut directory = RAMDirectory::default();
|
||||
{
|
||||
let mut writer = directory.open_write(&*test_path).unwrap();
|
||||
write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
|
||||
writer.terminate().unwrap();
|
||||
}
|
||||
assert_eq!(delete_bitset.len(), 0);
|
||||
let source = directory.open_read(&test_path).unwrap();
|
||||
let delete_bitset = DeleteBitSet::open(source);
|
||||
for doc in 0..max_doc {
|
||||
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
|
||||
}
|
||||
assert_eq!(delete_bitset.len(), bitset.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_delete_bitset() {
|
||||
let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
|
||||
assert!(delete_bitset.is_alive(0));
|
||||
assert!(delete_bitset.is_deleted(1));
|
||||
assert!(delete_bitset.is_alive(2));
|
||||
assert!(delete_bitset.is_alive(3));
|
||||
assert!(delete_bitset.is_alive(4));
|
||||
assert!(delete_bitset.is_alive(5));
|
||||
assert!(delete_bitset.is_alive(6));
|
||||
assert!(delete_bitset.is_alive(6));
|
||||
assert!(delete_bitset.is_alive(7));
|
||||
assert!(delete_bitset.is_alive(8));
|
||||
assert!(delete_bitset.is_deleted(9));
|
||||
for doc in 0..10 {
|
||||
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
|
||||
{
|
||||
let mut bitset = BitSet::with_max_value(10);
|
||||
bitset.insert(1);
|
||||
bitset.insert(9);
|
||||
test_delete_bitset_helper(&bitset, 10);
|
||||
}
|
||||
{
|
||||
let mut bitset = BitSet::with_max_value(8);
|
||||
bitset.insert(1);
|
||||
bitset.insert(2);
|
||||
bitset.insert(3);
|
||||
bitset.insert(5);
|
||||
bitset.insert(7);
|
||||
test_delete_bitset_helper(&bitset, 8);
|
||||
}
|
||||
assert_eq!(delete_bitset.len(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
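The `is_deleted` check in this file boils down to one byte lookup plus one bit test, which the tests above exercise with docs 1 and 9 deleted out of 10. The same arithmetic as a standalone function (illustrative only):

```rust
/// Mirrors `DeleteBitSet::is_deleted`: one bit per document, 8 docs per byte.
fn is_deleted(bitset_bytes: &[u8], doc: u32) -> bool {
    let byte = bitset_bytes[(doc / 8) as usize];
    let shift = (doc & 7) as u8;
    byte & (1u8 << shift) != 0
}

fn main() {
    // Docs 1 and 9 deleted, max_doc = 10 -> two bytes: 0b0000_0010, 0b0000_0010.
    let bytes = [0b0000_0010u8, 0b0000_0010];
    assert!(!is_deleted(&bytes, 0));
    assert!(is_deleted(&bytes, 1));
    assert!(!is_deleted(&bytes, 8));
    assert!(is_deleted(&bytes, 9));
}
```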
@@ -4,8 +4,8 @@ use std::result;
|
||||
/// `FastFieldNotAvailableError` is returned when the
|
||||
/// user requested for a fast field reader, and the field was not
|
||||
/// defined in the schema as a fast field.
|
||||
#[derive(Debug, Error)]
|
||||
#[error("Fast field not available: '{field_name:?}'")]
|
||||
#[derive(Debug, Fail)]
|
||||
#[fail(display = "Fast field not available: '{:?}'", field_name)]
|
||||
pub struct FastFieldNotAvailableError {
|
||||
field_name: String,
|
||||
}
|
||||
|
||||
@@ -73,61 +73,7 @@ impl FacetReader {
|
||||
}
|
||||
|
||||
/// Return the list of facet ordinals associated to a document.
|
||||
pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
|
||||
pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
|
||||
self.term_ords.get_vals(doc, output);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::Index;
|
||||
use crate::{
|
||||
schema::{Facet, SchemaBuilder},
|
||||
Document,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let facet_field = schema_builder.add_facet_field("facet");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||
index_writer.add_document(Document::default());
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let facet_reader = searcher
|
||||
.segment_reader(0u32)
|
||||
.facet_reader(facet_field)
|
||||
.unwrap();
|
||||
let mut facet_ords = Vec::new();
|
||||
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||
assert_eq!(&facet_ords, &[2u64]);
|
||||
facet_reader.facet_ords(1u32, &mut facet_ords);
|
||||
assert!(facet_ords.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let facet_field = schema_builder.add_facet_field("facet");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(Document::default());
|
||||
index_writer.add_document(Document::default());
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let facet_reader = searcher
|
||||
.segment_reader(0u32)
|
||||
.facet_reader(facet_field)
|
||||
.unwrap();
|
||||
let mut facet_ords = Vec::new();
|
||||
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||
assert!(facet_ords.is_empty());
|
||||
facet_reader.facet_ords(1u32, &mut facet_ords);
|
||||
assert!(facet_ords.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,14 +33,11 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common;
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::Value;
use crate::{
chrono::{NaiveDateTime, Utc},
schema::Type,
};

mod bytes;
mod delete;
@@ -79,9 +76,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
fn make_zero() -> Self {
Self::from_u64(0i64.to_u64())
}

/// Returns the `schema::Type` for this FastValue.
fn to_type() -> Type;
}

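The `FastValue` impls that follow map each value to a `u64` word; for `f64` this goes through `to_bits()`. A small standalone illustration of why that representation is lossless (plain std Rust, not code from the patch):

```rust
// f64 -> u64 -> f64 through the raw bit pattern is exact, which is what lets a
// fast field store floating point values as plain u64 words.
fn roundtrip(value: f64) -> f64 {
    f64::from_bits(value.to_bits())
}

fn main() {
    assert_eq!(roundtrip(3.25), 3.25);
    // The bit pattern survives even for values that compare equal, like -0.0.
    assert_eq!(roundtrip(-0.0_f64).to_bits(), (-0.0_f64).to_bits());
}
```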
impl FastValue for u64 {
|
||||
@@ -104,10 +98,6 @@ impl FastValue for u64 {
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::U64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for i64 {
|
||||
@@ -129,10 +119,6 @@ impl FastValue for i64 {
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self as u64
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::I64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for f64 {
|
||||
@@ -154,10 +140,6 @@ impl FastValue for f64 {
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.to_bits()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::F64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for crate::DateTime {
|
||||
@@ -180,10 +162,6 @@ impl FastValue for crate::DateTime {
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.timestamp().as_u64()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::Date
|
||||
}
|
||||
}
|
||||
|
||||
fn value_to_u64(value: &Value) -> u64 {
|
||||
@@ -209,7 +187,6 @@ mod tests {
|
||||
use crate::schema::FAST;
|
||||
use crate::schema::{Document, IntOptions};
|
||||
use crate::{Index, SegmentId, SegmentReader};
|
||||
use common::HasLen;
|
||||
use once_cell::sync::Lazy;
|
||||
use rand::prelude::SliceRandom;
|
||||
use rand::rngs::StdRng;
|
||||
@@ -240,9 +217,9 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_small() -> crate::Result<()> {
|
||||
fn test_intfastfield_small() {
|
||||
let path = Path::new("test");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
@@ -255,24 +232,27 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 36 as usize);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), 13u64);
|
||||
assert_eq!(fast_field_reader.get(1), 14u64);
|
||||
assert_eq!(fast_field_reader.get(2), 2u64);
|
||||
Ok(())
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
assert_eq!(source.len(), 36 as usize);
|
||||
}
|
||||
{
|
||||
let composite_file = CompositeFile::open(&source).unwrap();
|
||||
let field_source = composite_file.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(field_source);
|
||||
assert_eq!(fast_field_reader.get(0), 13u64);
|
||||
assert_eq!(fast_field_reader.get(1), 14u64);
|
||||
assert_eq!(fast_field_reader.get(2), 2u64);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_large() -> crate::Result<()> {
|
||||
fn test_intfastfield_large() {
|
||||
let path = Path::new("test");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
||||
let mut serializer = FastFieldSerializer::from_write(write)?;
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>4u64));
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
|
||||
@@ -283,15 +263,19 @@ mod tests {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>215u64));
|
||||
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
||||
serializer.close()?;
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path)?;
|
||||
assert_eq!(file.len(), 61 as usize);
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
assert_eq!(source.len(), 61 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
assert_eq!(fast_field_reader.get(0), 4u64);
|
||||
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
|
||||
assert_eq!(fast_field_reader.get(2), 3_052u64);
|
||||
@@ -302,13 +286,12 @@ mod tests {
|
||||
assert_eq!(fast_field_reader.get(7), 1_501u64);
|
||||
assert_eq!(fast_field_reader.get(8), 215u64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_null_amplitude() -> crate::Result<()> {
|
||||
fn test_intfastfield_null_amplitude() {
|
||||
let path = Path::new("test");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
@@ -322,23 +305,24 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 34 as usize);
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
assert_eq!(source.len(), 34 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
for doc in 0..10_000 {
|
||||
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_large_numbers() -> crate::Result<()> {
|
||||
fn test_intfastfield_large_numbers() {
|
||||
let path = Path::new("test");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
@@ -354,12 +338,14 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 80042 as usize);
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
assert_eq!(source.len(), 80042 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
assert_eq!(fast_field_reader.get(0), 0u64);
|
||||
for doc in 1..10_001 {
|
||||
assert_eq!(
|
||||
@@ -368,13 +354,12 @@ mod tests {
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signed_intfastfield() -> crate::Result<()> {
|
||||
fn test_signed_intfastfield() {
|
||||
let path = Path::new("test");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut schema_builder = Schema::builder();
|
||||
|
||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||
@@ -393,12 +378,14 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 17709 as usize);
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
assert_eq!(source.len(), 17709 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
||||
|
||||
assert_eq!(fast_field_reader.min_value(), -100i64);
|
||||
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
||||
@@ -411,13 +398,12 @@ mod tests {
|
||||
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signed_intfastfield_default_val() -> crate::Result<()> {
|
||||
fn test_signed_intfastfield_default_val() {
|
||||
let path = Path::new("test");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut schema_builder = Schema::builder();
|
||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
@@ -434,14 +420,13 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
||||
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
@@ -452,26 +437,28 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_permutation() -> crate::Result<()> {
|
||||
fn test_intfastfield_permutation() {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let directory = RAMDirectory::create();
|
||||
let mut directory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
||||
let mut serializer = FastFieldSerializer::from_write(write)?;
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
for &x in &permutation {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||
}
|
||||
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
||||
serializer.close()?;
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path)?;
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
@@ -479,7 +466,6 @@ mod tests {
|
||||
a = fast_field_reader.get(a as u32);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -488,7 +474,7 @@ mod tests {
|
||||
let date_field = schema_builder.add_date_field("date", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
|
||||
index_writer.commit().unwrap();
|
||||
@@ -525,7 +511,7 @@ mod tests {
|
||||
);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer.add_document(doc!(
|
||||
date_field => crate::DateTime::from_u64(1i64.to_u64()),
|
||||
@@ -612,7 +598,7 @@ mod bench {
|
||||
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
@@ -625,9 +611,9 @@ mod bench {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
|
||||
@@ -646,7 +632,7 @@ mod bench {
|
||||
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
@@ -659,9 +645,9 @@ mod bench {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ mod tests {
|
||||
);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(field=>1u64, field=>3u64));
|
||||
index_writer.add_document(doc!());
|
||||
index_writer.add_document(doc!(field=>4u64));
|
||||
@@ -64,7 +64,7 @@ mod tests {
|
||||
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let first_time_stamp = chrono::Utc::now();
|
||||
index_writer.add_document(
|
||||
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
|
||||
@@ -100,7 +100,6 @@ mod tests {
|
||||
.get_first(date_field)
|
||||
.expect("cannot find value")
|
||||
.date_value()
|
||||
.unwrap()
|
||||
.timestamp(),
|
||||
first_time_stamp.timestamp()
|
||||
);
|
||||
@@ -109,7 +108,7 @@ mod tests {
|
||||
.get_first(time_i)
|
||||
.expect("cannot find value")
|
||||
.i64_value(),
|
||||
Some(1i64)
|
||||
1i64
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -132,7 +131,6 @@ mod tests {
|
||||
.get_first(date_field)
|
||||
.expect("cannot find value")
|
||||
.date_value()
|
||||
.unwrap()
|
||||
.timestamp(),
|
||||
two_secs_ahead.timestamp()
|
||||
);
|
||||
@@ -141,7 +139,7 @@ mod tests {
|
||||
.get_first(time_i)
|
||||
.expect("cannot find value")
|
||||
.i64_value(),
|
||||
Some(3i64)
|
||||
3i64
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -188,7 +186,7 @@ mod tests {
|
||||
);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(field=> 1i64, field => 3i64));
|
||||
index_writer.add_document(doc!());
|
||||
index_writer.add_document(doc!(field=> -4i64));
|
||||
@@ -199,14 +197,22 @@ mod tests {
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut vals = Vec::new();
|
||||
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
|
||||
multi_value_reader.get_vals(2, &mut vals);
|
||||
assert_eq!(&vals, &[-4i64]);
|
||||
multi_value_reader.get_vals(0, &mut vals);
|
||||
assert_eq!(&vals, &[1i64, 3i64]);
|
||||
multi_value_reader.get_vals(1, &mut vals);
|
||||
assert!(vals.is_empty());
|
||||
multi_value_reader.get_vals(3, &mut vals);
|
||||
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
|
||||
{
|
||||
multi_value_reader.get_vals(2, &mut vals);
|
||||
assert_eq!(&vals, &[-4i64]);
|
||||
}
|
||||
{
|
||||
multi_value_reader.get_vals(0, &mut vals);
|
||||
assert_eq!(&vals, &[1i64, 3i64]);
|
||||
}
|
||||
{
|
||||
multi_value_reader.get_vals(1, &mut vals);
|
||||
assert!(vals.is_empty());
|
||||
}
|
||||
{
|
||||
multi_value_reader.get_vals(3, &mut vals);
|
||||
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
#[ignore]
|
||||
@@ -215,7 +221,7 @@ mod tests {
|
||||
let field = schema_builder.add_facet_field("facetfield");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for i in 0..100_000 {
|
||||
index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index
|
||||
.writer_for_tests()
|
||||
.writer_with_num_threads(1, 30_000_000)
|
||||
.expect("Failed to create index writer.");
|
||||
index_writer.add_document(doc!(
|
||||
facet_field => Facet::from("/category/cat2"),
|
||||
|
||||
@@ -143,7 +143,7 @@ impl MultiValueIntFastFieldWriter {
|
||||
.iter()
|
||||
.map(|val| *mapping.get(val).expect("Missing term ordinal"));
|
||||
doc_vals.extend(remapped_vals);
|
||||
doc_vals.sort_unstable();
|
||||
doc_vals.sort();
|
||||
for &val in &doc_vals {
|
||||
value_serializer.add_val(val)?;
|
||||
}
|
||||
|
||||
@@ -3,12 +3,13 @@ use crate::common::bitpacker::BitUnpacker;
|
||||
use crate::common::compute_num_bits;
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::common::CompositeFile;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
||||
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::FAST;
|
||||
use crate::DocId;
|
||||
use owning_ref::OwningRef;
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::path::Path;
|
||||
@@ -19,27 +20,34 @@ use std::path::Path;
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct FastFieldReader<Item: FastValue> {
|
||||
bit_unpacker: BitUnpacker,
|
||||
bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
|
||||
min_value_u64: u64,
|
||||
max_value_u64: u64,
|
||||
_phantom: PhantomData<Item>,
|
||||
}
|
||||
|
||||
impl<Item: FastValue> FastFieldReader<Item> {
|
||||
/// Opens a fast field given a file.
|
||||
pub fn open(file: FileSlice) -> crate::Result<Self> {
|
||||
let mut bytes = file.read_bytes()?;
|
||||
let min_value = u64::deserialize(&mut bytes)?;
|
||||
let amplitude = u64::deserialize(&mut bytes)?;
|
||||
/// Opens a fast field given a source.
|
||||
pub fn open(data: ReadOnlySource) -> Self {
|
||||
let min_value: u64;
|
||||
let amplitude: u64;
|
||||
{
|
||||
let mut cursor = data.as_slice();
|
||||
min_value =
|
||||
u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
|
||||
amplitude =
|
||||
u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
|
||||
}
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_unpacker = BitUnpacker::new(bytes, num_bits);
|
||||
Ok(FastFieldReader {
|
||||
let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
|
||||
let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
|
||||
FastFieldReader {
|
||||
min_value_u64: min_value,
|
||||
max_value_u64: max_value,
|
||||
bit_unpacker,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
|
||||
@@ -127,7 +135,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
|
||||
let field = schema_builder.add_u64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let path = Path::new("__dummy__");
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory
|
||||
.open_write(path)
|
||||
@@ -149,11 +157,12 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
|
||||
let file = directory.open_read(path).expect("Failed to open the file");
|
||||
let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file");
|
||||
let field_file = composite_file
|
||||
let source = directory.open_read(path).expect("Failed to open the file");
|
||||
let composite_file =
|
||||
CompositeFile::open(&source).expect("Failed to read the composite file");
|
||||
let field_source = composite_file
|
||||
.open_read(field)
|
||||
.expect("File component not found");
|
||||
FastFieldReader::open(field_file).unwrap()
|
||||
FastFieldReader::open(field_source)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,52 +68,45 @@ impl FastFieldReaders {
|
||||
};
|
||||
for (field, field_entry) in schema.fields() {
|
||||
let field_type = field_entry.field_type();
|
||||
if let FieldType::Bytes(bytes_option) = field_type {
|
||||
if !bytes_option.is_fast() {
|
||||
continue;
|
||||
}
|
||||
let fast_field_idx_file = fast_fields_composite
|
||||
if field_type == &FieldType::Bytes {
|
||||
let idx_reader = fast_fields_composite
|
||||
.open_read_with_idx(field, 0)
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
||||
let idx_reader = FastFieldReader::open(fast_field_idx_file)?;
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
|
||||
.map(FastFieldReader::open)?;
|
||||
let data = fast_fields_composite
|
||||
.open_read_with_idx(field, 1)
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
||||
let bytes_fast_field_reader = BytesFastFieldReader::open(idx_reader, data)?;
|
||||
fast_field_readers
|
||||
.fast_bytes
|
||||
.insert(field, bytes_fast_field_reader);
|
||||
.insert(field, BytesFastFieldReader::open(idx_reader, data));
|
||||
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
|
||||
match cardinality {
|
||||
Cardinality::SingleValue => {
|
||||
if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
|
||||
match fast_type {
|
||||
FastType::U64 => {
|
||||
let fast_field_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let fast_field_reader = FastFieldReader::open(fast_field_data);
|
||||
fast_field_readers
|
||||
.fast_field_u64
|
||||
.insert(field, fast_field_reader);
|
||||
}
|
||||
FastType::I64 => {
|
||||
let fast_field_reader =
|
||||
FastFieldReader::open(fast_field_data.clone())?;
|
||||
fast_field_readers
|
||||
.fast_field_i64
|
||||
.insert(field, fast_field_reader);
|
||||
fast_field_readers.fast_field_i64.insert(
|
||||
field,
|
||||
FastFieldReader::open(fast_field_data.clone()),
|
||||
);
|
||||
}
|
||||
FastType::F64 => {
|
||||
let fast_field_reader =
|
||||
FastFieldReader::open(fast_field_data.clone())?;
|
||||
fast_field_readers
|
||||
.fast_field_f64
|
||||
.insert(field, fast_field_reader);
|
||||
fast_field_readers.fast_field_f64.insert(
|
||||
field,
|
||||
FastFieldReader::open(fast_field_data.clone()),
|
||||
);
|
||||
}
|
||||
FastType::Date => {
|
||||
let fast_field_reader =
|
||||
FastFieldReader::open(fast_field_data.clone())?;
|
||||
fast_field_readers
|
||||
.fast_field_date
|
||||
.insert(field, fast_field_reader);
|
||||
fast_field_readers.fast_field_date.insert(
|
||||
field,
|
||||
FastFieldReader::open(fast_field_data.clone()),
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -124,10 +117,10 @@ impl FastFieldReaders {
|
||||
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
|
||||
let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
|
||||
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
|
||||
let idx_reader = FastFieldReader::open(fast_field_idx)?;
|
||||
let idx_reader = FastFieldReader::open(fast_field_idx);
|
||||
match fast_type {
|
||||
FastType::I64 => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
@@ -135,7 +128,7 @@ impl FastFieldReaders {
|
||||
.insert(field, multivalued_int_fast_field);
|
||||
}
|
||||
FastType::U64 => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
@@ -143,7 +136,7 @@ impl FastFieldReaders {
|
||||
.insert(field, multivalued_int_fast_field);
|
||||
}
|
||||
FastType::F64 => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
@@ -151,7 +144,7 @@ impl FastFieldReaders {
|
||||
.insert(field, multivalued_int_fast_field);
|
||||
}
|
||||
FastType::Date => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
|
||||
@@ -33,7 +33,7 @@ impl FastFieldsWriter {
|
||||
let mut bytes_value_writers = Vec::new();
|
||||
|
||||
for (field, field_entry) in schema.fields() {
|
||||
match field_entry.field_type() {
|
||||
match *field_entry.field_type() {
|
||||
FieldType::I64(ref int_options)
|
||||
| FieldType::U64(ref int_options)
|
||||
| FieldType::F64(ref int_options)
|
||||
@@ -56,11 +56,9 @@ impl FastFieldsWriter {
|
||||
let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
|
||||
multi_values_writers.push(fast_field_writer);
|
||||
}
|
||||
FieldType::Bytes(bytes_option) => {
|
||||
if bytes_option.is_fast() {
|
||||
let fast_field_writer = BytesFastFieldWriter::new(field);
|
||||
bytes_value_writers.push(fast_field_writer);
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
let fast_field_writer = BytesFastFieldWriter::new(field);
|
||||
bytes_value_writers.push(fast_field_writer);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
@@ -128,7 +126,6 @@ impl FastFieldsWriter {
|
||||
for field_writer in &self.single_value_writers {
|
||||
field_writer.serialize(serializer)?;
|
||||
}
|
||||
|
||||
for field_writer in &self.multi_values_writers {
|
||||
let field = field_writer.field();
|
||||
field_writer.serialize(serializer, mapping.get(&field))?;
|
||||
|
||||
@@ -21,7 +21,7 @@ mod reader;
|
||||
mod serializer;
|
||||
mod writer;
|
||||
|
||||
pub use self::reader::{FieldNormReader, FieldNormReaders};
|
||||
pub use self::reader::FieldNormReader;
|
||||
pub use self::serializer::FieldNormsSerializer;
|
||||
pub use self::writer::FieldNormsWriter;
|
||||
|
||||
|
||||
@@ -1,47 +1,6 @@
|
||||
use super::{fieldnorm_to_id, id_to_fieldnorm};
|
||||
use crate::common::CompositeFile;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::schema::Field;
|
||||
use crate::space_usage::PerFieldSpaceUsage;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::DocId;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
|
||||
/// field) of all indexed fields in the index.
|
||||
///
|
||||
/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
|
||||
/// `fieldnorm_id`.
|
||||
/// The mapping from `fieldnorm` to `fieldnorm_id` is given by monotonic.
|
||||
#[derive(Clone)]
|
||||
pub struct FieldNormReaders {
|
||||
data: Arc<CompositeFile>,
|
||||
}
|
||||
|
||||
impl FieldNormReaders {
|
||||
/// Creates a field norm reader.
|
||||
pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> {
|
||||
let data = CompositeFile::open(&file)?;
|
||||
Ok(FieldNormReaders {
|
||||
data: Arc::new(data),
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the FieldNormReader for a specific field.
|
||||
pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> {
|
||||
if let Some(file) = self.data.open_read(field) {
|
||||
let fieldnorm_reader = FieldNormReader::open(file)?;
|
||||
Ok(Some(fieldnorm_reader))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a break down of the space usage per field.
|
||||
pub fn space_usage(&self) -> PerFieldSpaceUsage {
|
||||
self.data.space_usage()
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to
@@ -49,7 +8,7 @@ impl FieldNormReaders {
///
/// This metric is important to compute the score of a
/// document : a document having a query word in one its short fields
/// (e.g. title)is likely to be more relevant than in one of its longer field
/// (e.g. title) is likely to be more relevant than in one of its longer field
/// (e.g. body).
///
/// tantivy encodes `fieldnorm` on one byte with some precision loss,
@@ -60,32 +19,14 @@ impl FieldNormReaders {
/// Apart from compression, this scale also makes it possible to
/// precompute computationally expensive functions of the fieldnorm
/// in a very short array.
#[derive(Clone)]
pub enum FieldNormReader {
ConstFieldNorm { fieldnorm_id: u8, num_docs: u32 },
OneByte(OwnedBytes),
pub struct FieldNormReader {
data: ReadOnlySource,
}
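The doc comment above mentions precomputing expensive functions of the fieldnorm into a very short array. A hedged sketch of that idea follows; the decoding closure and the weighting formula are illustrative assumptions, not tantivy's actual scoring code.

```rust
// Because a fieldnorm_id is a single byte, any per-document weight derived from
// the fieldnorm can be tabulated once into 256 entries and then looked up by id.
// `id_to_fieldnorm` is assumed to decode the compressed id; the formula below is
// only an example of an "expensive" function worth caching.
fn precompute_weight_cache(id_to_fieldnorm: impl Fn(u8) -> u32) -> [f32; 256] {
    let mut cache = [0.0f32; 256];
    for id in 0u8..=255u8 {
        let fieldnorm = id_to_fieldnorm(id) as f32;
        // Shorter fields get a larger weight in this toy length normalization.
        cache[id as usize] = 1.0 / (1.0 + fieldnorm).sqrt();
    }
    cache
}
```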
|
||||
impl FieldNormReader {
|
||||
pub fn const_fieldnorm_id(fieldnorm_id: u8, num_docs: u32) -> FieldNormReader {
|
||||
FieldNormReader::ConstFieldNorm {
|
||||
fieldnorm_id,
|
||||
num_docs,
|
||||
}
|
||||
}
|
||||
|
||||
/// Opens a field norm reader given its file.
|
||||
pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
|
||||
let data = fieldnorm_file.read_bytes()?;
|
||||
Ok(FieldNormReader::OneByte(data))
|
||||
}
|
||||
|
||||
/// Returns the number of documents in this segment.
|
||||
pub fn num_docs(&self) -> u32 {
|
||||
match self {
|
||||
Self::ConstFieldNorm { num_docs, .. } => *num_docs,
|
||||
FieldNormReader::OneByte(vals) => vals.len() as u32,
|
||||
}
|
||||
/// Opens a field norm reader given its data source.
|
||||
pub fn open(data: ReadOnlySource) -> Self {
|
||||
FieldNormReader { data }
|
||||
}
|
||||
|
||||
/// Returns the `fieldnorm` associated to a doc id.
|
||||
@@ -97,7 +38,6 @@ impl FieldNormReader {
|
||||
///
|
||||
/// The fieldnorm is effectively decoded from the
|
||||
/// `fieldnorm_id` by doing a simple table lookup.
|
||||
#[inline(always)]
|
||||
pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
|
||||
let fieldnorm_id = self.fieldnorm_id(doc_id);
|
||||
id_to_fieldnorm(fieldnorm_id)
|
||||
@@ -106,11 +46,8 @@ impl FieldNormReader {
|
||||
/// Returns the `fieldnorm_id` associated to a document.
|
||||
#[inline(always)]
|
||||
pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
|
||||
match self {
|
||||
FieldNormReader::ConstFieldNorm { fieldnorm_id, .. } => *fieldnorm_id,
|
||||
|
||||
FieldNormReader::OneByte(data) => data.as_slice()[doc_id as usize],
|
||||
}
|
||||
let fielnorms_data = self.data.as_slice();
|
||||
fielnorms_data[doc_id as usize]
|
||||
}
|
||||
|
||||
/// Converts a `fieldnorm_id` into a fieldnorm.
|
||||
@@ -125,32 +62,18 @@ impl FieldNormReader {
|
||||
pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
|
||||
fieldnorm_to_id(fieldnorm)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
|
||||
let field_norms_id = field_norms
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(FieldNormReader::fieldnorm_to_id)
|
||||
.collect::<Vec<u8>>();
|
||||
let field_norms_data = OwnedBytes::new(field_norms_id);
|
||||
FieldNormReader::OneByte(field_norms_data)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
|
||||
#[test]
|
||||
fn test_from_fieldnorms_array() {
|
||||
let fieldnorms = &[1, 2, 3, 4, 1_000_000];
|
||||
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
|
||||
assert_eq!(fieldnorm_reader.num_docs(), 5);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
|
||||
impl From<Vec<u32>> for FieldNormReader {
|
||||
fn from(field_norms: Vec<u32>) -> FieldNormReader {
|
||||
let field_norms_id = field_norms
|
||||
.into_iter()
|
||||
.map(FieldNormReader::fieldnorm_to_id)
|
||||
.collect::<Vec<u8>>();
|
||||
let field_norms_data = ReadOnlySource::from(field_norms_id);
|
||||
FieldNormReader {
|
||||
data: field_norms_data,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ use std::io;
|
||||
/// byte per document per field.
|
||||
pub struct FieldNormsWriter {
|
||||
fields: Vec<Field>,
|
||||
fieldnorms_buffer: Vec<Option<Vec<u8>>>,
|
||||
fieldnorms_buffer: Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl FieldNormsWriter {
|
||||
@@ -23,7 +23,7 @@ impl FieldNormsWriter {
|
||||
schema
|
||||
.fields()
|
||||
.filter_map(|(field, field_entry)| {
|
||||
if field_entry.has_fieldnorms() {
|
||||
if field_entry.is_indexed() {
|
||||
Some(field)
|
||||
} else {
|
||||
None
|
||||
@@ -36,14 +36,15 @@ impl FieldNormsWriter {
|
||||
/// specified in the schema.
|
||||
pub fn for_schema(schema: &Schema) -> FieldNormsWriter {
|
||||
let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
|
||||
let num_fields = schema.num_fields();
|
||||
let mut fieldnorms_buffer: Vec<Option<Vec<u8>>> = vec![None; num_fields];
|
||||
for field in &fields {
|
||||
fieldnorms_buffer[field.field_id() as usize] = Some(Vec::new());
|
||||
}
|
||||
let max_field = fields
|
||||
.iter()
|
||||
.map(Field::field_id)
|
||||
.max()
|
||||
.map(|max_field_id| max_field_id as usize + 1)
|
||||
.unwrap_or(0);
|
||||
FieldNormsWriter {
|
||||
fields,
|
||||
fieldnorms_buffer,
|
||||
fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,10 +53,8 @@ impl FieldNormsWriter {
|
||||
///
|
||||
/// Will extend with 0-bytes for documents that have not been seen.
|
||||
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
|
||||
for buffer_opt in self.fieldnorms_buffer.iter_mut() {
|
||||
if let Some(buffer) = buffer_opt {
|
||||
buffer.resize(max_doc as usize, 0u8);
|
||||
}
|
||||
for field in self.fields.iter() {
|
||||
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,24 +67,22 @@ impl FieldNormsWriter {
|
||||
/// * field - the field being set
|
||||
/// * fieldnorm - the number of terms present in document `doc` in field `field`
|
||||
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
|
||||
if let Some(fieldnorm_buffer) = self.fieldnorms_buffer[field.field_id() as usize].as_mut() {
|
||||
assert!(
|
||||
fieldnorm_buffer.len() <= doc as usize,
|
||||
"Cannot register a given fieldnorm twice" // we fill intermediary `DocId` as having a fieldnorm of 0.
|
||||
);
|
||||
fieldnorm_buffer.resize(doc as usize + 1, 0u8);
|
||||
fieldnorm_buffer[doc as usize] = fieldnorm_to_id(fieldnorm);
|
||||
}
|
||||
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize];
|
||||
assert!(
|
||||
fieldnorm_buffer.len() <= doc as usize,
|
||||
"Cannot register a given fieldnorm twice"
|
||||
);
|
||||
// we fill intermediary `DocId` as having a fieldnorm of 0.
|
||||
fieldnorm_buffer.resize(doc as usize + 1, 0u8);
|
||||
fieldnorm_buffer[doc as usize] = fieldnorm_to_id(fieldnorm);
|
||||
}
|
||||
|
||||
/// Serialize the seen fieldnorm values to the serializer for all fields.
|
||||
pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
|
||||
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
|
||||
for &field in self.fields.iter() {
|
||||
if let Some(buffer) = self.fieldnorms_buffer[field.field_id() as usize].as_ref() {
|
||||
fieldnorms_serializer.serialize_field(field, &buffer[..])?;
|
||||
}
|
||||
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
|
||||
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
|
||||
}
|
||||
fieldnorms_serializer.close()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,22 +8,26 @@ use crate::core::SegmentComponent;
|
||||
use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::directory::TerminatingWrite;
|
||||
use crate::directory::{DirectoryLock, GarbageCollectionResult};
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::directory::{TerminatingWrite, WatchCallbackList};
|
||||
use crate::docset::DocSet;
|
||||
use crate::error::TantivyError;
|
||||
use crate::fastfield::write_delete_bitset;
|
||||
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
|
||||
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
|
||||
use crate::indexer::operation::DeleteOperation;
|
||||
use crate::indexer::segment_manager::SegmentRegisters;
|
||||
use crate::indexer::segment_register::SegmentRegister;
|
||||
use crate::indexer::stamper::Stamper;
|
||||
use crate::indexer::MergePolicy;
|
||||
use crate::indexer::SegmentEntry;
|
||||
use crate::indexer::{SegmentEntry, ResourceManager};
|
||||
use crate::indexer::SegmentWriter;
|
||||
use crate::indexer::{IndexWriterConfig, MergePolicy};
|
||||
use crate::reader::NRTReader;
|
||||
use crate::schema::Document;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::schema::Term;
|
||||
use crate::Opstamp;
|
||||
use crate::tokenizer::TokenizerManager;
|
||||
use crate::{IndexReader, Opstamp};
|
||||
use crossbeam::channel;
|
||||
use futures::executor::block_on;
|
||||
use futures::future::Future;
|
||||
@@ -31,18 +35,10 @@ use smallvec::smallvec;
|
||||
use smallvec::SmallVec;
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread;
|
||||
use std::thread::JoinHandle;
|
||||
|
||||
// Size of the margin for the heap. A segment is closed when the remaining memory
// in the heap goes below MARGIN_IN_BYTES.
pub const MARGIN_IN_BYTES: usize = 1_000_000;

// We impose the memory per thread to be at least 3 MB.
pub const HEAP_SIZE_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
pub const HEAP_SIZE_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;

// Add document will block if the number of docs waiting in the queue to be indexed
// reaches `PIPELINE_MAX_SIZE_IN_DOCS`
const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
@@ -69,8 +65,9 @@ pub struct IndexWriter {
|
||||
_directory_lock: Option<DirectoryLock>,
|
||||
|
||||
index: Index,
|
||||
config: IndexWriterConfig,
|
||||
|
||||
heap_size_in_bytes_per_thread: usize,
|
||||
segment_registers: Arc<RwLock<SegmentRegisters>>,
|
||||
|
||||
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
|
||||
|
||||
@@ -80,13 +77,14 @@ pub struct IndexWriter {
|
||||
segment_updater: SegmentUpdater,
|
||||
|
||||
worker_id: usize,
|
||||
|
||||
num_threads: usize,
|
||||
|
||||
delete_queue: DeleteQueue,
|
||||
|
||||
stamper: Stamper,
|
||||
committed_opstamp: Opstamp,
|
||||
|
||||
on_commit: WatchCallbackList,
|
||||
|
||||
memory_manager: ResourceManager,
|
||||
}
|
||||
|
||||
fn compute_deleted_bitset(
|
||||
@@ -108,19 +106,19 @@ fn compute_deleted_bitset(
|
||||
// Limit doc helps identify the first document
|
||||
// that may be affected by the delete operation.
|
||||
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
|
||||
let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
|
||||
let inverted_index = segment_reader.inverted_index(delete_op.term.field());
|
||||
if let Some(mut docset) =
|
||||
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
|
||||
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
|
||||
{
|
||||
let mut deleted_doc = docset.doc();
|
||||
while deleted_doc != TERMINATED {
|
||||
while docset.advance() {
|
||||
let deleted_doc = docset.doc();
|
||||
if deleted_doc < limit_doc {
|
||||
delete_bitset.insert(deleted_doc);
|
||||
might_have_changed = true;
|
||||
}
|
||||
deleted_doc = docset.advance();
|
||||
}
|
||||
}
|
||||
|
||||
delete_cursor.advance();
|
||||
}
|
||||
Ok(might_have_changed)
|
||||
@@ -133,7 +131,6 @@ fn compute_deleted_bitset(
|
||||
/// For instance, there was no delete operation between the state of the `segment_entry` and
|
||||
/// the `target_opstamp`, `segment_entry` is not updated.
|
||||
pub(crate) fn advance_deletes(
|
||||
mut segment: Segment,
|
||||
segment_entry: &mut SegmentEntry,
|
||||
target_opstamp: Opstamp,
|
||||
) -> crate::Result<()> {
|
||||
@@ -142,25 +139,33 @@ pub(crate) fn advance_deletes(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
|
||||
let delete_bitset_opt = segment_entry.take_delete_bitset();
|
||||
|
||||
// We avoid directly advancing the `SegmentEntry` delete cursor, because
|
||||
// we do not want to end up in an invalid state if the delete bitset
|
||||
// serialization fails.
|
||||
let mut delete_cursor = segment_entry.delete_cursor();
|
||||
|
||||
if delete_bitset_opt.is_none() && segment_entry.delete_cursor().get().is_none() {
|
||||
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// We open our current serialized segment to compute the new deleted bitset.
|
||||
let segment = segment_entry.segment().clone();
|
||||
let segment_reader = SegmentReader::open(&segment)?;
|
||||
|
||||
let max_doc = segment_reader.max_doc();
|
||||
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
|
||||
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
|
||||
None => BitSet::with_max_value(max_doc),
|
||||
};
|
||||
|
||||
let mut delete_bitset: BitSet =
|
||||
delete_bitset_opt.unwrap_or_else(|| BitSet::with_max_value(max_doc));
|
||||
|
||||
let num_deleted_docs_before = segment.meta().num_deleted_docs();
|
||||
|
||||
compute_deleted_bitset(
|
||||
&mut delete_bitset,
|
||||
&segment_reader,
|
||||
segment_entry.delete_cursor(),
|
||||
&mut delete_cursor,
|
||||
&DocToOpstampMapping::None,
|
||||
target_opstamp,
|
||||
)?;
|
||||
@@ -179,32 +184,40 @@ pub(crate) fn advance_deletes(
|
||||
let num_deleted_docs: u32 = delete_bitset.len() as u32;
|
||||
if num_deleted_docs > num_deleted_docs_before {
|
||||
// There are new deletes. We need to write a new delete file.
|
||||
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
|
||||
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
|
||||
let mut delete_file = segment
|
||||
.with_delete_meta(num_deleted_docs as u32, target_opstamp)
|
||||
.open_write(SegmentComponent::DELETE)?;
|
||||
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
|
||||
delete_file.terminate()?;
|
||||
segment_entry.reset_delete_meta(num_deleted_docs as u32, target_opstamp);
|
||||
}
|
||||
|
||||
segment_entry.set_meta(segment.meta().clone());
|
||||
// Regardless of whether we did end up having to write a new file or not
|
||||
// we advance the `delete_cursor`. This is an optimisation. We want to ensure we do not
|
||||
// check that a given deleted term does not match any of our docs more than once.
|
||||
segment_entry.set_delete_cursor(delete_cursor);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn index_documents(
|
||||
memory_budget: usize,
|
||||
config: IndexWriterConfig,
|
||||
segment: Segment,
|
||||
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
|
||||
segment_updater: &mut SegmentUpdater,
|
||||
tokenizers: &TokenizerManager,
|
||||
mut delete_cursor: DeleteCursor,
|
||||
memory_manager: ResourceManager
|
||||
) -> crate::Result<bool> {
|
||||
let schema = segment.schema();
|
||||
|
||||
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
|
||||
let mut segment_writer = SegmentWriter::for_segment(
|
||||
&config, segment, &schema, tokenizers, memory_manager)?;
|
||||
for document_group in grouped_document_iterator {
|
||||
for doc in document_group {
|
||||
segment_writer.add_document(doc, &schema)?;
|
||||
}
|
||||
let mem_usage = segment_writer.mem_usage();
|
||||
if mem_usage >= memory_budget - MARGIN_IN_BYTES {
|
||||
if mem_usage >= config.heap_size_before_flushing() {
|
||||
info!(
|
||||
"Buffer limit reached, flushing segment with maxdoc={}.",
|
||||
segment_writer.max_doc()
|
||||
@@ -223,24 +236,14 @@ fn index_documents(
|
||||
// the worker thread.
|
||||
assert!(max_doc > 0);
|
||||
|
||||
let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
|
||||
|
||||
let segment_with_max_doc = segment.with_max_doc(max_doc);
|
||||
let (segment, doc_opstamps): (Segment, Vec<Opstamp>) = segment_writer.finalize()?;
|
||||
|
||||
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
|
||||
|
||||
let delete_bitset_opt = apply_deletes(
|
||||
&segment_with_max_doc,
|
||||
&mut delete_cursor,
|
||||
&doc_opstamps,
|
||||
last_docstamp,
|
||||
)?;
|
||||
let delete_bitset_opt =
|
||||
apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
|
||||
|
||||
let segment_entry = SegmentEntry::new(
|
||||
segment_with_max_doc.meta().clone(),
|
||||
delete_cursor,
|
||||
delete_bitset_opt,
|
||||
);
|
||||
let segment_entry = SegmentEntry::new(segment, delete_cursor, delete_bitset_opt);
|
||||
block_on(segment_updater.schedule_add_segment(segment_entry))?;
|
||||
Ok(true)
|
||||
}
|
||||
@@ -292,21 +295,10 @@ impl IndexWriter {
|
||||
/// If the heap size per thread is too small, panics.
|
||||
pub(crate) fn new(
|
||||
index: &Index,
|
||||
num_threads: usize,
|
||||
heap_size_in_bytes_per_thread: usize,
|
||||
mut config: IndexWriterConfig,
|
||||
directory_lock: DirectoryLock,
|
||||
) -> crate::Result<IndexWriter> {
|
||||
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
|
||||
let err_msg = format!(
|
||||
"The heap size per thread needs to be at least {}.",
|
||||
HEAP_SIZE_MIN
|
||||
);
|
||||
return Err(TantivyError::InvalidArgument(err_msg));
|
||||
}
|
||||
if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
|
||||
let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
|
||||
return Err(TantivyError::InvalidArgument(err_msg));
|
||||
}
|
||||
config.validate()?;
|
||||
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
||||
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
||||
|
||||
@@ -314,16 +306,26 @@ impl IndexWriter {
|
||||
|
||||
let current_opstamp = index.load_metas()?.opstamp;
|
||||
|
||||
let meta = index.load_metas()?;
|
||||
|
||||
let stamper = Stamper::new(current_opstamp);
|
||||
|
||||
let commited_segments = SegmentRegister::new(
|
||||
index.directory(),
|
||||
&index.schema(),
|
||||
meta.segments,
|
||||
&delete_queue.cursor(),
|
||||
);
|
||||
let segment_registers = Arc::new(RwLock::new(SegmentRegisters::new(commited_segments)));
|
||||
|
||||
let segment_updater =
|
||||
SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
|
||||
SegmentUpdater::create(segment_registers.clone(), index.clone(), stamper.clone())?;
|
||||
|
||||
let mut index_writer = IndexWriter {
|
||||
_directory_lock: Some(directory_lock),
|
||||
|
||||
heap_size_in_bytes_per_thread,
|
||||
index: index.clone(),
|
||||
config,
|
||||
|
||||
operation_receiver: document_receiver,
|
||||
operation_sender: document_sender,
|
||||
@@ -331,7 +333,6 @@ impl IndexWriter {
|
||||
segment_updater,
|
||||
|
||||
workers_join_handle: vec![],
|
||||
num_threads,
|
||||
|
||||
delete_queue,
|
||||
|
||||
@@ -339,6 +340,10 @@ impl IndexWriter {
|
||||
stamper,
|
||||
|
||||
worker_id: 0,
|
||||
segment_registers,
|
||||
on_commit: Default::default(),
|
||||
|
||||
memory_manager: Default::default()
|
||||
};
|
||||
index_writer.start_workers()?;
|
||||
Ok(index_writer)
|
||||
@@ -346,7 +351,7 @@ impl IndexWriter {
|
||||
|
||||
fn drop_sender(&mut self) {
|
||||
let (sender, _receiver) = channel::bounded(1);
|
||||
self.operation_sender = sender;
|
||||
mem::replace(&mut self.operation_sender, sender);
|
||||
}
|
||||
|
||||
/// If there are some merging threads, blocks until they all finish their work and
|
||||
@@ -366,23 +371,11 @@ impl IndexWriter {
|
||||
})?;
|
||||
}
|
||||
|
||||
let result = self
|
||||
self
|
||||
.segment_updater
|
||||
.wait_merging_thread()
|
||||
.map_err(|_| TantivyError::ErrorInThread("Failed to join merging thread.".into()));
|
||||
.wait_merging_thread();
|
||||
|
||||
if let Err(ref e) = result {
|
||||
error!("Some merging thread failed {:?}", e);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
|
||||
let delete_cursor = self.delete_queue.cursor();
|
||||
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
|
||||
block_on(self.segment_updater.schedule_add_segment(segment_entry))
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates a new segment.
|
||||
@@ -405,8 +398,9 @@ impl IndexWriter {
|
||||
|
||||
let mut delete_cursor = self.delete_queue.cursor();
|
||||
|
||||
let mem_budget = self.heap_size_in_bytes_per_thread;
|
||||
let index = self.index.clone();
|
||||
let config = self.config.clone();
|
||||
let memory_manager = self.memory_manager.clone();
|
||||
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
|
||||
.name(format!("thrd-tantivy-index{}", self.worker_id))
|
||||
.spawn(move || {
|
||||
@@ -435,11 +429,13 @@ impl IndexWriter {
|
||||
}
|
||||
let segment = index.new_segment();
|
||||
index_documents(
|
||||
mem_budget,
|
||||
config.clone(),
|
||||
segment,
|
||||
&mut document_iterator,
|
||||
&mut segment_updater,
|
||||
index.tokenizers(),
|
||||
delete_cursor.clone(),
|
||||
memory_manager.clone()
|
||||
)?;
|
||||
}
|
||||
})?;
|
||||
@@ -459,7 +455,7 @@ impl IndexWriter {
|
||||
}
|
||||
|
||||
fn start_workers(&mut self) -> crate::Result<()> {
|
||||
for _ in 0..self.num_threads {
|
||||
for _ in 0..self.config.max_indexing_threads {
|
||||
self.add_indexing_worker()?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -536,7 +532,6 @@ impl IndexWriter {
|
||||
/// when no documents are remaining.
|
||||
///
|
||||
/// Returns the former segment_ready channel.
|
||||
#[allow(unused_must_use)]
|
||||
fn recreate_document_channel(&mut self) -> OperationReceiver {
|
||||
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
|
||||
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
||||
@@ -565,18 +560,14 @@ impl IndexWriter {
|
||||
.take()
|
||||
.expect("The IndexWriter does not have any lock. This is a bug, please report.");
|
||||
|
||||
let new_index_writer: IndexWriter = IndexWriter::new(
|
||||
&self.index,
|
||||
self.num_threads,
|
||||
self.heap_size_in_bytes_per_thread,
|
||||
directory_lock,
|
||||
)?;
|
||||
let new_index_writer: IndexWriter =
|
||||
IndexWriter::new(&self.index, self.config.clone(), directory_lock)?;
|
||||
|
||||
// the current `self` is dropped right away because of this call.
|
||||
//
|
||||
// This will drop the document queue, and the thread
|
||||
// should terminate.
|
||||
*self = new_index_writer;
|
||||
mem::replace(self, new_index_writer);
|
||||
|
||||
// Drains the document receiver pipeline :
|
||||
// Workers don't need to index the pending documents.
|
||||
@@ -609,7 +600,7 @@ impl IndexWriter {
|
||||
/// It is also possible to add a payload to the `commit`
|
||||
/// using this API.
|
||||
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
|
||||
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
|
||||
pub fn prepare_commit(&mut self, soft_commit: bool) -> crate::Result<PreparedCommit> {
|
||||
// Here, because we join all of the worker threads,
|
||||
// all of the segment update for this commit have been
|
||||
// sent.
|
||||
@@ -637,7 +628,7 @@ impl IndexWriter {
|
||||
}
|
||||
|
||||
let commit_opstamp = self.stamper.stamp();
|
||||
let prepared_commit = PreparedCommit::new(self, commit_opstamp);
|
||||
let prepared_commit = PreparedCommit::new(self, commit_opstamp, soft_commit);
|
||||
info!("Prepared commit {}", commit_opstamp);
|
||||
Ok(prepared_commit)
|
||||
}
|
||||
@@ -657,7 +648,25 @@ impl IndexWriter {
|
||||
/// that made it in the commit.
|
||||
///
|
||||
pub fn commit(&mut self) -> crate::Result<Opstamp> {
|
||||
self.prepare_commit()?.commit()
|
||||
self.prepare_commit(false)?.commit()
|
||||
}
|
||||
|
||||
pub fn soft_commit(&mut self) -> crate::Result<Opstamp> {
|
||||
self.prepare_commit(true)?.commit()
|
||||
}
|
||||
|
||||
pub(crate) fn trigger_commit(&self) -> impl Future<Output = ()> {
|
||||
self.on_commit.broadcast()
|
||||
}
|
||||
|
||||
pub fn reader(&self, num_searchers: usize) -> crate::Result<IndexReader> {
|
||||
let nrt_reader = NRTReader::create(
|
||||
num_searchers,
|
||||
self.index.clone(),
|
||||
self.segment_registers.clone(),
|
||||
&self.on_commit,
|
||||
)?;
|
||||
Ok(IndexReader::NRT(nrt_reader))
|
||||
}
|
||||
|
||||
pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
|
||||
@@ -800,7 +809,7 @@ mod tests {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let index_writer = index.writer_for_tests().unwrap();
|
||||
let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let operations = vec![
|
||||
UserOperation::Add(doc!(text_field=>"a")),
|
||||
UserOperation::Add(doc!(text_field=>"b")),
|
||||
@@ -815,7 +824,7 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field => "hello1"));
|
||||
index_writer.add_document(doc!(text_field => "hello2"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
@@ -864,7 +873,7 @@ mod tests {
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let a_term = Term::from_field_text(text_field, "a");
|
||||
let b_term = Term::from_field_text(text_field, "b");
|
||||
let operations = vec![
|
||||
@@ -926,8 +935,8 @@ mod tests {
|
||||
fn test_lockfile_already_exists_error_msg() {
|
||||
let schema_builder = schema::Schema::builder();
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let _index_writer = index.writer_for_tests().unwrap();
|
||||
match index.writer_for_tests() {
|
||||
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
match index.writer_with_num_threads(1, 3_000_000) {
|
||||
Err(err) => {
|
||||
let err_msg = err.to_string();
|
||||
assert!(err_msg.contains("already an `IndexWriter`"));
|
||||
@@ -979,7 +988,7 @@ mod tests {
|
||||
let num_docs_containing = |s: &str| {
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_text(text_field, s);
|
||||
searcher.doc_freq(&term).unwrap()
|
||||
searcher.doc_freq(&term)
|
||||
};
|
||||
|
||||
{
|
||||
@@ -1015,7 +1024,7 @@ mod tests {
|
||||
.unwrap();
|
||||
let num_docs_containing = |s: &str| {
|
||||
let term_a = Term::from_field_text(text_field, s);
|
||||
reader.searcher().doc_freq(&term_a).unwrap()
|
||||
reader.searcher().doc_freq(&term_a)
|
||||
};
|
||||
{
|
||||
// writing the segment
|
||||
@@ -1055,7 +1064,8 @@ mod tests {
|
||||
index_writer.add_document(doc!(text_field => "a"));
|
||||
}
|
||||
{
|
||||
let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
|
||||
let mut prepared_commit =
|
||||
index_writer.prepare_commit(false).expect("commit failed");
|
||||
prepared_commit.set_payload("first commit");
|
||||
prepared_commit.commit().expect("commit failed");
|
||||
}
|
||||
@@ -1088,7 +1098,8 @@ mod tests {
|
||||
index_writer.add_document(doc!(text_field => "a"));
|
||||
}
|
||||
{
|
||||
let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
|
||||
let mut prepared_commit =
|
||||
index_writer.prepare_commit(false).expect("commit failed");
|
||||
prepared_commit.set_payload("first commit");
|
||||
prepared_commit.abort().expect("commit failed");
|
||||
}
|
||||
@@ -1110,7 +1121,6 @@ mod tests {
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_a)
|
||||
.unwrap()
|
||||
};
|
||||
assert_eq!(num_docs_containing("a"), 0);
|
||||
assert_eq!(num_docs_containing("b"), 100);
|
||||
@@ -1130,7 +1140,7 @@ mod tests {
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_text(text_field, s);
|
||||
searcher.doc_freq(&term).unwrap()
|
||||
searcher.doc_freq(&term)
|
||||
};
|
||||
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
|
||||
|
||||
@@ -1181,15 +1191,7 @@ mod tests {
|
||||
|
||||
// working with an empty index == no documents
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
assert_eq!(
|
||||
index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_b)
|
||||
.unwrap(),
|
||||
0
|
||||
);
|
||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1209,15 +1211,7 @@ mod tests {
|
||||
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
// expect the document with that term to be in the index
|
||||
assert_eq!(
|
||||
index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_a)
|
||||
.unwrap(),
|
||||
1
|
||||
);
|
||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1243,15 +1237,7 @@ mod tests {
|
||||
// Find original docs in the index
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
// expect the document with that term to be in the index
|
||||
assert_eq!(
|
||||
index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_a)
|
||||
.unwrap(),
|
||||
1
|
||||
);
|
||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1286,9 +1272,46 @@ mod tests {
|
||||
let idfield = schema_builder.add_text_field("id", STRING);
|
||||
schema_builder.add_text_field("optfield", STRING);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(idfield=>"myid"));
|
||||
let commit = index_writer.commit();
|
||||
assert!(commit.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_writer_reader() {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let idfield = schema_builder.add_text_field("id", STRING);
|
||||
schema_builder.add_text_field("optfield", STRING);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(idfield=>"myid"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
let reader = index_writer.reader(2).unwrap();
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.num_docs(), 1u64);
|
||||
index_writer.add_document(doc!(idfield=>"myid"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
assert_eq!(reader.searcher().num_docs(), 2u64);
|
||||
assert_eq!(searcher.num_docs(), 1u64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_writer_reader_soft_commit() {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let idfield = schema_builder.add_text_field("id", STRING);
|
||||
schema_builder.add_text_field("optfield", STRING);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(idfield=>"myid"));
|
||||
assert!(index_writer.soft_commit().is_ok());
|
||||
let nrt_reader = index_writer.reader(2).unwrap();
|
||||
let normal_reader = index.reader_builder().try_into().unwrap();
|
||||
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
|
||||
assert_eq!(normal_reader.searcher().num_docs(), 0u64);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
assert!(normal_reader.reload().is_ok());
|
||||
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
|
||||
assert_eq!(normal_reader.searcher().num_docs(), 1u64);
|
||||
}
|
||||
}
|
||||
|
||||
133 src/indexer/index_writer_config.rs Normal file
@@ -0,0 +1,133 @@
use serde::{Deserialize, Serialize};

// Size of the margin for the heap. A segment is closed when the remaining memory
// in the heap goes below MARGIN_IN_BYTES.
const MARGIN_IN_BYTES: u64 = 1_000_000;

// We impose the memory per thread to be at least 3 MB.
const HEAP_SIZE_MIN: u64 = MARGIN_IN_BYTES * 3u64;
const HEAP_SIZE_MAX: u64 = u32::max_value() as u64 - MARGIN_IN_BYTES;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IndexWriterConfig {
    pub max_indexing_threads: usize,
    pub max_merging_threads: usize,
    pub memory_budget: u64,
    pub store_flush_num_bytes: u64,
    pub persist_low: u64,
    pub persist_high: u64,
}

impl Default for IndexWriterConfig {
    fn default() -> Self {
        IndexWriterConfig {
            max_indexing_threads: 1,
            max_merging_threads: 3,
            memory_budget: 50_000_000u64,
            store_flush_num_bytes: 10_000_000u64,
            persist_low: 10_000_000u64,
            persist_high: 50_000_000u64,
        }
    }
}

impl IndexWriterConfig {
    #[cfg(test)]
    pub fn for_test() -> IndexWriterConfig {
        IndexWriterConfig {
            max_indexing_threads: 1,
            max_merging_threads: 5,
            memory_budget: 4_000_000u64,
            store_flush_num_bytes: 500_000u64,
            persist_low: 2_000_000u64,
            persist_high: 3_000_000u64,
        }
    }

    // Ensures the `IndexWriterConfig` is correct.
    //
    // This method checks that the values in the `IndexWriterConfig` are valid.
    // If they are not, it may mutate some of the values (like `max_indexing_threads`)
    // to fit the contracts, or return an error with an explicit error message.
    //
    // If called twice, the config is guaranteed not to be updated the second time.
    pub fn validate(&mut self) -> crate::Result<()> {
        if self.memory_budget < HEAP_SIZE_MIN {
            let err_msg = format!(
                "The heap size per thread needs to be at least {}.",
                HEAP_SIZE_MIN
            );
            return Err(crate::TantivyError::InvalidArgument(err_msg));
        }
        let heap_size_in_bytes_per_thread = self.heap_size_in_byte_per_thread();
        if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
            let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
            return Err(crate::TantivyError::InvalidArgument(err_msg));
        }
        if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
            self.max_indexing_threads = (self.memory_budget / HEAP_SIZE_MIN) as usize;
        }
        Ok(())
    }

    pub fn heap_size_in_byte_per_thread(&self) -> u64 {
        self.memory_budget / self.max_indexing_threads as u64
    }

    pub fn heap_size_before_flushing(&self) -> u64 {
        self.heap_size_in_byte_per_thread() - MARGIN_IN_BYTES
    }
}

#[cfg(test)]
mod tests {
    use crate::IndexWriterConfig;

    #[test]
    fn test_index_writer_config_simple() {
        let mut index = IndexWriterConfig {
            max_indexing_threads: 3,
            memory_budget: super::HEAP_SIZE_MIN * 3,
            ..Default::default()
        };
        assert!(index.validate().is_ok());
        assert_eq!(index.max_indexing_threads, 3);
        assert_eq!(index.heap_size_in_byte_per_thread(), super::HEAP_SIZE_MIN);
    }

    #[test]
    fn test_index_writer_config_reduce_num_threads() {
        let mut index = IndexWriterConfig {
            max_indexing_threads: 3,
            memory_budget: super::HEAP_SIZE_MIN,
            ..Default::default()
        };
        assert!(index.validate().is_ok());
        assert_eq!(index.max_indexing_threads, 1);
        assert_eq!(index.heap_size_in_byte_per_thread(), super::HEAP_SIZE_MIN);
    }

    #[test]
    fn test_index_writer_config_not_enough_memory() {
        let mut index = IndexWriterConfig {
            max_indexing_threads: 1,
            memory_budget: super::HEAP_SIZE_MIN - 1,
            ..Default::default()
        };
        assert!(
            matches!(index.validate(), Err(crate::TantivyError::InvalidArgument(msg)) if msg.contains("The heap size per thread needs to be at least"))
        );
    }

    #[test]
    fn test_index_writer_config_too_much_memory() {
        let mut index = IndexWriterConfig {
            max_indexing_threads: 1,
            memory_budget: (u32::max_value() as u64) + 1,
            ..Default::default()
        };
        assert!(
            matches!(index.validate(), Err(crate::TantivyError::InvalidArgument(msg)) if msg.contains("The heap size per thread cannot exceed"))
        );
    }
}
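As a rough usage sketch (not part of the diff) of the `IndexWriterConfig` introduced above, assuming the struct is re-exported at the crate root as its own tests (`use crate::IndexWriterConfig;`) imply; the budget figures are made up:

```rust
use tantivy::IndexWriterConfig;

// Hypothetical configuration. `validate` may shrink `max_indexing_threads`
// so that every thread keeps at least HEAP_SIZE_MIN bytes, or reject the
// budget outright with an InvalidArgument error.
let mut config = IndexWriterConfig {
    max_indexing_threads: 4,
    memory_budget: 200_000_000,
    ..Default::default()
};
config.validate().expect("invalid index writer configuration");
// 200 MB spread over 4 threads leaves 50 MB per indexing thread.
assert_eq!(config.heap_size_in_byte_per_thread(), 50_000_000);
```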
@@ -54,6 +54,10 @@ impl LogMergePolicy {
|
||||
|
||||
impl MergePolicy for LogMergePolicy {
|
||||
fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
|
||||
if segments.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let mut size_sorted_tuples = segments
|
||||
.iter()
|
||||
.map(SegmentMeta::num_docs)
|
||||
@@ -63,35 +67,27 @@ impl MergePolicy for LogMergePolicy {
|
||||
|
||||
size_sorted_tuples.sort_by(|x, y| y.1.cmp(&(x.1)));
|
||||
|
||||
if size_sorted_tuples.len() <= 1 {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let size_sorted_log_tuples: Vec<_> = size_sorted_tuples
|
||||
.into_iter()
|
||||
.map(|(ind, num_docs)| (ind, f64::from(self.clip_min_size(num_docs)).log2()))
|
||||
.collect();
|
||||
|
||||
if let Some(&(first_ind, first_score)) = size_sorted_log_tuples.first() {
|
||||
let mut current_max_log_size = first_score;
|
||||
let mut levels = vec![vec![first_ind]];
|
||||
for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
|
||||
if score < (current_max_log_size - self.level_log_size) {
|
||||
current_max_log_size = score;
|
||||
levels.push(Vec::new());
|
||||
}
|
||||
levels.last_mut().unwrap().push(ind);
|
||||
let (first_ind, first_score) = size_sorted_log_tuples[0];
|
||||
let mut current_max_log_size = first_score;
|
||||
let mut levels = vec![vec![first_ind]];
|
||||
for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
|
||||
if score < (current_max_log_size - self.level_log_size) {
|
||||
current_max_log_size = score;
|
||||
levels.push(Vec::new());
|
||||
}
|
||||
levels
|
||||
.iter()
|
||||
.filter(|level| level.len() >= self.min_merge_size)
|
||||
.map(|ind_vec| {
|
||||
MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect())
|
||||
})
|
||||
.collect()
|
||||
} else {
|
||||
return vec![];
|
||||
levels.last_mut().unwrap().push(ind);
|
||||
}
|
||||
|
||||
levels
|
||||
.iter()
|
||||
.filter(|level| level.len() >= self.min_merge_size)
|
||||
.map(|ind_vec| MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect()))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,7 +179,6 @@ mod tests {
|
||||
let result_list = test_merge_policy().compute_merge_candidates(&test_input);
|
||||
assert_eq!(result_list.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_log_merge_policy_small_segments() {
|
||||
// segments under min_layer_size are merged together
|
||||
@@ -199,17 +194,6 @@ mod tests {
|
||||
assert_eq!(result_list.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_log_merge_policy_all_segments_too_large_to_merge() {
|
||||
let eight_large_segments: Vec<SegmentMeta> =
|
||||
std::iter::repeat_with(|| create_random_segment_meta(100_001))
|
||||
.take(8)
|
||||
.collect();
|
||||
assert!(test_merge_policy()
|
||||
.compute_merge_candidates(&eight_large_segments)
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_large_merge_segments() {
|
||||
let test_input = vec![
|
||||
|
||||
@@ -1,17 +1,22 @@
|
||||
use crate::indexer::resource_manager::{Allocation, ResourceManager};
|
||||
use crate::Opstamp;
|
||||
use crate::SegmentId;
|
||||
use census::{Inventory, TrackedObject};
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
use std::ops::Deref;
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct MergeOperationInventory {
|
||||
inventory: Inventory<InnerMergeOperation>,
|
||||
num_merge_watcher: ResourceManager,
|
||||
}
|
||||
|
||||
impl Deref for MergeOperationInventory {
|
||||
type Target = Inventory<InnerMergeOperation>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
&self.inventory
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,6 +30,10 @@ impl MergeOperationInventory {
|
||||
}
|
||||
segment_in_merge
|
||||
}
|
||||
|
||||
pub fn wait_until_empty(&self) {
|
||||
let _ = self.num_merge_watcher.wait_until_in_range(0..1);
|
||||
}
|
||||
}
|
||||
|
||||
/// A `MergeOperation` has two roles.
|
||||
@@ -47,6 +56,17 @@ pub struct MergeOperation {
|
||||
pub(crate) struct InnerMergeOperation {
|
||||
target_opstamp: Opstamp,
|
||||
segment_ids: Vec<SegmentId>,
|
||||
_allocation: Allocation,
|
||||
}
|
||||
|
||||
impl fmt::Debug for InnerMergeOperation {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"MergeOp(target_opstamp={:?}, segment_ids={:?})",
|
||||
self.target_opstamp, self.segment_ids
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl MergeOperation {
|
||||
@@ -55,9 +75,11 @@ impl MergeOperation {
|
||||
target_opstamp: Opstamp,
|
||||
segment_ids: Vec<SegmentId>,
|
||||
) -> MergeOperation {
|
||||
let allocation = inventory.num_merge_watcher.allocate(1);
|
||||
let inner_merge_operation = InnerMergeOperation {
|
||||
target_opstamp,
|
||||
segment_ids,
|
||||
_allocation: allocation,
|
||||
};
|
||||
MergeOperation {
|
||||
inner: inventory.track(inner_merge_operation),
|
||||
|
||||
File diff suppressed because it is too large
@@ -2,12 +2,14 @@ pub mod delete_queue;
|
||||
|
||||
mod doc_opstamp_mapping;
|
||||
pub mod index_writer;
|
||||
mod index_writer_config;
|
||||
mod log_merge_policy;
|
||||
mod merge_operation;
|
||||
pub mod merge_policy;
|
||||
pub mod merger;
|
||||
pub mod operation;
|
||||
mod prepared_commit;
|
||||
mod resource_manager;
|
||||
mod segment_entry;
|
||||
mod segment_manager;
|
||||
mod segment_register;
|
||||
@@ -16,22 +18,25 @@ pub mod segment_updater;
|
||||
mod segment_writer;
|
||||
mod stamper;
|
||||
|
||||
pub(crate) use self::resource_manager::{Allocation, ResourceManager};
|
||||
pub(crate) use self::merge_operation::MergeOperationInventory;
|
||||
pub use self::index_writer::IndexWriter;
|
||||
pub use self::index_writer_config::IndexWriterConfig;
|
||||
pub use self::log_merge_policy::LogMergePolicy;
|
||||
pub use self::merge_operation::MergeOperation;
|
||||
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
|
||||
pub use self::prepared_commit::PreparedCommit;
|
||||
pub use self::segment_entry::SegmentEntry;
|
||||
pub use self::segment_manager::SegmentManager;
|
||||
pub(crate) use self::segment_manager::SegmentRegisters;
|
||||
pub use self::segment_serializer::SegmentSerializer;
|
||||
pub use self::segment_writer::SegmentWriter;
|
||||
|
||||
/// Alias for the default merge policy, which is the `LogMergePolicy`.
|
||||
pub type DefaultMergePolicy = LogMergePolicy;
|
||||
|
||||
#[cfg(feature = "mmap")]
|
||||
#[cfg(test)]
|
||||
mod tests_mmap {
|
||||
mod tests {
|
||||
use crate::schema::{self, Schema};
|
||||
use crate::{Index, Term};
|
||||
|
||||
@@ -40,7 +45,7 @@ mod tests_mmap {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", schema::TEXT);
|
||||
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// there must be one deleted document in the segment
|
||||
index_writer.add_document(doc!(text_field=>"b"));
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "b"));
|
||||
|
||||
@@ -7,14 +7,20 @@ pub struct PreparedCommit<'a> {
    index_writer: &'a mut IndexWriter,
    payload: Option<String>,
    opstamp: Opstamp,
    soft_commit: bool,
}

impl<'a> PreparedCommit<'a> {
    pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
    pub(crate) fn new(
        index_writer: &'a mut IndexWriter,
        opstamp: Opstamp,
        soft_commit: bool,
    ) -> PreparedCommit<'_> {
        PreparedCommit {
            index_writer,
            payload: None,
            opstamp,
            soft_commit,
        }
    }

@@ -32,11 +38,12 @@ impl<'a> PreparedCommit<'a> {

    pub fn commit(self) -> crate::Result<Opstamp> {
        info!("committing {}", self.opstamp);
        let _ = block_on(
            self.index_writer
                .segment_updater()
                .schedule_commit(self.opstamp, self.payload),
        );
        block_on(self.index_writer.segment_updater().schedule_commit(
            self.opstamp,
            self.payload,
            self.soft_commit,
        ))?;
        block_on(self.index_writer.trigger_commit());
        Ok(self.opstamp)
    }
}

213 src/indexer/resource_manager.rs Normal file
@@ -0,0 +1,213 @@
use std::ops::RangeBounds;
use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock};

struct LockedData {
    count: u64,
    enabled: bool,
}

impl Default for LockedData {
    fn default() -> Self {
        LockedData {
            count: 0u64,
            enabled: true,
        }
    }
}

#[derive(Default)]
struct Inner {
    resource_level: Mutex<LockedData>,
    convdvar: Condvar,
}

/// The resource manager makes it possible to track the level of a given resource.
/// There is no magic here: it is up to the user to declare how much
/// of the resource is being held.
///
/// Allocation of a resource is bound to the lifetime of an `Allocation` instance.
///
/// ```rust
/// let resource_manager = ResourceManager::default();
///
/// ```
///
/// In tantivy, this is used to track the number of merging threads and the amount of memory
/// used by the volatile segments.
///
#[derive(Clone, Default)]
pub struct ResourceManager {
    inner: Arc<Inner>,
}

impl ResourceManager {
    /// Returns the total amount of resource allocated.
    pub fn total_amount(&self) -> u64 {
        self.lock().count
    }

    fn lock(&self) -> MutexGuard<LockedData> {
        self.inner
            .resource_level
            .lock()
            .expect("Failed to obtain lock for ReservedMemory. This should never happen.")
    }

    fn record_delta(&self, delta: i64) {
        if delta == 0i64 {
            return;
        }
        let mut lock = self.lock();
        let new_val = lock.count as i64 + delta;
        lock.count = new_val as u64;
        self.inner.convdvar.notify_all();
    }

    /// Records a new allocation.
    ///
    /// The returned `Allocation` object is used to automatically release the allocated
    /// resource on drop.
    pub fn allocate(&self, amount: u64) -> Allocation {
        self.record_delta(amount as i64);
        Allocation {
            resource_manager: self.clone(),
            amount: RwLock::new(amount),
        }
    }

    /// Stops the resource manager.
    ///
    /// If any thread is waiting via `.wait_until_in_range(...)`, the method stops
    /// blocking and returns an error.
    pub fn terminate(&self) {
        self.lock().enabled = false;
        self.inner.convdvar.notify_all();
    }

    /// Blocks the current thread until the resource level reaches the given range,
    /// in a cpu-efficient way.
    ///
    /// This method does not necessarily wake up the current thread at every transition
    /// into the targeted range, but any durable entry in the range will be detected.
    pub fn wait_until_in_range<R: RangeBounds<u64>>(&self, range: R) -> Result<u64, u64> {
        let mut levels = self.lock();
        if !levels.enabled {
            return Err(levels.count);
        }
        while !range.contains(&levels.count) {
            levels = self.inner.convdvar.wait(levels).unwrap();
            if !levels.enabled {
                return Err(levels.count);
            }
        }
        Ok(levels.count)
    }
}

pub struct Allocation {
    resource_manager: ResourceManager,
    amount: RwLock<u64>,
}

impl Allocation {
    pub fn amount(&self) -> u64 {
        *self.amount.read().unwrap()
    }

    pub fn modify(&self, new_amount: u64) {
        let mut wlock = self.amount.write().unwrap();
        let delta = new_amount as i64 - *wlock as i64;
        *wlock = new_amount;
        self.resource_manager.record_delta(delta);
    }
}

impl Drop for Allocation {
    fn drop(&mut self) {
        let amount = self.amount();
        self.resource_manager.record_delta(-(amount as i64))
    }
}

#[cfg(test)]
mod tests {
    use super::ResourceManager;
    use futures::channel::oneshot;
    use futures::executor::block_on;
    use std::{mem, thread};

    #[test]
    fn test_simple_allocation() {
        let memory = ResourceManager::default();
        assert_eq!(memory.total_amount(), 0u64);
        let _allocation = memory.allocate(10u64);
        assert_eq!(memory.total_amount(), 10u64);
    }

    #[test]
    fn test_multiple_allocation() {
        let memory = ResourceManager::default();
        assert_eq!(memory.total_amount(), 0u64);
        let _allocation = memory.allocate(10u64);
        let _allocation_2 = memory.allocate(11u64);
        assert_eq!(memory.total_amount(), 21u64);
    }

    #[test]
    fn test_release_on_drop() {
        let memory = ResourceManager::default();
        assert_eq!(memory.total_amount(), 0u64);
        let allocation = memory.allocate(10u64);
        let allocation_2 = memory.allocate(11u64);
        assert_eq!(memory.total_amount(), 21u64);
        mem::drop(allocation);
        assert_eq!(memory.total_amount(), 11u64);
        mem::drop(allocation_2);
        assert_eq!(memory.total_amount(), 0u64);
    }

    #[test]
    fn test_wait_until() {
        let memory = ResourceManager::default();
        let (send, recv) = oneshot::channel::<()>();
        let memory_clone = memory.clone();
        thread::spawn(move || {
            let _allocation1 = memory_clone.allocate(2u64);
            let _allocation2 = memory_clone.allocate(3u64);
            let _allocation3 = memory_clone.allocate(4u64);
            std::mem::drop(_allocation3);
            assert!(block_on(recv).is_ok());
        });
        assert_eq!(memory.wait_until_in_range(5u64..8u64), Ok(5u64));
        assert!(send.send(()).is_ok());
    }

    #[test]
    fn test_modify_amount() {
        let memory = ResourceManager::default();
        let alloc = memory.allocate(2u64);
        assert_eq!(memory.total_amount(), 2u64);
        assert_eq!(alloc.amount(), 2u64);
        let alloc2 = memory.allocate(3u64);
        assert_eq!(memory.total_amount(), 2u64 + 3u64);
        assert_eq!(alloc2.amount(), 3u64);
        alloc.modify(14u64);
        assert_eq!(alloc.amount(), 14u64);
        assert_eq!(memory.total_amount(), 14u64 + 3u64)
    }

    #[test]
    fn test_stop_resource_manager() {
        let resource_manager = ResourceManager::default();
        let resource_manager_clone = resource_manager.clone();
        let (sender, recv) = oneshot::channel();
        let join_handle = thread::spawn(move || {
            assert!(sender.send(()).is_ok());
            resource_manager_clone.wait_until_in_range(10..20)
        });
        let _ = block_on(recv);
        resource_manager.terminate();
        assert_eq!(join_handle.join().unwrap(), Err(0u64));
    }
}
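The `ResourceManager`/`Allocation` contract shown in this file can be summarized with a short, crate-internal sketch that mirrors the unit tests above (illustrative only, not part of the diff):

```rust
// Declaring and releasing a tracked resource level.
let manager = ResourceManager::default();

// Holding an `Allocation` declares usage of 1_000 units.
let alloc = manager.allocate(1_000);
assert_eq!(manager.total_amount(), 1_000);

// An allocation can be re-declared in place; the manager tracks the delta.
alloc.modify(4_000);
assert_eq!(manager.total_amount(), 4_000);

// Dropping the guard brings the tracked level back to zero and wakes up
// any thread blocked in `wait_until_in_range`.
drop(alloc);
assert_eq!(manager.total_amount(), 0);
```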
@@ -1,7 +1,9 @@
|
||||
use crate::common::BitSet;
|
||||
use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::directory::ManagedDirectory;
|
||||
use crate::indexer::delete_queue::DeleteCursor;
|
||||
use crate::{Opstamp, Segment};
|
||||
use std::fmt;
|
||||
|
||||
/// A segment entry describes the state of
|
||||
@@ -19,7 +21,7 @@ use std::fmt;
|
||||
/// in the .del file or in the `delete_bitset`.
|
||||
#[derive(Clone)]
|
||||
pub struct SegmentEntry {
|
||||
meta: SegmentMeta,
|
||||
segment: Segment,
|
||||
delete_bitset: Option<BitSet>,
|
||||
delete_cursor: DeleteCursor,
|
||||
}
|
||||
@@ -27,47 +29,67 @@ pub struct SegmentEntry {
|
||||
impl SegmentEntry {
|
||||
/// Create a new `SegmentEntry`
|
||||
pub fn new(
|
||||
segment_meta: SegmentMeta,
|
||||
segment: Segment,
|
||||
delete_cursor: DeleteCursor,
|
||||
delete_bitset: Option<BitSet>,
|
||||
) -> SegmentEntry {
|
||||
SegmentEntry {
|
||||
meta: segment_meta,
|
||||
segment,
|
||||
delete_bitset,
|
||||
delete_cursor,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a reference to the segment entry deleted bitset.
|
||||
///
|
||||
/// `DocId` in this bitset are flagged as deleted.
|
||||
pub fn delete_bitset(&self) -> Option<&BitSet> {
|
||||
self.delete_bitset.as_ref()
|
||||
pub fn persist(&mut self, dest_directory: ManagedDirectory) -> crate::Result<()> {
|
||||
// TODO take in account delete bitset?
|
||||
self.segment.persist(dest_directory)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set the `SegmentMeta` for this segment.
|
||||
pub fn set_meta(&mut self, segment_meta: SegmentMeta) {
|
||||
self.meta = segment_meta;
|
||||
pub fn set_delete_cursor(&mut self, delete_cursor: DeleteCursor) {
|
||||
self.delete_cursor = delete_cursor;
|
||||
}
|
||||
|
||||
/// `Takes` (as in Option::take) the delete bitset of a segment entry.
|
||||
/// `DocId` in this bitset are flagged as deleted.
|
||||
pub fn take_delete_bitset(&mut self) -> Option<BitSet> {
|
||||
self.delete_bitset.take()
|
||||
}
|
||||
|
||||
/// Reset the delete information in this segment.
|
||||
///
|
||||
/// The `SegmentEntry` segment's `SegmentMeta` gets updated, and
|
||||
/// any delete bitset is dropped and set to `None`.
|
||||
pub fn reset_delete_meta(&mut self, num_deleted_docs: u32, target_opstamp: Opstamp) {
|
||||
self.segment = self
|
||||
.segment
|
||||
.clone()
|
||||
.with_delete_meta(num_deleted_docs, target_opstamp);
|
||||
self.delete_bitset = None;
|
||||
}
|
||||
|
||||
/// Return a reference to the segment_entry's delete cursor
|
||||
pub fn delete_cursor(&mut self) -> &mut DeleteCursor {
|
||||
&mut self.delete_cursor
|
||||
pub fn delete_cursor(&mut self) -> DeleteCursor {
|
||||
self.delete_cursor.clone()
|
||||
}
|
||||
|
||||
/// Returns the segment id.
|
||||
pub fn segment_id(&self) -> SegmentId {
|
||||
self.meta.id()
|
||||
self.meta().id()
|
||||
}
|
||||
|
||||
/// Returns the `segment` associated to the `SegmentEntry`.
|
||||
pub fn segment(&self) -> &Segment {
|
||||
&self.segment
|
||||
}
|
||||
/// Accessor to the `SegmentMeta`
|
||||
pub fn meta(&self) -> &SegmentMeta {
|
||||
&self.meta
|
||||
self.segment.meta()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SegmentEntry {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(formatter, "SegmentEntry({:?})", self.meta)
|
||||
write!(formatter, "SegmentEntry({:?})", self.meta())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,15 +2,15 @@ use super::segment_register::SegmentRegister;
|
||||
use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::error::TantivyError;
|
||||
use crate::indexer::delete_queue::DeleteCursor;
|
||||
use crate::indexer::SegmentEntry;
|
||||
use crate::indexer::{SegmentEntry, MergeOperationInventory, MergeCandidate, MergeOperation};
|
||||
use crate::{Segment, Opstamp};
|
||||
use std::collections::hash_set::HashSet;
|
||||
use std::fmt::{self, Debug, Formatter};
|
||||
use std::sync::RwLock;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::sync::{RwLockReadGuard, RwLockWriteGuard};
|
||||
|
||||
#[derive(Default)]
|
||||
struct SegmentRegisters {
|
||||
pub(crate) struct SegmentRegisters {
|
||||
uncommitted: SegmentRegister,
|
||||
committed: SegmentRegister,
|
||||
}
|
||||
@@ -22,6 +22,17 @@ pub(crate) enum SegmentsStatus {
|
||||
}
|
||||
|
||||
impl SegmentRegisters {
|
||||
pub fn new(committed: SegmentRegister) -> SegmentRegisters {
|
||||
SegmentRegisters {
|
||||
uncommitted: Default::default(),
|
||||
committed,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn committed_segment(&self) -> Vec<Segment> {
|
||||
self.committed.segments()
|
||||
}
|
||||
|
||||
/// Check if all the segments are committed or uncommited.
|
||||
///
|
||||
/// If some segment is missing or segments are in a different state (this should not happen
|
||||
@@ -44,7 +55,8 @@ impl SegmentRegisters {
|
||||
/// changes (merges especially)
|
||||
#[derive(Default)]
|
||||
pub struct SegmentManager {
|
||||
registers: RwLock<SegmentRegisters>,
|
||||
registers: Arc<RwLock<SegmentRegisters>>,
|
||||
merge_operations: MergeOperationInventory,
|
||||
}
|
||||
|
||||
impl Debug for SegmentManager {
|
||||
@@ -58,34 +70,28 @@ impl Debug for SegmentManager {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_mergeable_segments(
|
||||
in_merge_segment_ids: &HashSet<SegmentId>,
|
||||
segment_manager: &SegmentManager,
|
||||
) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
|
||||
let registers_lock = segment_manager.read();
|
||||
(
|
||||
registers_lock
|
||||
.committed
|
||||
.get_mergeable_segments(in_merge_segment_ids),
|
||||
registers_lock
|
||||
.uncommitted
|
||||
.get_mergeable_segments(in_merge_segment_ids),
|
||||
)
|
||||
}
|
||||
|
||||
impl SegmentManager {
|
||||
pub fn from_segments(
|
||||
segment_metas: Vec<SegmentMeta>,
|
||||
delete_cursor: &DeleteCursor,
|
||||
) -> SegmentManager {
|
||||
pub(crate) fn new(registers: Arc<RwLock<SegmentRegisters>>) -> SegmentManager {
|
||||
SegmentManager {
|
||||
registers: RwLock::new(SegmentRegisters {
|
||||
uncommitted: SegmentRegister::default(),
|
||||
committed: SegmentRegister::new(segment_metas, delete_cursor),
|
||||
}),
|
||||
registers,
|
||||
merge_operations: Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_merge_operation(&self, opstamp: Opstamp, merge_candidate: MergeCandidate) -> MergeOperation {
|
||||
MergeOperation::new(
|
||||
&self.merge_operations,
|
||||
opstamp,
|
||||
merge_candidate.0
|
||||
|
||||
)
|
||||
}
|
||||
|
||||
pub fn wait_merging_thread(&self) {
|
||||
self.merge_operations.wait_until_empty()
|
||||
}
|
||||
|
||||
/// Returns all of the segment entries (committed or uncommitted)
|
||||
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
|
||||
let registers_lock = self.read();
|
||||
@@ -94,6 +100,34 @@ impl SegmentManager {
|
||||
segment_entries
|
||||
}
|
||||
|
||||
/// Returns the segments that are currently not in merge.
|
||||
///
|
||||
/// They are split over two `Vec`. The committed segments on one hand
|
||||
/// and the uncommitted ones on the other hand.
|
||||
///
|
||||
/// This method is useful when searching for merge candidates or segments
/// to persist.
|
||||
pub fn segments_not_in_merge(&self) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
|
||||
let in_merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
|
||||
let registers_lock = self.read();
|
||||
(
|
||||
registers_lock
|
||||
.committed
|
||||
.get_mergeable_segments(&in_merge_segment_ids),
|
||||
registers_lock
|
||||
.uncommitted
|
||||
.get_mergeable_segments(&in_merge_segment_ids),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn largest_segment_not_in_merge(&self) -> Option<SegmentMeta> {
|
||||
let (committed, uncommitted) = self.segments_not_in_merge();
|
||||
let mut segments = vec![];
|
||||
segments.extend(committed);
|
||||
segments.extend(uncommitted);
|
||||
None
|
||||
}
|
||||
|
||||
// Lock poisoning should never happen :
|
||||
// The lock is acquired and released within this class,
|
||||
// and the operations cannot panic.
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::directory::ManagedDirectory;
|
||||
use crate::indexer::delete_queue::DeleteCursor;
|
||||
use crate::indexer::segment_entry::SegmentEntry;
|
||||
use crate::schema::Schema;
|
||||
use crate::Segment;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt::{self, Debug, Formatter};
|
||||
@@ -46,6 +49,13 @@ impl SegmentRegister {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn segments(&self) -> Vec<Segment> {
|
||||
self.segment_states
|
||||
.values()
|
||||
.map(|segment_entry| segment_entry.segment().clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
|
||||
self.segment_states.values().cloned().collect()
|
||||
}
|
||||
@@ -79,11 +89,17 @@ impl SegmentRegister {
|
||||
self.segment_states.get(segment_id).cloned()
|
||||
}
|
||||
|
||||
pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister {
|
||||
pub fn new(
|
||||
directory: &ManagedDirectory,
|
||||
schema: &Schema,
|
||||
segment_metas: Vec<SegmentMeta>,
|
||||
delete_cursor: &DeleteCursor,
|
||||
) -> SegmentRegister {
|
||||
let mut segment_states = HashMap::new();
|
||||
for segment_meta in segment_metas {
|
||||
let segment_id = segment_meta.id();
|
||||
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None);
|
||||
let segment = Segment::new_persisted(segment_meta, directory.clone(), schema.clone());
|
||||
let segment_entry = SegmentEntry::new(segment, delete_cursor.clone(), None);
|
||||
segment_states.insert(segment_id, segment_entry);
|
||||
}
|
||||
SegmentRegister { segment_states }
|
||||
@@ -95,6 +111,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::core::{SegmentId, SegmentMetaInventory};
|
||||
use crate::indexer::delete_queue::*;
|
||||
use crate::indexer::ResourceManager;
|
||||
|
||||
fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
|
||||
segment_register
|
||||
@@ -108,28 +125,34 @@ mod tests {
|
||||
fn test_segment_register() {
|
||||
let inventory = SegmentMetaInventory::default();
|
||||
let delete_queue = DeleteQueue::new();
|
||||
let schema = Schema::builder().build();
|
||||
|
||||
let mut segment_register = SegmentRegister::default();
|
||||
let segment_id_a = SegmentId::generate_random();
|
||||
let segment_id_b = SegmentId::generate_random();
|
||||
let segment_id_merged = SegmentId::generate_random();
|
||||
|
||||
let memory_manager = ResourceManager::default();
|
||||
|
||||
{
|
||||
let segment_meta = inventory.new_segment_meta(segment_id_a, 0u32);
|
||||
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
|
||||
let meta = inventory.new_segment_meta(segment_id_a, 0u32);
|
||||
let segment = Segment::new_volatile(meta, schema.clone(), memory_manager.clone());
|
||||
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
|
||||
segment_register.add_segment_entry(segment_entry);
|
||||
}
|
||||
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
|
||||
{
|
||||
let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32);
|
||||
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
|
||||
let meta = inventory.new_segment_meta(segment_id_b, 0u32);
|
||||
let segment = Segment::new_volatile(meta, schema.clone(), memory_manager.clone());
|
||||
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
|
||||
segment_register.add_segment_entry(segment_entry);
|
||||
}
|
||||
segment_register.remove_segment(&segment_id_a);
|
||||
segment_register.remove_segment(&segment_id_b);
|
||||
{
|
||||
let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32);
|
||||
let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None);
|
||||
let segment_merged = Segment::new_volatile(segment_meta_merged, schema.clone(), memory_manager.clone());
|
||||
let segment_entry = SegmentEntry::new(segment_merged, delete_queue.cursor(), None);
|
||||
segment_register.add_segment_entry(segment_entry);
|
||||
}
|
||||
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
|
||||
|
||||
@@ -3,43 +3,32 @@ use crate::core::SegmentComponent;
|
||||
use crate::fastfield::FastFieldSerializer;
|
||||
use crate::fieldnorm::FieldNormsSerializer;
|
||||
use crate::postings::InvertedIndexSerializer;
|
||||
use crate::store::StoreWriter;
|
||||
|
||||
/// Segment serializer is in charge of laying out on disk
|
||||
/// the data accumulated and sorted by the `SegmentWriter`.
|
||||
pub struct SegmentSerializer {
|
||||
segment: Segment,
|
||||
store_writer: StoreWriter,
|
||||
fast_field_serializer: FastFieldSerializer,
|
||||
fieldnorms_serializer: Option<FieldNormsSerializer>,
|
||||
fieldnorms_serializer: FieldNormsSerializer,
|
||||
postings_serializer: InvertedIndexSerializer,
|
||||
}
|
||||
|
||||
impl SegmentSerializer {
|
||||
/// Creates a new `SegmentSerializer`.
|
||||
pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
|
||||
let store_write = segment.open_write(SegmentComponent::STORE)?;
|
||||
|
||||
pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
|
||||
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
|
||||
let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;
|
||||
|
||||
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
|
||||
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
|
||||
|
||||
let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
|
||||
let postings_serializer = InvertedIndexSerializer::open(segment)?;
|
||||
Ok(SegmentSerializer {
|
||||
segment,
|
||||
store_writer: StoreWriter::new(store_write),
|
||||
fast_field_serializer,
|
||||
fieldnorms_serializer: Some(fieldnorms_serializer),
|
||||
fieldnorms_serializer,
|
||||
postings_serializer,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn segment(&self) -> &Segment {
|
||||
&self.segment
|
||||
}
|
||||
|
||||
/// Accessor to the `PostingsSerializer`.
|
||||
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
|
||||
&mut self.postings_serializer
|
||||
@@ -50,26 +39,16 @@ impl SegmentSerializer {
|
||||
&mut self.fast_field_serializer
|
||||
}
|
||||
|
||||
/// Extract the field norm serializer.
|
||||
///
|
||||
/// Note the fieldnorms serializer can only be extracted once.
|
||||
pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
|
||||
self.fieldnorms_serializer.take()
|
||||
}
|
||||
|
||||
/// Accessor to the `StoreWriter`.
|
||||
pub fn get_store_writer(&mut self) -> &mut StoreWriter {
|
||||
&mut self.store_writer
|
||||
/// Accessor to the field norm serializer.
|
||||
pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer {
|
||||
&mut self.fieldnorms_serializer
|
||||
}
|
||||
|
||||
/// Finalize the segment serialization.
|
||||
pub fn close(mut self) -> crate::Result<()> {
|
||||
if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
|
||||
fieldnorms_serializer.close()?;
|
||||
}
|
||||
pub fn close(self) -> crate::Result<()> {
|
||||
self.fast_field_serializer.close()?;
|
||||
self.postings_serializer.close()?;
|
||||
self.store_writer.close()?;
|
||||
self.fieldnorms_serializer.close()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use super::segment_manager::{get_mergeable_segments, SegmentManager};
|
||||
use super::segment_manager::SegmentManager;
|
||||
use crate::core::Index;
|
||||
use crate::core::IndexMeta;
|
||||
use crate::core::Segment;
|
||||
@@ -7,22 +7,21 @@ use crate::core::SegmentMeta;
|
||||
use crate::core::SerializableSegment;
|
||||
use crate::core::META_FILEPATH;
|
||||
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
|
||||
use crate::indexer::delete_queue::DeleteCursor;
|
||||
use crate::indexer::index_writer::advance_deletes;
|
||||
use crate::indexer::merge_operation::MergeOperationInventory;
|
||||
use crate::indexer::merger::IndexMerger;
|
||||
use crate::indexer::segment_manager::SegmentsStatus;
|
||||
use crate::indexer::segment_manager::{SegmentRegisters, SegmentsStatus};
|
||||
use crate::indexer::stamper::Stamper;
|
||||
use crate::indexer::SegmentEntry;
|
||||
use crate::indexer::SegmentSerializer;
|
||||
use crate::indexer::{DefaultMergePolicy, MergePolicy};
|
||||
use crate::indexer::{MergeCandidate, MergeOperation};
|
||||
use crate::schema::Schema;
|
||||
use crate::Opstamp;
|
||||
use crate::{Opstamp, SegmentComponent};
|
||||
use futures::channel::oneshot;
|
||||
use futures::executor::{ThreadPool, ThreadPoolBuilder};
|
||||
use futures::future::Future;
|
||||
use futures::future::TryFutureExt;
|
||||
use serde_json;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::collections::HashSet;
|
||||
use std::io::Write;
|
||||
@@ -43,7 +42,7 @@ const NUM_MERGE_THREADS: usize = 4;
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> {
|
||||
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
|
||||
save_metas(
|
||||
&IndexMeta {
|
||||
segments: Vec::new(),
|
||||
@@ -64,7 +63,7 @@ pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Resul
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
|
||||
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
|
||||
info!("save metas");
|
||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||
// Just adding a new line at the end of the buffer.
|
||||
@@ -112,33 +111,36 @@ fn merge(
|
||||
target_opstamp: Opstamp,
|
||||
) -> crate::Result<SegmentEntry> {
|
||||
// first we need to apply deletes to our segment.
|
||||
let merged_segment = index.new_segment();
|
||||
let mut merged_segment = index.new_segment();
|
||||
|
||||
// First we apply all of the deletes to the merged segment, up to the target opstamp.
|
||||
for segment_entry in &mut segment_entries {
|
||||
let segment = index.segment(segment_entry.meta().clone());
|
||||
advance_deletes(segment, segment_entry, target_opstamp)?;
|
||||
advance_deletes(segment_entry, target_opstamp)?;
|
||||
}
|
||||
|
||||
let delete_cursor = segment_entries[0].delete_cursor().clone();
|
||||
let delete_cursor = segment_entries[0].delete_cursor();
|
||||
|
||||
let segments: Vec<Segment> = segment_entries
|
||||
.iter()
|
||||
.map(|segment_entry| index.segment(segment_entry.meta().clone()))
|
||||
.map(|segment_entry| segment_entry.segment().clone())
|
||||
.collect();
|
||||
|
||||
// An IndexMerger is like a "view" of our merged segments.
|
||||
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
|
||||
|
||||
// ... we just serialize this index merger in our new segment to merge the two segments.
|
||||
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
|
||||
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
|
||||
|
||||
let num_docs = merger.write(segment_serializer)?;
|
||||
let store_wrt = merged_segment.open_write(SegmentComponent::STORE)?;
|
||||
merger.write_storable_fields(store_wrt)?;
|
||||
|
||||
let merged_segment_id = merged_segment.id();
|
||||
let max_doc = merger.write(segment_serializer)?;
|
||||
|
||||
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
|
||||
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
|
||||
Ok(SegmentEntry::new(
|
||||
merged_segment.with_max_doc(max_doc),
|
||||
delete_cursor,
|
||||
None,
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) struct InnerSegmentUpdater {
|
||||
@@ -157,17 +159,15 @@ pub(crate) struct InnerSegmentUpdater {
|
||||
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
|
||||
killed: AtomicBool,
|
||||
stamper: Stamper,
|
||||
merge_operations: MergeOperationInventory,
|
||||
}
|
||||
|
||||
impl SegmentUpdater {
|
||||
pub fn create(
|
||||
segment_registers: Arc<RwLock<SegmentRegisters>>,
|
||||
index: Index,
|
||||
stamper: Stamper,
|
||||
delete_cursor: &DeleteCursor,
|
||||
) -> crate::Result<SegmentUpdater> {
|
||||
let segments = index.searchable_segment_metas()?;
|
||||
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
|
||||
let segment_manager = SegmentManager::new(segment_registers);
|
||||
let pool = ThreadPoolBuilder::new()
|
||||
.name_prefix("segment_updater")
|
||||
.pool_size(1)
|
||||
@@ -196,7 +196,6 @@ impl SegmentUpdater {
|
||||
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
|
||||
killed: AtomicBool::new(false),
|
||||
stamper,
|
||||
merge_operations: Default::default(),
|
||||
})))
|
||||
}
|
||||
|
||||
@@ -262,8 +261,7 @@ impl SegmentUpdater {
|
||||
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
|
||||
let mut segment_entries = self.segment_manager.segment_entries();
|
||||
for segment_entry in &mut segment_entries {
|
||||
let segment = self.index.segment(segment_entry.meta().clone());
|
||||
advance_deletes(segment, segment_entry, target_opstamp)?;
|
||||
advance_deletes(segment_entry, target_opstamp)?;
|
||||
}
|
||||
Ok(segment_entries)
|
||||
}
|
||||
@@ -331,12 +329,21 @@ impl SegmentUpdater {
|
||||
&self,
|
||||
opstamp: Opstamp,
|
||||
payload: Option<String>,
|
||||
soft_commit: bool,
|
||||
) -> impl Future<Output = crate::Result<()>> {
|
||||
let segment_updater: SegmentUpdater = self.clone();
|
||||
let directory = self.index.directory().clone();
|
||||
self.schedule_future(async move {
|
||||
let segment_entries = segment_updater.purge_deletes(opstamp)?;
|
||||
let mut segment_entries = segment_updater.purge_deletes(opstamp)?;
|
||||
if !soft_commit {
|
||||
for segment_entry in &mut segment_entries {
|
||||
segment_entry.persist(directory.clone())?;
|
||||
}
|
||||
}
|
||||
segment_updater.segment_manager.commit(segment_entries);
|
||||
segment_updater.save_metas(opstamp, payload)?;
|
||||
if !soft_commit {
|
||||
segment_updater.save_metas(opstamp, payload)?;
|
||||
}
|
||||
let _ = garbage_collect_files(segment_updater.clone()).await;
|
||||
segment_updater.consider_merge_options().await;
|
||||
Ok(())
|
||||
@@ -353,7 +360,7 @@ impl SegmentUpdater {
|
||||
|
||||
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
|
||||
let commit_opstamp = self.load_metas().opstamp;
|
||||
MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
|
||||
self.segment_manager.new_merge_operation(commit_opstamp, MergeCandidate(segment_ids.to_vec()))
|
||||
}
|
||||
|
||||
// Starts a merge operation. This function will block until the merge operation is effectively
|
||||
@@ -427,9 +434,8 @@ impl SegmentUpdater {
|
||||
}
|
||||
|
||||
async fn consider_merge_options(&self) {
|
||||
let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
|
||||
let (committed_segments, uncommitted_segments) =
|
||||
get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
|
||||
self.segment_manager.segments_not_in_merge();
|
||||
|
||||
// Committed segments cannot be merged with uncommitted_segments.
|
||||
// We therefore consider merges using these two sets of segments independently.
|
||||
@@ -440,7 +446,7 @@ impl SegmentUpdater {
|
||||
.compute_merge_candidates(&uncommitted_segments)
|
||||
.into_iter()
|
||||
.map(|merge_candidate| {
|
||||
MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
|
||||
self.segment_manager.new_merge_operation(current_opstamp, merge_candidate)
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -449,9 +455,10 @@ impl SegmentUpdater {
|
||||
.compute_merge_candidates(&committed_segments)
|
||||
.into_iter()
|
||||
.map(|merge_candidate: MergeCandidate| {
|
||||
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
|
||||
});
|
||||
merge_candidates.extend(committed_merge_candidates);
|
||||
self.segment_manager.new_merge_operation(commit_opstamp, merge_candidate)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
merge_candidates.extend(committed_merge_candidates.into_iter());
|
||||
|
||||
for merge_operation in merge_candidates {
|
||||
if let Err(err) = self.start_merge(merge_operation) {
|
||||
@@ -473,17 +480,13 @@ impl SegmentUpdater {
|
||||
let end_merge_future = self.schedule_future(async move {
|
||||
info!("End merge {:?}", after_merge_segment_entry.meta());
|
||||
{
|
||||
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
|
||||
let mut delete_cursor = after_merge_segment_entry.delete_cursor();
|
||||
if let Some(delete_operation) = delete_cursor.get() {
|
||||
let committed_opstamp = segment_updater.load_metas().opstamp;
|
||||
if delete_operation.opstamp < committed_opstamp {
|
||||
let index = &segment_updater.index;
|
||||
let segment = index.segment(after_merge_segment_entry.meta().clone());
|
||||
if let Err(e) = advance_deletes(
|
||||
segment,
|
||||
&mut after_merge_segment_entry,
|
||||
committed_opstamp,
|
||||
) {
|
||||
if let Err(e) =
|
||||
advance_deletes(&mut after_merge_segment_entry, committed_opstamp)
|
||||
{
|
||||
error!(
|
||||
"Merge of {:?} was cancelled (advancing deletes failed): {:?}",
|
||||
merge_operation.segment_ids(),
|
||||
@@ -521,7 +524,7 @@ impl SegmentUpdater {
|
||||
///
|
||||
/// Upon termination of the current merging threads,
|
||||
/// merge opportunity may appear.
|
||||
///
|
||||
//
|
||||
/// We keep waiting until the merge policy judges that
|
||||
/// no opportunity is available.
|
||||
///
|
||||
@@ -532,9 +535,8 @@ impl SegmentUpdater {
|
||||
///
|
||||
/// Obsolete files will eventually be cleaned up
|
||||
/// by the directory garbage collector.
|
||||
pub fn wait_merging_thread(&self) -> crate::Result<()> {
|
||||
self.merge_operations.wait_until_empty();
|
||||
Ok(())
|
||||
pub fn wait_merging_thread(&self) {
|
||||
self.segment_manager.wait_merging_thread()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -543,7 +545,8 @@ mod tests {
|
||||
|
||||
use crate::indexer::merge_policy::tests::MergeWheneverPossible;
|
||||
use crate::schema::*;
|
||||
use crate::Index;
|
||||
use crate::{Index, SegmentId};
|
||||
use futures::executor::block_on;
|
||||
|
||||
#[test]
|
||||
fn test_delete_during_merge() {
|
||||
@@ -554,7 +557,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
|
||||
|
||||
{
|
||||
@@ -607,7 +610,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
|
||||
{
|
||||
for _ in 0..100 {
|
||||
@@ -678,7 +681,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
|
||||
{
|
||||
for _ in 0..100 {
|
||||
@@ -694,4 +697,27 @@ mod tests {
|
||||
.segment_entries();
|
||||
assert!(seg_vec.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_over_soft_commit() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
assert!(index_writer.soft_commit().is_ok());
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
assert!(index_writer.soft_commit().is_ok());
|
||||
|
||||
let reader = index_writer.reader(1).unwrap();
|
||||
let segment_ids: Vec<SegmentId> = reader
|
||||
.searcher()
|
||||
.segment_readers()
|
||||
.iter()
|
||||
.map(|reader| reader.segment_id())
|
||||
.collect();
|
||||
assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
use super::operation::AddOperation;
|
||||
use crate::core::Segment;
|
||||
use crate::core::SerializableSegment;
|
||||
use crate::directory::{SpillingResult, SpillingWriter, TerminatingWrite};
|
||||
use crate::fastfield::FastFieldsWriter;
|
||||
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
|
||||
use crate::fieldnorm::FieldNormsWriter;
|
||||
use crate::indexer::segment_serializer::SegmentSerializer;
|
||||
use crate::indexer::IndexWriterConfig;
|
||||
use crate::postings::compute_table_size;
|
||||
use crate::postings::MultiFieldPostingsWriter;
|
||||
use crate::schema::FieldType;
|
||||
@@ -11,19 +13,24 @@ use crate::schema::Schema;
|
||||
use crate::schema::Term;
|
||||
use crate::schema::Value;
|
||||
use crate::schema::{Field, FieldEntry};
|
||||
use crate::store::StoreWriter;
|
||||
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
|
||||
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
|
||||
use crate::tokenizer::{FacetTokenizer, TextAnalyzer, TokenizerManager};
|
||||
use crate::tokenizer::{TokenStreamChain, Tokenizer};
|
||||
use crate::Opstamp;
|
||||
use crate::{DocId, SegmentComponent};
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::str;
|
||||
use crate::indexer::resource_manager::ResourceManager;
|
||||
|
||||
/// Computes the initial size of the hash table.
///
/// Returns a number of bit `b`, such that the recommended initial table size is 2^b.
fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
let table_memory_upper_bound = per_thread_memory_budget / 3;
fn initial_table_size(per_thread_memory_budget: u64) -> crate::Result<usize> {
let table_memory_upper_bound = per_thread_memory_budget / 3u64;
if let Some(limit) = (10..)
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound)
.take_while(|num_bits| compute_table_size(*num_bits) < table_memory_upper_bound)
.last()
{
Ok(limit.min(19)) // we cap it at 2^19 = 512K.
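The selection rule above picks the largest power-of-two table that still fits in a third of the per-thread memory budget, capped at 2^19 buckets. A minimal standalone sketch of that rule, assuming a made-up cost of 16 bytes per bucket (the real `compute_table_size` is not shown in this diff, and the sketch returns an `Option` rather than a `Result`):

```rust
// Illustrative only: `bytes_per_bucket` is an assumed cost model, not
// tantivy's actual `compute_table_size`.
fn table_size_bytes(num_bits: usize, bytes_per_bucket: usize) -> usize {
    (1usize << num_bits) * bytes_per_bucket
}

// Mirrors the selection rule: largest `num_bits >= 10` whose table fits in a
// third of the budget, capped at 19 (2^19 = 512K buckets).
fn pick_num_bits(per_thread_memory_budget: usize) -> Option<usize> {
    let table_memory_upper_bound = per_thread_memory_budget / 3;
    (10usize..)
        .take_while(|&num_bits| table_size_bytes(num_bits, 16) < table_memory_upper_bound)
        .last()
        .map(|num_bits| num_bits.min(19))
}

fn main() {
    // A 200 MB budget would allow 2^21 buckets under this cost model,
    // but the cap brings the result back to 2^19.
    assert_eq!(pick_num_bits(200_000_000), Some(19));
}
```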
@@ -41,12 +48,13 @@ fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
|
||||
pub struct SegmentWriter {
|
||||
max_doc: DocId,
|
||||
multifield_postings: MultiFieldPostingsWriter,
|
||||
segment_serializer: SegmentSerializer,
|
||||
segment: Segment,
|
||||
fast_field_writers: FastFieldsWriter,
|
||||
fieldnorms_writer: FieldNormsWriter,
|
||||
doc_opstamps: Vec<Opstamp>,
|
||||
tokenizers: Vec<Option<TextAnalyzer>>,
|
||||
term_buffer: Term,
|
||||
store_writer: StoreWriter<SpillingWriter>,
|
||||
memory_manager: ResourceManager,
|
||||
}
|
||||
|
||||
impl SegmentWriter {
|
||||
@@ -60,13 +68,13 @@ impl SegmentWriter {
|
||||
/// - segment: The segment being written
|
||||
/// - schema
|
||||
pub fn for_segment(
|
||||
memory_budget: usize,
|
||||
config: &IndexWriterConfig,
|
||||
segment: Segment,
|
||||
schema: &Schema,
|
||||
tokenizer_manager: &TokenizerManager,
|
||||
memory_manager: ResourceManager
|
||||
) -> crate::Result<SegmentWriter> {
|
||||
let tokenizer_manager = segment.index().tokenizers().clone();
|
||||
let table_num_bits = initial_table_size(memory_budget)?;
|
||||
let segment_serializer = SegmentSerializer::for_segment(segment)?;
|
||||
let table_num_bits = initial_table_size(config.heap_size_in_byte_per_thread())?;
|
||||
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
|
||||
let tokenizers = schema
|
||||
.fields()
|
||||
@@ -82,15 +90,26 @@ impl SegmentWriter {
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
let mut segment_clone = segment.clone();
|
||||
let spilling_wrt = SpillingWriter::new(
|
||||
50_000_000,
|
||||
Box::new(move || {
|
||||
segment_clone
|
||||
.open_write(SegmentComponent::STORE)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
|
||||
}),
|
||||
);
|
||||
let store_writer = StoreWriter::new(spilling_wrt);
|
||||
Ok(SegmentWriter {
|
||||
max_doc: 0,
|
||||
multifield_postings,
|
||||
fieldnorms_writer: FieldNormsWriter::for_schema(schema),
|
||||
segment_serializer,
|
||||
segment,
|
||||
fast_field_writers: FastFieldsWriter::from_schema(schema),
|
||||
doc_opstamps: Vec::with_capacity(1_000),
|
||||
tokenizers,
|
||||
term_buffer: Term::new(),
|
||||
store_writer,
|
||||
memory_manager
|
||||
})
|
||||
}
|
||||
|
||||
@@ -98,29 +117,40 @@ impl SegmentWriter {
|
||||
///
|
||||
/// Finalize consumes the `SegmentWriter`, so that it cannot
|
||||
/// be used afterwards.
|
||||
pub fn finalize(mut self) -> crate::Result<Vec<u64>> {
|
||||
pub fn finalize(mut self) -> crate::Result<(Segment, Vec<u64>)> {
|
||||
self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
|
||||
let spilling_wrt = self.store_writer.close()?;
|
||||
let mut segment: Segment;
|
||||
match spilling_wrt.finalize()? {
|
||||
SpillingResult::Spilled => {
|
||||
segment = self.segment.clone();
|
||||
}
|
||||
SpillingResult::Buffer(buf) => {
|
||||
segment = self.segment.into_volatile(self.memory_manager.clone());
|
||||
let mut store_wrt = segment.open_write(SegmentComponent::STORE)?;
|
||||
store_wrt.write_all(&buf[..])?;
|
||||
store_wrt.terminate()?;
|
||||
}
|
||||
}
|
||||
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
|
||||
segment = segment.with_max_doc(self.max_doc);
|
||||
write(
|
||||
&self.multifield_postings,
|
||||
&self.fast_field_writers,
|
||||
&self.fieldnorms_writer,
|
||||
self.segment_serializer,
|
||||
segment_serializer,
|
||||
)?;
|
||||
Ok(self.doc_opstamps)
|
||||
Ok((segment, self.doc_opstamps))
|
||||
}
|
||||
|
||||
pub fn mem_usage(&self) -> usize {
pub fn mem_usage(&self) -> u64 {
self.multifield_postings.mem_usage()
}

/// Indexes a new document
///
/// As a user, you should rather use `IndexWriter`'s add_document.
pub fn add_document(
&mut self,
add_operation: AddOperation,
schema: &Schema,
) -> crate::Result<()> {
pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
let doc_id = self.max_doc;
let mut doc = add_operation.document;
self.doc_opstamps.push(add_operation.opstamp);
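The `finalize` path earlier in this file keeps the doc store in memory when it stayed small and only falls back to a real store file once the spilling threshold (50_000_000 bytes in the constructor above) is crossed. Below is a minimal, self-contained sketch of that buffering pattern; the names are illustrative and are not tantivy's actual `SpillingWriter`/`SpillingResult` API.

```rust
use std::io::{self, Write};

// Outcome of closing the writer: either everything still fits in memory, or
// the bytes were already handed to the backing writer (mirrors the two match
// arms of `finalize` above).
enum SpillOutcome<W> {
    Buffer(Vec<u8>),
    Spilled(W),
}

struct SpillingSketch<W: Write, F: FnMut() -> io::Result<W>> {
    buffer: Vec<u8>,
    threshold: usize,
    make_writer: F,
    spilled: Option<W>,
}

impl<W: Write, F: FnMut() -> io::Result<W>> SpillingSketch<W, F> {
    fn new(threshold: usize, make_writer: F) -> Self {
        SpillingSketch { buffer: Vec::new(), threshold, make_writer, spilled: None }
    }

    fn write_all(&mut self, data: &[u8]) -> io::Result<()> {
        if let Some(wrt) = self.spilled.as_mut() {
            return wrt.write_all(data);
        }
        self.buffer.extend_from_slice(data);
        if self.buffer.len() > self.threshold {
            // Threshold crossed: create the backing writer lazily, move the
            // buffered bytes over, and let later writes bypass the buffer.
            let mut wrt = (self.make_writer)()?;
            wrt.write_all(&self.buffer)?;
            self.buffer.clear();
            self.spilled = Some(wrt);
        }
        Ok(())
    }

    fn finalize(self) -> SpillOutcome<W> {
        match self.spilled {
            Some(wrt) => SpillOutcome::Spilled(wrt),
            None => SpillOutcome::Buffer(self.buffer),
        }
    }
}
```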
@@ -128,45 +158,34 @@ impl SegmentWriter {
|
||||
self.fast_field_writers.add_document(&doc);
|
||||
|
||||
for (field, field_values) in doc.get_sorted_field_values() {
|
||||
let field_entry = schema.get_field_entry(field);
|
||||
let make_schema_error = || {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
"Expected a {:?} for field {:?}",
|
||||
field_entry.field_type().value_type(),
|
||||
field_entry.name()
|
||||
))
|
||||
};
|
||||
if !field_entry.is_indexed() {
|
||||
let field_options = schema.get_field_entry(field);
|
||||
if !field_options.is_indexed() {
|
||||
continue;
|
||||
}
|
||||
let (term_buffer, multifield_postings) =
|
||||
(&mut self.term_buffer, &mut self.multifield_postings);
|
||||
match *field_entry.field_type() {
|
||||
match *field_options.field_type() {
|
||||
FieldType::HierarchicalFacet => {
|
||||
term_buffer.set_field(field);
|
||||
let facets =
|
||||
field_values
|
||||
.iter()
|
||||
.flat_map(|field_value| match *field_value.value() {
|
||||
Value::Facet(ref facet) => Some(facet.encoded_str()),
|
||||
_ => {
|
||||
panic!("Expected hierarchical facet");
|
||||
}
|
||||
});
|
||||
for facet_str in facets {
|
||||
let facets: Vec<&str> = field_values
|
||||
.iter()
|
||||
.flat_map(|field_value| match *field_value.value() {
|
||||
Value::Facet(ref facet) => Some(facet.encoded_str()),
|
||||
_ => {
|
||||
panic!("Expected hierarchical facet");
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let mut term = Term::for_field(field); // we set the Term
|
||||
for fake_str in facets {
|
||||
let mut unordered_term_id_opt = None;
|
||||
FacetTokenizer
|
||||
.token_stream(facet_str)
|
||||
.process(&mut |token| {
|
||||
term_buffer.set_text(&token.text);
|
||||
let unordered_term_id =
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
unordered_term_id_opt = Some(unordered_term_id);
|
||||
});
|
||||
FacetTokenizer.token_stream(fake_str).process(&mut |token| {
|
||||
term.set_text(&token.text);
|
||||
let unordered_term_id =
|
||||
self.multifield_postings.subscribe(doc_id, &term);
|
||||
unordered_term_id_opt = Some(unordered_term_id);
|
||||
});
|
||||
if let Some(unordered_term_id) = unordered_term_id_opt {
|
||||
self.fast_field_writers
|
||||
.get_multivalue_writer(field)
|
||||
.expect("writer for facet missing")
|
||||
.expect("multified writer for facet missing")
|
||||
.add_val(unordered_term_id);
|
||||
}
|
||||
}
|
||||
@@ -183,6 +202,7 @@ impl SegmentWriter {
|
||||
if let Some(last_token) = tok_str.tokens.last() {
|
||||
total_offset += last_token.offset_to;
|
||||
}
|
||||
|
||||
token_streams
|
||||
.push(PreTokenizedStream::from(tok_str.clone()).into());
|
||||
}
|
||||
@@ -192,6 +212,7 @@ impl SegmentWriter {
|
||||
{
|
||||
offsets.push(total_offset);
|
||||
total_offset += text.len();
|
||||
|
||||
token_streams.push(tokenizer.token_stream(text));
|
||||
}
|
||||
}
|
||||
@@ -203,12 +224,8 @@ impl SegmentWriter {
|
||||
0
|
||||
} else {
|
||||
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
|
||||
multifield_postings.index_text(
|
||||
doc_id,
|
||||
field,
|
||||
&mut token_stream,
|
||||
term_buffer,
|
||||
)
|
||||
self.multifield_postings
|
||||
.index_text(doc_id, field, &mut token_stream)
|
||||
};
|
||||
|
||||
self.fieldnorms_writer.record(doc_id, field, num_tokens);
|
||||
@@ -216,74 +233,55 @@ impl SegmentWriter {
|
||||
FieldType::U64(ref int_option) => {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
let u64_val = field_value
|
||||
.value()
|
||||
.u64_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_u64(u64_val);
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
let term = Term::from_field_u64(
|
||||
field_value.field(),
|
||||
field_value.value().u64_value(),
|
||||
);
|
||||
self.multifield_postings.subscribe(doc_id, &term);
|
||||
}
|
||||
}
|
||||
}
|
||||
FieldType::Date(ref int_option) => {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
let date_val = field_value
|
||||
.value()
|
||||
.date_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_i64(date_val.timestamp());
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
let term = Term::from_field_i64(
|
||||
field_value.field(),
|
||||
field_value.value().date_value().timestamp(),
|
||||
);
|
||||
self.multifield_postings.subscribe(doc_id, &term);
|
||||
}
|
||||
}
|
||||
}
|
||||
FieldType::I64(ref int_option) => {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
let i64_val = field_value
|
||||
.value()
|
||||
.i64_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_i64(i64_val);
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
let term = Term::from_field_i64(
|
||||
field_value.field(),
|
||||
field_value.value().i64_value(),
|
||||
);
|
||||
self.multifield_postings.subscribe(doc_id, &term);
|
||||
}
|
||||
}
|
||||
}
|
||||
FieldType::F64(ref int_option) => {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
let f64_val = field_value
|
||||
.value()
|
||||
.f64_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_f64(f64_val);
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
let term = Term::from_field_f64(
|
||||
field_value.field(),
|
||||
field_value.value().f64_value(),
|
||||
);
|
||||
self.multifield_postings.subscribe(doc_id, &term);
|
||||
}
|
||||
}
|
||||
}
|
||||
FieldType::Bytes(ref option) => {
|
||||
if option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
let bytes = field_value
|
||||
.value()
|
||||
.bytes_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_bytes(bytes);
|
||||
self.multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
}
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
// Do nothing. Bytes only supports fast fields.
|
||||
}
|
||||
}
|
||||
}
|
||||
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
|
||||
doc.prepare_for_store();
|
||||
let doc_writer = self.segment_serializer.get_store_writer();
|
||||
doc_writer.store(&doc)?;
|
||||
self.store_writer.store(&doc)?;
|
||||
self.max_doc += 1;
|
||||
Ok(())
|
||||
}
|
||||
@@ -316,16 +314,9 @@ fn write(
|
||||
fieldnorms_writer: &FieldNormsWriter,
|
||||
mut serializer: SegmentSerializer,
|
||||
) -> crate::Result<()> {
|
||||
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
|
||||
fieldnorms_writer.serialize(fieldnorms_serializer)?;
|
||||
}
|
||||
let fieldnorm_data = serializer
|
||||
.segment()
|
||||
.open_read(SegmentComponent::FIELDNORMS)?;
|
||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||
let term_ord_map =
|
||||
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
|
||||
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
|
||||
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
|
||||
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
|
||||
serializer.close()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
531
src/lib.rs
@@ -105,7 +105,7 @@ extern crate serde_json;
extern crate log;

#[macro_use]
extern crate thiserror;
extern crate failure;

#[cfg(all(test, feature = "unstable"))]
extern crate test;
@@ -134,7 +134,7 @@ mod core;
|
||||
mod indexer;
|
||||
|
||||
#[allow(unused_doc_comments)]
|
||||
pub mod error;
|
||||
mod error;
|
||||
pub mod tokenizer;
|
||||
|
||||
pub mod collector;
|
||||
@@ -156,15 +156,14 @@ mod snippet;
|
||||
pub use self::snippet::{Snippet, SnippetGenerator};
|
||||
|
||||
mod docset;
|
||||
pub use self::docset::{DocSet, TERMINATED};
|
||||
pub use crate::common::HasLen;
|
||||
pub use self::docset::{DocSet, SkipResult};
|
||||
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
|
||||
pub use crate::core::{Executor, SegmentComponent};
|
||||
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
|
||||
pub use crate::core::{InvertedIndexReader, SegmentReader};
|
||||
pub use crate::directory::Directory;
|
||||
pub use crate::indexer::operation::UserOperation;
|
||||
pub use crate::indexer::IndexWriter;
|
||||
pub use crate::indexer::{IndexWriter, IndexWriterConfig};
|
||||
pub use crate::postings::Postings;
|
||||
pub use crate::reader::LeasedItem;
|
||||
pub use crate::schema::{Document, Term};
|
||||
@@ -174,7 +173,7 @@ use once_cell::sync::Lazy;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Index format version.
|
||||
const INDEX_FORMAT_VERSION: u32 = 2;
|
||||
const INDEX_FORMAT_VERSION: u32 = 1;
|
||||
|
||||
/// Structure version for the index.
|
||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@@ -246,10 +245,11 @@ pub type DocId = u32;
|
||||
/// with opstamp `n+1`.
|
||||
pub type Opstamp = u64;
|
||||
|
||||
/// A Score that represents the relevance of the document to the query
|
||||
/// A f32 that represents the relevance of the document to the query
|
||||
///
|
||||
/// This is modelled internally as a `f32`. The larger the number, the more relevant
|
||||
/// the document to the search query.
|
||||
/// This is modelled internally as a `f32`. The
|
||||
/// larger the number, the more relevant the document
|
||||
/// to the search
|
||||
pub type Score = f32;
|
||||
|
||||
/// A `SegmentLocalId` identifies a segment.
|
||||
@@ -277,18 +277,20 @@ impl DocAddress {
///
/// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId);

#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::docset::DocSet;
|
||||
use crate::query::BooleanQuery;
|
||||
use crate::schema::*;
|
||||
use crate::DocAddress;
|
||||
use crate::Index;
|
||||
use crate::IndexWriter;
|
||||
use crate::Postings;
|
||||
use crate::ReloadPolicy;
|
||||
use rand::distributions::Bernoulli;
|
||||
@@ -296,26 +298,17 @@ mod tests {
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
/// Checks if left and right are close one to each other.
|
||||
/// Panics if the two values are more than 0.5% apart.
|
||||
#[macro_export]
|
||||
macro_rules! assert_nearly_equals {
|
||||
($left:expr, $right:expr) => {{
|
||||
match (&$left, &$right) {
|
||||
(left_val, right_val) => {
|
||||
let diff = (left_val - right_val).abs();
|
||||
let add = left_val.abs() + right_val.abs();
|
||||
if diff > 0.0005 * add {
|
||||
panic!(
|
||||
r#"assertion failed: `(left ~= right)`
|
||||
left: `{:?}`,
|
||||
right: `{:?}`"#,
|
||||
&*left_val, &*right_val
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
pub fn assert_nearly_equals(expected: f32, val: f32) {
assert!(
nearly_equals(val, expected),
"Got {}, expected {}.",
val,
expected
);
}

pub fn nearly_equals(a: f32, b: f32) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs()
}

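A quick, self-contained check of the 0.5% relative tolerance encoded in `nearly_equals` above; the function is repeated here so the snippet runs on its own, and the sample values are illustrative:

```rust
fn nearly_equals(a: f32, b: f32) -> bool {
    (a - b).abs() < 0.0005 * (a + b).abs()
}

fn main() {
    assert!(nearly_equals(1000.0, 1000.4)); // about 0.04% apart: accepted
    assert!(!nearly_equals(1000.0, 1010.0)); // about 1% apart: rejected
}
```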
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
||||
@@ -353,14 +346,14 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
#[cfg(feature = "mmap")]
|
||||
fn test_indexing() -> crate::Result<()> {
|
||||
fn test_indexing() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_from_tempdir(schema).unwrap();
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"af b");
|
||||
index_writer.add_document(doc);
|
||||
@@ -375,91 +368,120 @@ mod tests {
|
||||
}
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_docfreq1() -> crate::Result<()> {
|
||||
fn test_docfreq1() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.commit()?;
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
index_writer.add_document(doc!(text_field=>"a a"));
|
||||
index_writer.commit()?;
|
||||
index_writer.add_document(doc!(text_field=>"c"));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
assert_eq!(searcher.doc_freq(&term_a)?, 3);
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
assert_eq!(searcher.doc_freq(&term_b)?, 1);
|
||||
let term_c = Term::from_field_text(text_field, "c");
|
||||
assert_eq!(searcher.doc_freq(&term_c)?, 2);
|
||||
let term_d = Term::from_field_text(text_field, "d");
|
||||
assert_eq!(searcher.doc_freq(&term_d)?, 0);
|
||||
Ok(())
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
{
|
||||
let doc = doc!(text_field=>"a");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let doc = doc!(text_field=>"a a");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
let doc = doc!(text_field=>"c");
|
||||
index_writer.add_document(doc);
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
assert_eq!(searcher.doc_freq(&term_a), 3);
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
assert_eq!(searcher.doc_freq(&term_b), 1);
|
||||
let term_c = Term::from_field_text(text_field, "c");
|
||||
assert_eq!(searcher.doc_freq(&term_c), 2);
|
||||
let term_d = Term::from_field_text(text_field, "d");
|
||||
assert_eq!(searcher.doc_freq(&term_d), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> {
|
||||
fn test_fieldnorm_no_docs_with_field() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let title_field = schema_builder.add_text_field("title", TEXT);
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.commit()?;
|
||||
let index_reader = index.reader()?;
|
||||
let searcher = index_reader.searcher();
|
||||
let reader = searcher.segment_reader(0);
|
||||
{
|
||||
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?;
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"a b c");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?;
|
||||
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
|
||||
let index_reader = index.reader().unwrap();
|
||||
let searcher = index_reader.searcher();
|
||||
let reader = searcher.segment_reader(0);
|
||||
{
|
||||
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
|
||||
}
|
||||
{
|
||||
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fieldnorm() -> crate::Result<()> {
|
||||
fn test_fieldnorm() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.add_document(doc!());
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?;
|
||||
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
|
||||
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
|
||||
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
|
||||
Ok(())
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"a b c");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let doc = doc!();
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let doc = doc!(text_field=>"a b");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
|
||||
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
|
||||
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
|
||||
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
|
||||
}
|
||||
}
|
||||
|
||||
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
|
||||
let mut doc = docset.advance();
|
||||
while doc != TERMINATED {
|
||||
if !reader.is_deleted(doc) {
|
||||
while docset.advance() {
|
||||
if !reader.is_deleted(docset.doc()) {
|
||||
return true;
|
||||
}
|
||||
doc = docset.advance();
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_delete_postings1() -> crate::Result<()> {
|
||||
fn test_delete_postings1() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let term_abcd = Term::from_field_text(text_field, "abcd");
|
||||
@@ -475,7 +497,7 @@ mod tests {
|
||||
.unwrap();
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// 0
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
// 1
|
||||
@@ -491,19 +513,19 @@ mod tests {
|
||||
index_writer.add_document(doc!(text_field=>" b c"));
|
||||
// 5
|
||||
index_writer.add_document(doc!(text_field=>" a"));
|
||||
index_writer.commit()?;
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
reader.reload()?;
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
let inverted_index = segment_reader.inverted_index(text_field);
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.is_none());
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 5);
|
||||
@@ -511,7 +533,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 3);
|
||||
@@ -522,25 +544,25 @@ mod tests {
|
||||
}
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// 0
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
// 1
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||
index_writer.rollback()?;
|
||||
index_writer.rollback().unwrap();
|
||||
}
|
||||
{
|
||||
reader.reload()?;
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let seg_reader = searcher.segment_reader(0);
|
||||
let inverted_index = seg_reader.inverted_index(term_abcd.field())?;
|
||||
let inverted_index = seg_reader.inverted_index(term_abcd.field());
|
||||
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.is_none());
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, seg_reader));
|
||||
assert_eq!(postings.doc(), 5);
|
||||
@@ -548,7 +570,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, seg_reader));
|
||||
assert_eq!(postings.doc(), 3);
|
||||
@@ -559,30 +581,30 @@ mod tests {
|
||||
}
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||
index_writer.rollback()?;
|
||||
index_writer.rollback().unwrap();
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "a"));
|
||||
index_writer.commit()?;
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
reader.reload()?;
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(term_abcd.field())?;
|
||||
let inverted_index = segment_reader.inverted_index(term_abcd.field());
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.is_none());
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(!advance_undeleted(&mut postings, segment_reader));
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 3);
|
||||
@@ -592,107 +614,104 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 4);
|
||||
assert!(!advance_undeleted(&mut postings, segment_reader));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_indexed_u64() -> crate::Result<()> {
|
||||
fn test_indexed_u64() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_u64_field("value", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(field=>1u64));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_u64(field, 1u64);
|
||||
let mut postings = searcher
|
||||
.segment_reader(0)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::Basic)?
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
assert!(postings.advance());
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
Ok(())
|
||||
assert!(!postings.advance());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_indexed_i64() -> crate::Result<()> {
|
||||
fn test_indexed_i64() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let value_field = schema_builder.add_i64_field("value", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let negative_val = -1i64;
|
||||
index_writer.add_document(doc!(value_field => negative_val));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_i64(value_field, negative_val);
|
||||
let mut postings = searcher
|
||||
.segment_reader(0)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::Basic)?
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
assert!(postings.advance());
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
Ok(())
|
||||
assert!(!postings.advance());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_indexed_f64() -> crate::Result<()> {
|
||||
fn test_indexed_f64() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let value_field = schema_builder.add_f64_field("value", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let val = std::f64::consts::PI;
|
||||
index_writer.add_document(doc!(value_field => val));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_f64(value_field, val);
|
||||
let mut postings = searcher
|
||||
.segment_reader(0)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::Basic)?
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
assert!(postings.advance());
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
Ok(())
|
||||
assert!(!postings.advance());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_indexedfield_not_in_documents() -> crate::Result<()> {
|
||||
fn test_indexedfield_not_in_documents() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let absent_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
let reader = index.reader()?;
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(absent_field)?;
|
||||
assert_eq!(inverted_index.terms().num_terms(), 0);
|
||||
Ok(())
|
||||
segment_reader.inverted_index(absent_field); //< should not panic
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_delete_postings2() -> crate::Result<()> {
|
||||
fn test_delete_postings2() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
@@ -700,112 +719,128 @@ mod tests {
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()?;
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"63"));
|
||||
index_writer.add_document(doc!(text_field=>"70"));
|
||||
index_writer.add_document(doc!(text_field=>"34"));
|
||||
index_writer.add_document(doc!(text_field=>"1"));
|
||||
index_writer.add_document(doc!(text_field=>"38"));
|
||||
index_writer.add_document(doc!(text_field=>"33"));
|
||||
index_writer.add_document(doc!(text_field=>"40"));
|
||||
index_writer.add_document(doc!(text_field=>"17"));
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "38"));
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "34"));
|
||||
index_writer.commit()?;
|
||||
reader.reload()?;
|
||||
assert_eq!(reader.searcher().num_docs(), 6);
|
||||
Ok(())
|
||||
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
|
||||
|
||||
let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
|
||||
let doc = doc!(text_field=>val);
|
||||
index_writer.add_document(doc);
|
||||
};
|
||||
|
||||
let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
|
||||
let delterm = Term::from_field_text(text_field, val);
|
||||
index_writer.delete_term(delterm);
|
||||
};
|
||||
|
||||
add_document(&mut index_writer, "63");
|
||||
add_document(&mut index_writer, "70");
|
||||
add_document(&mut index_writer, "34");
|
||||
add_document(&mut index_writer, "1");
|
||||
add_document(&mut index_writer, "38");
|
||||
add_document(&mut index_writer, "33");
|
||||
add_document(&mut index_writer, "40");
|
||||
add_document(&mut index_writer, "17");
|
||||
remove_document(&mut index_writer, "38");
|
||||
remove_document(&mut index_writer, "34");
|
||||
index_writer.commit().unwrap();
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.num_docs(), 6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_termfreq() -> crate::Result<()> {
|
||||
fn test_termfreq() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"af af af bc bc"));
|
||||
index_writer.commit()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"af af af bc bc");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
let index_reader = index.reader()?;
|
||||
let index_reader = index.reader().unwrap();
|
||||
let searcher = index_reader.searcher();
|
||||
let reader = searcher.segment_reader(0);
|
||||
let inverted_index = reader.inverted_index(text_field)?;
|
||||
let inverted_index = reader.inverted_index(text_field);
|
||||
let term_abcd = Term::from_field_text(text_field, "abcd");
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.is_none());
|
||||
let term_af = Term::from_field_text(text_field, "af");
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(postings.advance());
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.term_freq(), 3);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
assert!(!postings.advance());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_searcher_1() -> crate::Result<()> {
|
||||
fn test_searcher_1() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let reader = index.reader()?;
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"af af af b"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||
index_writer.commit()?;
|
||||
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
let get_doc_ids = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
searcher
|
||||
.search(&query, &TEST_COLLECTOR_WITH_SCORE)
|
||||
.map(|topdocs| topdocs.docs().to_vec())
|
||||
};
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
|
||||
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
|
||||
vec![DocAddress(0, 0)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
|
||||
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "c")])?,
|
||||
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "d")])?,
|
||||
vec![DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![
|
||||
Term::from_field_text(text_field, "b"),
|
||||
Term::from_field_text(text_field, "a"),
|
||||
])?,
|
||||
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
Ok(())
|
||||
let reader = index.reader().unwrap();
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"af af af b"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let get_doc_ids = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
|
||||
topdocs.docs().to_vec()
|
||||
};
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
|
||||
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
|
||||
vec![DocAddress(0, 0)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
|
||||
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
|
||||
vec![DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
|
||||
vec![DocAddress(0, 2)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![
|
||||
Term::from_field_text(text_field, "b"),
|
||||
Term::from_field_text(text_field, "a"),
|
||||
]),
|
||||
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_searcher_2() -> crate::Result<()> {
|
||||
fn test_searcher_2() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
@@ -813,17 +848,19 @@ mod tests {
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()?;
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0u64);
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"af b"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||
index_writer.commit()?;
|
||||
reader.reload()?;
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"af b"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
reader.reload().unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 3u64);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -835,17 +872,17 @@ mod tests {
|
||||
text_field => "some other value",
|
||||
other_text_field => "short");
|
||||
assert_eq!(document.len(), 3);
|
||||
let values: Vec<&Value> = document.get_all(text_field).collect();
|
||||
let values = document.get_all(text_field);
|
||||
assert_eq!(values.len(), 2);
|
||||
assert_eq!(values[0].text(), Some("tantivy"));
|
||||
assert_eq!(values[1].text(), Some("some other value"));
|
||||
let values: Vec<&Value> = document.get_all(other_text_field).collect();
|
||||
let values = document.get_all(other_text_field);
|
||||
assert_eq!(values.len(), 1);
|
||||
assert_eq!(values[0].text(), Some("short"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wrong_fast_field_type() -> crate::Result<()> {
|
||||
fn test_wrong_fast_field_type() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
|
||||
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
|
||||
@@ -855,14 +892,14 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
|
||||
{
|
||||
let document =
|
||||
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
|
||||
index_writer.add_document(document);
|
||||
index_writer.commit()?;
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
let reader = index.reader()?;
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||
{
|
||||
@@ -901,12 +938,11 @@ mod tests {
|
||||
let fast_field_reader = fast_field_reader_opt.unwrap();
|
||||
assert_eq!(fast_field_reader.get(0), 4f64)
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// motivated by #729
|
||||
#[test]
|
||||
fn test_update_via_delete_insert() -> crate::Result<()> {
|
||||
fn test_update_via_delete_insert() {
|
||||
use crate::collector::Count;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::AllQuery;
|
||||
@@ -920,17 +956,17 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let index_reader = index.reader()?;
|
||||
let index_reader = index.reader().unwrap();
|
||||
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer(3_000_000).unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
|
||||
for doc_id in 0u64..DOC_COUNT {
|
||||
index_writer.add_document(doc!(id => doc_id));
|
||||
}
|
||||
index_writer.commit()?;
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
index_reader.reload()?;
|
||||
index_reader.reload().unwrap();
|
||||
let searcher = index_reader.searcher();
|
||||
|
||||
assert_eq!(
|
||||
@@ -941,11 +977,12 @@ mod tests {
|
||||
// update the 10 elements by deleting and re-adding
|
||||
for doc_id in 0u64..DOC_COUNT {
|
||||
index_writer.delete_term(Term::from_field_u64(id, doc_id));
|
||||
index_writer.commit()?;
|
||||
index_reader.reload()?;
|
||||
index_writer.add_document(doc!(id => doc_id));
|
||||
index_writer.commit()?;
|
||||
index_reader.reload()?;
|
||||
index_writer.commit().unwrap();
|
||||
index_reader.reload().unwrap();
|
||||
let doc = doc!(id => doc_id);
|
||||
index_writer.add_document(doc);
|
||||
index_writer.commit().unwrap();
|
||||
index_reader.reload().unwrap();
|
||||
let searcher = index_reader.searcher();
|
||||
// The number of document should be stable.
|
||||
assert_eq!(
|
||||
@@ -954,7 +991,7 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
index_reader.reload()?;
|
||||
index_reader.reload().unwrap();
|
||||
let searcher = index_reader.searcher();
|
||||
let segment_ids: Vec<SegmentId> = searcher
|
||||
.segment_readers()
|
||||
@@ -963,18 +1000,12 @@ mod tests {
|
||||
.collect();
|
||||
block_on(index_writer.merge(&segment_ids)).unwrap();
|
||||
|
||||
index_reader.reload()?;
|
||||
index_reader.reload().unwrap();
|
||||
let searcher = index_reader.searcher();
|
||||
assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
fn test_validate_checksum() -> crate::Result<()> {
let index_path = tempfile::tempdir().expect("dir");
let schema = Schema::builder().build();
let index = Index::create_in_dir(&index_path, schema)?;
assert!(index.validate_checksum()?.is_empty());
Ok(())
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
}

@@ -37,12 +37,12 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use super::PositionSerializer;
|
||||
use crate::positions::reader::PositionReader;
|
||||
use crate::{common::HasLen, directory::FileSlice};
|
||||
use super::{PositionReader, PositionSerializer};
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::positions::COMPRESSION_BLOCK_SIZE;
|
||||
use std::iter;
|
||||
|
||||
fn create_stream_buffer(vals: &[u32]) -> (FileSlice, FileSlice) {
|
||||
fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
|
||||
let mut skip_buffer = vec![];
|
||||
let mut stream_buffer = vec![];
|
||||
{
|
||||
@@ -53,7 +53,10 @@ pub mod tests {
|
||||
}
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
(FileSlice::from(stream_buffer), FileSlice::from(skip_buffer))
|
||||
(
|
||||
ReadOnlySource::from(stream_buffer),
|
||||
ReadOnlySource::from(skip_buffer),
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -62,10 +65,10 @@ pub mod tests {
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 12);
|
||||
assert_eq!(stream.len(), 1168);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||
for &n in &[1, 10, 127, 128, 130, 312] {
|
||||
let mut v = vec![0u32; n];
|
||||
position_reader.read(0, &mut v[..]);
|
||||
position_reader.read(&mut v[..n]);
|
||||
for i in 0..n {
|
||||
assert_eq!(v[i], i as u32);
|
||||
}
|
||||
@@ -73,19 +76,19 @@ pub mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_position_read_with_offset() {
|
||||
let v: Vec<u32> = (0..1000).collect();
|
||||
fn test_position_skip() {
|
||||
let v: Vec<u32> = (0..1_000).collect();
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 12);
|
||||
assert_eq!(stream.len(), 1168);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
||||
for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
|
||||
for &len in &[1, 10, 130, 500] {
|
||||
let mut v = vec![0u32; len];
|
||||
position_reader.read(offset, &mut v[..]);
|
||||
for i in 0..len {
|
||||
assert_eq!(v[i], i as u32 + offset as u32);
|
||||
}
|
||||
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||
position_reader.skip(10);
|
||||
for &n in &[10, 127, COMPRESSION_BLOCK_SIZE, 130, 312] {
|
||||
let mut v = vec![0u32; n];
|
||||
position_reader.read(&mut v[..n]);
|
||||
for i in 0..n {
|
||||
assert_eq!(v[i], 10u32 + i as u32);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -97,15 +100,14 @@ pub mod tests {
|
||||
assert_eq!(skip.len(), 12);
|
||||
assert_eq!(stream.len(), 1168);
|
||||
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||
let mut buf = [0u32; 7];
|
||||
let mut c = 0;
|
||||
|
||||
let mut offset = 0;
|
||||
for _ in 0..100 {
|
||||
position_reader.read(offset, &mut buf);
|
||||
position_reader.read(offset, &mut buf);
|
||||
offset += 7;
|
||||
position_reader.read(&mut buf);
|
||||
position_reader.read(&mut buf);
|
||||
position_reader.skip(4);
|
||||
position_reader.skip(3);
|
||||
for &el in &buf {
|
||||
assert_eq!(c, el);
|
||||
c += 1;
|
||||
@@ -113,59 +115,6 @@ pub mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_position_reread_anchor_different_than_block() {
|
||||
let v: Vec<u32> = (0..2_000_000).collect();
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 15_749);
|
||||
assert_eq!(stream.len(), 4_987_872);
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
|
||||
let mut buf = [0u32; 256];
|
||||
position_reader.read(128, &mut buf);
|
||||
for i in 0..256 {
|
||||
assert_eq!(buf[i], (128 + i) as u32);
|
||||
}
|
||||
position_reader.read(128, &mut buf);
|
||||
for i in 0..256 {
|
||||
assert_eq!(buf[i], (128 + i) as u32);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "offset arguments should be increasing.")]
|
||||
fn test_position_panic_if_called_previous_anchor() {
|
||||
let v: Vec<u32> = (0..2_000_000).collect();
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 15_749);
|
||||
assert_eq!(stream.len(), 4_987_872);
|
||||
let mut buf = [0u32; 1];
|
||||
let mut position_reader =
|
||||
PositionReader::new(stream.clone(), skip.clone(), 200_000).unwrap();
|
||||
position_reader.read(230, &mut buf);
|
||||
position_reader.read(9, &mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_positions_bug() {
|
||||
let mut v: Vec<u32> = vec![];
|
||||
for i in 1..200 {
|
||||
for j in 0..i {
|
||||
v.push(j);
|
||||
}
|
||||
}
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
let mut buf = Vec::new();
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
|
||||
let mut offset = 0;
|
||||
for i in 1..24 {
|
||||
buf.resize(i, 0);
|
||||
position_reader.read(offset, &mut buf[..]);
|
||||
offset += i as u64;
|
||||
let r: Vec<u32> = (0..i).map(|el| el as u32).collect();
|
||||
assert_eq!(buf, &r[..]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_position_long_skip_const() {
|
||||
const CONST_VAL: u32 = 9u32;
|
||||
@@ -173,9 +122,9 @@ pub mod tests {
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 15_749);
|
||||
assert_eq!(stream.len(), 1_000_000);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024).unwrap();
|
||||
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
|
||||
let mut buf = [0u32; 1];
|
||||
position_reader.read(0, &mut buf);
|
||||
position_reader.read(&mut buf);
|
||||
assert_eq!(buf[0], CONST_VAL);
|
||||
}
|
||||
|
||||
@@ -192,10 +141,9 @@ pub mod tests {
|
||||
128 * 1024 + 7,
|
||||
128 * 10 * 1024 + 10,
|
||||
] {
|
||||
let mut position_reader =
|
||||
PositionReader::new(stream.clone(), skip.clone(), offset).unwrap();
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
|
||||
let mut buf = [0u32; 1];
|
||||
position_reader.read(0, &mut buf);
|
||||
position_reader.read(&mut buf);
|
||||
assert_eq!(buf[0], offset as u32);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
use std::io;
|
||||
|
||||
use crate::common::{BinarySerializable, FixedSize};
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::positions::COMPRESSION_BLOCK_SIZE;
|
||||
use crate::positions::LONG_SKIP_INTERVAL;
|
||||
use crate::positions::LONG_SKIP_IN_BLOCKS;
|
||||
use bitpacking::{BitPacker, BitPacker4x};
|
||||
|
||||
use crate::postings::compression::compressed_block_size;
|
||||
/// Positions works as a long sequence of compressed block.
/// All terms are chained one after the other.
///
@@ -28,28 +24,28 @@ use bitpacking::{BitPacker, BitPacker4x};
/// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
/// so skipping a block without decompressing it is just a matter of advancing that many
/// bytes.
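// Worked example (illustrative numbers, not taken from this diff): positions
// are bit-packed in blocks of 128 integers, so a block whose values fit in
// 5 bits occupies 128 * 5 / 8 = 80 bytes and an 11-bit block occupies
// 128 * 11 / 8 = 176 bytes; skipping such a block without decompressing it
// only requires advancing the read cursor by that many bytes.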
use bitpacking::{BitPacker, BitPacker4x};
|
||||
use owned_read::OwnedRead;
|
||||
|
||||
struct Positions {
|
||||
bit_packer: BitPacker4x,
|
||||
skip_file: FileSlice,
|
||||
position_file: FileSlice,
|
||||
long_skip_data: OwnedBytes,
|
||||
skip_source: ReadOnlySource,
|
||||
position_source: ReadOnlySource,
|
||||
long_skip_source: ReadOnlySource,
|
||||
}
|
||||
|
||||
impl Positions {
    pub fn new(position_file: FileSlice, skip_file: FileSlice) -> io::Result<Positions> {
        let (body, footer) = skip_file.split_from_end(u32::SIZE_IN_BYTES);
        let footer_data = footer.read_bytes()?;
        let num_long_skips = u32::deserialize(&mut footer_data.as_slice())?;
        let (skip_file, long_skip_file) =
    pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
        let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
        let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
        let (skip_source, long_skip_source) =
            body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
        let long_skip_data = long_skip_file.read_bytes()?;
        Ok(Positions {
        Positions {
            bit_packer: BitPacker4x::new(),
            skip_file,
            long_skip_data,
            position_file,
        })
            skip_source,
            long_skip_source,
            position_source,
        }
    }

    /// Returns the offset of the block associated to the given `long_skip_id`.
@@ -59,116 +55,143 @@ impl Positions {
        if long_skip_id == 0 {
            return 0;
        }
        let long_skip_slice = self.long_skip_data.as_slice();
        let long_skip_slice = self.long_skip_source.as_slice();
        let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
        u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
    }

    fn reader(&self, offset: u64) -> io::Result<PositionReader> {
    fn reader(&self, offset: u64) -> PositionReader {
        let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
        let small_skip = (offset % LONG_SKIP_INTERVAL) as usize;
        let offset_num_bytes: u64 = self.long_skip(long_skip_id);
        let position_read = self
            .position_file
            .slice_from(offset_num_bytes as usize)
            .read_bytes()?;
        let skip_read = self
            .skip_file
            .slice_from(long_skip_id * LONG_SKIP_IN_BLOCKS)
            .read_bytes()?;
        Ok(PositionReader {
        let mut position_read = OwnedRead::new(self.position_source.clone());
        position_read.advance(offset_num_bytes as usize);
        let mut skip_read = OwnedRead::new(self.skip_source.clone());
        skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
        let mut position_reader = PositionReader {
            bit_packer: self.bit_packer,
            skip_read,
            position_read,
            inner_offset: 0,
            buffer: Box::new([0u32; 128]),
            block_offset: std::i64::MAX as u64,
            anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
            abs_offset: offset,
        })
            ahead: None,
        };
        position_reader.skip(small_skip);
        position_reader
    }
}

#[derive(Clone)]
pub struct PositionReader {
    skip_read: OwnedBytes,
    position_read: OwnedBytes,
    skip_read: OwnedRead,
    position_read: OwnedRead,
    bit_packer: BitPacker4x,
    buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,
    inner_offset: usize,
    buffer: Box<[u32; 128]>,
    ahead: Option<usize>, // if None, no block is loaded.
    // if Some(num_blocks), the block currently loaded is num_blocks ahead
    // of the block of the next int to read.
}

    block_offset: u64,
    anchor_offset: u64,

    abs_offset: u64,
// `ahead` represents the offset of the block currently loaded
// compared to the cursor of the actual stream.
//
// By contract, when this function is called, the current block has to be
// decompressed.
//
// If the requested number of els ends exactly at a given block, the next
// block is not decompressed.
fn read_impl(
    bit_packer: BitPacker4x,
    mut position: &[u8],
    buffer: &mut [u32; 128],
    mut inner_offset: usize,
    num_bits: &[u8],
    output: &mut [u32],
) -> usize {
    let mut output_start = 0;
    let mut output_len = output.len();
    let mut ahead = 0;
    loop {
        let available_len = COMPRESSION_BLOCK_SIZE - inner_offset;
        // We have enough elements in the current block.
        // Let's copy the requested elements in the output buffer,
        // and return.
        if output_len <= available_len {
            output[output_start..].copy_from_slice(&buffer[inner_offset..][..output_len]);
            return ahead;
        }
        output[output_start..][..available_len].copy_from_slice(&buffer[inner_offset..]);
        output_len -= available_len;
        output_start += available_len;
        inner_offset = 0;
        let num_bits = num_bits[ahead];
        bit_packer.decompress(position, &mut buffer[..], num_bits);
        let block_len = compressed_block_size(num_bits);
        position = &position[block_len..];
        ahead += 1;
    }
}

impl PositionReader {
|
||||
pub fn new(
|
||||
position_file: FileSlice,
|
||||
skip_file: FileSlice,
|
||||
position_source: ReadOnlySource,
|
||||
skip_source: ReadOnlySource,
|
||||
offset: u64,
|
||||
) -> io::Result<PositionReader> {
|
||||
let positions = Positions::new(position_file, skip_file)?;
|
||||
positions.reader(offset)
|
||||
) -> PositionReader {
|
||||
Positions::new(position_source, skip_source).reader(offset)
|
||||
}
|
||||
|
||||
fn advance_num_blocks(&mut self, num_blocks: usize) {
|
||||
let num_bits: usize = self.skip_read.as_ref()[..num_blocks]
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|num_bits| num_bits as usize)
|
||||
.sum();
|
||||
let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
|
||||
self.skip_read.advance(num_blocks as usize);
|
||||
self.position_read.advance(num_bytes_to_skip);
|
||||
}
|
||||
|
||||
/// Fills a buffer with the positions in `[offset..offset + output.len())`.
///
/// `offset` is required to be >= the offsets given in previous calls
/// for this `PositionReader` instance.
|
||||
pub fn read(&mut self, mut offset: u64, mut output: &mut [u32]) {
|
||||
offset += self.abs_offset;
|
||||
assert!(
|
||||
offset >= self.anchor_offset,
|
||||
"offset arguments should be increasing."
|
||||
);
|
||||
let delta_to_block_offset = offset as i64 - self.block_offset as i64;
|
||||
if delta_to_block_offset < 0 || delta_to_block_offset >= 128 {
|
||||
// The first position is not within the first block.
|
||||
// We need to decompress the first block.
|
||||
let delta_to_anchor_offset = offset - self.anchor_offset;
|
||||
let num_blocks_to_skip =
|
||||
(delta_to_anchor_offset / (COMPRESSION_BLOCK_SIZE as u64)) as usize;
|
||||
self.advance_num_blocks(num_blocks_to_skip);
|
||||
self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
|
||||
self.block_offset = self.anchor_offset;
|
||||
let num_bits = self.skip_read.as_slice()[0];
|
||||
self.bit_packer
|
||||
.decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
|
||||
} else {
|
||||
let num_blocks_to_skip =
|
||||
((self.block_offset - self.anchor_offset) / COMPRESSION_BLOCK_SIZE as u64) as usize;
|
||||
self.advance_num_blocks(num_blocks_to_skip);
|
||||
self.anchor_offset = self.block_offset;
|
||||
}
|
||||
|
||||
let mut num_bits = self.skip_read.as_slice()[0];
|
||||
let mut position_data = self.position_read.as_ref();
|
||||
|
||||
for i in 1.. {
|
||||
let offset_in_block = (offset as usize) % COMPRESSION_BLOCK_SIZE;
|
||||
let remaining_in_block = COMPRESSION_BLOCK_SIZE - offset_in_block;
|
||||
if remaining_in_block >= output.len() {
|
||||
output.copy_from_slice(&self.buffer[offset_in_block..][..output.len()]);
|
||||
break;
|
||||
}
|
||||
output[..remaining_in_block].copy_from_slice(&self.buffer[offset_in_block..]);
|
||||
output = &mut output[remaining_in_block..];
|
||||
offset += remaining_in_block as u64;
|
||||
position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
|
||||
num_bits = self.skip_read.as_slice()[i];
|
||||
/// Fills a buffer with the next `output.len()` integers.
|
||||
/// This does not consume / advance the stream.
|
||||
pub fn read(&mut self, output: &mut [u32]) {
|
||||
let skip_data = self.skip_read.as_ref();
|
||||
let position_data = self.position_read.as_ref();
|
||||
let num_bits = self.skip_read.get(0);
|
||||
if self.ahead != Some(0) {
|
||||
// the block currently available is not the block
|
||||
// for the current position
|
||||
self.bit_packer
|
||||
.decompress(position_data, self.buffer.as_mut(), num_bits);
|
||||
self.block_offset += COMPRESSION_BLOCK_SIZE as u64;
|
||||
self.ahead = Some(0);
|
||||
}
|
||||
let block_len = compressed_block_size(num_bits);
|
||||
self.ahead = Some(read_impl(
|
||||
self.bit_packer,
|
||||
&position_data[block_len..],
|
||||
self.buffer.as_mut(),
|
||||
self.inner_offset,
|
||||
&skip_data[1..],
|
||||
output,
|
||||
));
|
||||
}
|
||||
|
||||
/// Skips the next `skip_len` integers.
|
||||
///
|
||||
/// If a full block is skipped, calling
|
||||
/// `.skip(...)` will avoid decompressing it.
|
||||
///
|
||||
/// May panic if the end of the stream is reached.
|
||||
pub fn skip(&mut self, skip_len: usize) {
|
||||
let skip_len_plus_inner_offset = skip_len + self.inner_offset;
|
||||
|
||||
let num_blocks_to_advance = skip_len_plus_inner_offset / COMPRESSION_BLOCK_SIZE;
|
||||
self.inner_offset = skip_len_plus_inner_offset % COMPRESSION_BLOCK_SIZE;
|
||||
|
||||
self.ahead = self.ahead.and_then(|num_blocks| {
|
||||
if num_blocks >= num_blocks_to_advance {
|
||||
Some(num_blocks - num_blocks_to_advance)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
let skip_len_in_bits = self.skip_read.as_ref()[..num_blocks_to_advance]
|
||||
.iter()
|
||||
.map(|num_bits| *num_bits as usize)
|
||||
.sum::<usize>()
|
||||
* COMPRESSION_BLOCK_SIZE;
|
||||
let skip_len_in_bytes = skip_len_in_bits / 8;
|
||||
self.skip_read.advance(num_blocks_to_advance);
|
||||
self.position_read.advance(skip_len_in_bytes);
|
||||
}
|
||||
}
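
// Editor's sketch (standalone illustration, not from the diff): `skip` above folds
// the requested skip into whole 128-position blocks plus a remaining in-block
// offset. The same split written as a free helper:
fn split_skip(skip_len: usize, inner_offset: usize) -> (usize, usize) {
    let total = skip_len + inner_offset;
    // (number of whole blocks to advance, new offset inside the current block)
    (total / 128, total % 128)
}
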
|
||||
|
||||
@@ -87,7 +87,6 @@ fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
|
||||
(begin, end)
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn galloping(block_docs: &[u32], target: u32) -> usize {
|
||||
let (start, end) = exponential_search(&block_docs, target);
|
||||
start + linear_search(&block_docs[start..end], target)
|
||||
@@ -130,18 +129,23 @@ impl BlockSearcher {
|
||||
///
|
||||
/// If SSE2 instructions are available in the `(platform, running CPU)`,
|
||||
/// then we use a different implementation that does an exhaustive linear search over
|
||||
/// the block regardless of whether the block is full or not.
|
||||
///
|
||||
/// Indeed, if the block is not full, the remaining items are TERMINATED.
|
||||
/// It is surprisingly faster, most likely because of the lack of branch misprediction.
|
||||
pub(crate) fn search_in_block(self, block_docs: &AlignedBuffer, target: u32) -> usize {
|
||||
/// the full block whenever the block is full (`len == 128`). It is surprisingly faster, most likely because of the lack
|
||||
/// of branch.
|
||||
pub(crate) fn search_in_block(
|
||||
self,
|
||||
block_docs: &AlignedBuffer,
|
||||
len: usize,
|
||||
start: usize,
|
||||
target: u32,
|
||||
) -> usize {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
{
|
||||
if self == BlockSearcher::SSE2 {
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
|
||||
return sse2::linear_search_sse2_128(block_docs, target);
|
||||
}
|
||||
}
|
||||
galloping(&block_docs.0[..], target)
|
||||
start + galloping(&block_docs.0[start..len], target)
|
||||
}
|
||||
}
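
// Editor's sketch (hypothetical helper, not the crate's API): the galloping
// strategy above combines an exponential search, which doubles a window until it
// brackets `target`, with a final linear scan inside that window. A self-contained
// version of the same idea, returning the index of the first doc >= `target`:
fn galloping_sketch(block_docs: &[u32], target: u32) -> usize {
    let mut end = 1usize;
    while end < block_docs.len() && block_docs[end] < target {
        end *= 2;
    }
    let begin = end / 2;
    let end = end.min(block_docs.len());
    // linear scan: count the remaining elements strictly smaller than `target`
    begin + block_docs[begin..end]
        .iter()
        .take_while(|&&doc| doc < target)
        .count()
}
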
|
||||
|
||||
@@ -162,7 +166,6 @@ mod tests {
|
||||
use super::exponential_search;
|
||||
use super::linear_search;
|
||||
use super::BlockSearcher;
|
||||
use crate::docset::TERMINATED;
|
||||
use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
|
||||
|
||||
#[test]
|
||||
@@ -193,12 +196,19 @@ mod tests {
|
||||
fn util_test_search_in_block(block_searcher: BlockSearcher, block: &[u32], target: u32) {
|
||||
let cursor = search_in_block_trivial_but_slow(block, target);
|
||||
assert!(block.len() < COMPRESSION_BLOCK_SIZE);
|
||||
let mut output_buffer = [TERMINATED; COMPRESSION_BLOCK_SIZE];
|
||||
let mut output_buffer = [u32::max_value(); COMPRESSION_BLOCK_SIZE];
|
||||
output_buffer[..block.len()].copy_from_slice(block);
|
||||
assert_eq!(
|
||||
block_searcher.search_in_block(&AlignedBuffer(output_buffer), target),
|
||||
cursor
|
||||
);
|
||||
for i in 0..cursor {
|
||||
assert_eq!(
|
||||
block_searcher.search_in_block(
|
||||
&AlignedBuffer(output_buffer),
|
||||
block.len(),
|
||||
i,
|
||||
target
|
||||
),
|
||||
cursor
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn util_test_search_in_block_all(block_searcher: BlockSearcher, block: &[u32]) {
|
||||
|
||||
@@ -1,530 +0,0 @@
|
||||
use std::io;
|
||||
|
||||
use crate::common::{BinarySerializable, VInt};
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::postings::compression::{
|
||||
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
|
||||
};
|
||||
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
|
||||
use crate::query::BM25Weight;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::{DocId, Score, TERMINATED};
|
||||
|
||||
fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
|
||||
if let Some(first) = it.next() {
|
||||
Some(it.fold(first, Score::max))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// `BlockSegmentPostings` is a cursor iterating over blocks
|
||||
/// of documents.
|
||||
///
|
||||
/// # Warning
|
||||
///
|
||||
/// While it is useful for some very specific high-performance
|
||||
/// use cases, you should prefer using `SegmentPostings` for most usage.
|
||||
#[derive(Clone)]
|
||||
pub struct BlockSegmentPostings {
|
||||
pub(crate) doc_decoder: BlockDecoder,
|
||||
loaded_offset: usize,
|
||||
freq_decoder: BlockDecoder,
|
||||
freq_reading_option: FreqReadingOption,
|
||||
block_max_score_cache: Option<Score>,
|
||||
|
||||
doc_freq: u32,
|
||||
|
||||
data: OwnedBytes,
|
||||
pub(crate) skip_reader: SkipReader,
|
||||
}
|
||||
|
||||
fn decode_bitpacked_block(
|
||||
doc_decoder: &mut BlockDecoder,
|
||||
freq_decoder_opt: Option<&mut BlockDecoder>,
|
||||
data: &[u8],
|
||||
doc_offset: DocId,
|
||||
doc_num_bits: u8,
|
||||
tf_num_bits: u8,
|
||||
) {
|
||||
let num_consumed_bytes = doc_decoder.uncompress_block_sorted(data, doc_offset, doc_num_bits);
|
||||
if let Some(freq_decoder) = freq_decoder_opt {
|
||||
freq_decoder.uncompress_block_unsorted(&data[num_consumed_bytes..], tf_num_bits);
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_vint_block(
|
||||
doc_decoder: &mut BlockDecoder,
|
||||
freq_decoder_opt: Option<&mut BlockDecoder>,
|
||||
data: &[u8],
|
||||
doc_offset: DocId,
|
||||
num_vint_docs: usize,
|
||||
) {
|
||||
let num_consumed_bytes =
|
||||
doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
|
||||
if let Some(freq_decoder) = freq_decoder_opt {
|
||||
freq_decoder.uncompress_vint_unsorted(
|
||||
&data[num_consumed_bytes..],
|
||||
num_vint_docs,
|
||||
TERMINATED,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn split_into_skips_and_postings(
|
||||
doc_freq: u32,
|
||||
mut bytes: OwnedBytes,
|
||||
) -> (Option<OwnedBytes>, OwnedBytes) {
|
||||
if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
|
||||
return (None, bytes);
|
||||
}
|
||||
let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize;
|
||||
let (skip_data, postings_data) = bytes.split(skip_len);
|
||||
(Some(skip_data), postings_data)
|
||||
}
|
||||
|
||||
impl BlockSegmentPostings {
|
||||
pub(crate) fn open(
|
||||
doc_freq: u32,
|
||||
data: FileSlice,
|
||||
record_option: IndexRecordOption,
|
||||
requested_option: IndexRecordOption,
|
||||
) -> io::Result<BlockSegmentPostings> {
|
||||
let freq_reading_option = match (record_option, requested_option) {
|
||||
(IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
|
||||
(_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
|
||||
(_, _) => FreqReadingOption::ReadFreq,
|
||||
};
|
||||
|
||||
let (skip_data_opt, postings_data) =
|
||||
split_into_skips_and_postings(doc_freq, data.read_bytes()?);
|
||||
let skip_reader = match skip_data_opt {
|
||||
Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
|
||||
None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option),
|
||||
};
|
||||
|
||||
let mut block_segment_postings = BlockSegmentPostings {
|
||||
doc_decoder: BlockDecoder::with_val(TERMINATED),
|
||||
loaded_offset: std::usize::MAX,
|
||||
freq_decoder: BlockDecoder::with_val(1),
|
||||
freq_reading_option,
|
||||
block_max_score_cache: None,
|
||||
doc_freq,
|
||||
data: postings_data,
|
||||
skip_reader,
|
||||
};
|
||||
block_segment_postings.load_block();
|
||||
Ok(block_segment_postings)
|
||||
}
|
||||
|
||||
/// Returns the block_max_score for the current block.
|
||||
/// It does not require the block to be loaded. For instance, it is ok to call this method
|
||||
/// after having called `.shallow_advance(..)`.
|
||||
///
|
||||
/// See `TermScorer::block_max_score(..)` for more information.
|
||||
pub fn block_max_score(
|
||||
&mut self,
|
||||
fieldnorm_reader: &FieldNormReader,
|
||||
bm25_weight: &BM25Weight,
|
||||
) -> Score {
|
||||
if let Some(score) = self.block_max_score_cache {
|
||||
return score;
|
||||
}
|
||||
if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
|
||||
// if we are on a full block, the skip reader should have the block max information
|
||||
// for us
|
||||
self.block_max_score_cache = Some(skip_reader_max_score);
|
||||
return skip_reader_max_score;
|
||||
}
|
||||
// this is the last block of the segment posting list.
|
||||
// If it is actually loaded, we can compute block max manually.
|
||||
if self.block_is_loaded() {
|
||||
let docs = self.doc_decoder.output_array().iter().cloned();
|
||||
let freqs = self.freq_decoder.output_array().iter().cloned();
|
||||
let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
|
||||
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
|
||||
bm25_weight.score(fieldnorm_id, term_freq)
|
||||
});
|
||||
let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
|
||||
self.block_max_score_cache = Some(block_max_score);
|
||||
return block_max_score;
|
||||
}
|
||||
// We do not have access to any good block max value. We return bm25_weight.max_score()
// as it is a valid upper bound.
//
// We do not cache it, however, so that it gets computed once the block is loaded.
|
||||
bm25_weight.max_score()
|
||||
}
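
// Editor's sketch (illustrative usage, not code from the diff): a Block-WAND style
// caller can combine `block_max_score` with `advance` to skip whole blocks that
// cannot beat the current score threshold. `threshold` and the two readers are
// assumed to be provided by the caller.
fn skip_uncompetitive_blocks(
    postings: &mut BlockSegmentPostings,
    fieldnorm_reader: &FieldNormReader,
    bm25_weight: &BM25Weight,
    threshold: Score,
) {
    while !postings.docs().is_empty()
        && postings.block_max_score(fieldnorm_reader, bm25_weight) <= threshold
    {
        postings.advance();
    }
}
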
|
||||
|
||||
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
|
||||
self.freq_reading_option
|
||||
}
|
||||
|
||||
// Resets the block segment postings on another position
|
||||
// in the postings file.
|
||||
//
|
||||
// This is useful for enumerating through a list of terms,
|
||||
// and consuming the associated posting lists while avoiding
|
||||
// reallocating a `BlockSegmentPostings`.
|
||||
//
|
||||
// # Warning
|
||||
//
|
||||
// This does not reset the positions list.
|
||||
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) {
|
||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
|
||||
self.data = postings_data;
|
||||
self.block_max_score_cache = None;
|
||||
self.loaded_offset = std::usize::MAX;
|
||||
if let Some(skip_data) = skip_data_opt {
|
||||
self.skip_reader.reset(skip_data, doc_freq);
|
||||
} else {
|
||||
self.skip_reader.reset(OwnedBytes::empty(), doc_freq);
|
||||
}
|
||||
self.doc_freq = doc_freq;
|
||||
self.load_block();
|
||||
}
|
||||
|
||||
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
///
/// This `doc_freq` is simply the sum of the lengths of all of the blocks,
/// and it does not take deleted documents into account.
|
||||
pub fn doc_freq(&self) -> u32 {
|
||||
self.doc_freq
|
||||
}
|
||||
|
||||
/// Returns the array of docs in the current block.
|
||||
///
|
||||
/// Before the first call to `.advance()`, the block
|
||||
/// returned by `.docs()` is empty.
|
||||
#[inline]
|
||||
pub fn docs(&self) -> &[DocId] {
|
||||
debug_assert!(self.block_is_loaded());
|
||||
self.doc_decoder.output_array()
|
||||
}
|
||||
|
||||
/// Returns a full block, regardless of whether the block is complete or incomplete (
|
||||
/// as it happens for the last block of the posting list).
|
||||
///
|
||||
/// In the latter case, the block is guaranteed to be padded with the sentinel value:
|
||||
/// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
|
||||
///
|
||||
/// This method is useful to run SSE2 linear search.
|
||||
#[inline(always)]
|
||||
pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
|
||||
debug_assert!(self.block_is_loaded());
|
||||
self.doc_decoder.output_aligned()
|
||||
}
|
||||
|
||||
/// Return the document at index `idx` of the block.
|
||||
#[inline(always)]
|
||||
pub fn doc(&self, idx: usize) -> u32 {
|
||||
self.doc_decoder.output(idx)
|
||||
}
|
||||
|
||||
/// Return the array of `term freq` in the block.
|
||||
#[inline]
|
||||
pub fn freqs(&self) -> &[u32] {
|
||||
debug_assert!(self.block_is_loaded());
|
||||
self.freq_decoder.output_array()
|
||||
}
|
||||
|
||||
/// Return the frequency at index `idx` of the block.
|
||||
#[inline]
|
||||
pub fn freq(&self, idx: usize) -> u32 {
|
||||
debug_assert!(self.block_is_loaded());
|
||||
self.freq_decoder.output(idx)
|
||||
}
|
||||
|
||||
/// Returns the length of the current block.
|
||||
///
|
||||
/// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
|
||||
/// except the last block that may have a length
|
||||
/// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
|
||||
#[inline]
|
||||
pub fn block_len(&self) -> usize {
|
||||
debug_assert!(self.block_is_loaded());
|
||||
self.doc_decoder.output_len
|
||||
}
|
||||
|
||||
/// Positions the cursor on a block that may contain `target_doc`.
|
||||
///
|
||||
/// If all docs are smaller than target, the block loaded may be empty,
|
||||
/// or be the last, incomplete VInt block.
|
||||
pub fn seek(&mut self, target_doc: DocId) {
|
||||
self.shallow_seek(target_doc);
|
||||
self.load_block();
|
||||
}
|
||||
|
||||
pub(crate) fn position_offset(&self) -> u64 {
|
||||
self.skip_reader.position_offset()
|
||||
}
|
||||
|
||||
/// Dangerous API! This calls seek on the skip list,
|
||||
/// but does not `.load_block()` afterwards.
|
||||
///
|
||||
/// `.load_block()` needs to be called manually afterwards.
|
||||
/// If all docs are smaller than target, the block loaded may be empty,
|
||||
/// or be the last, incomplete VInt block.
|
||||
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
|
||||
if self.skip_reader.seek(target_doc) {
|
||||
self.block_max_score_cache = None;
|
||||
}
|
||||
}
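
// Editor's sketch (hypothetical caller, not part of the diff): the intended pairing
// for the "dangerous" API above is to follow every `shallow_seek` with an explicit
// `load_block` before the decoded docs are accessed.
fn seek_then_load(postings: &mut BlockSegmentPostings, target: DocId) {
    postings.shallow_seek(target);
    // ... skip-level information (e.g. block max scores) can be inspected here ...
    postings.load_block();
}
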
|
||||
|
||||
pub(crate) fn block_is_loaded(&self) -> bool {
|
||||
self.loaded_offset == self.skip_reader.byte_offset()
|
||||
}
|
||||
|
||||
pub(crate) fn load_block(&mut self) {
|
||||
let offset = self.skip_reader.byte_offset();
|
||||
if self.loaded_offset == offset {
|
||||
return;
|
||||
}
|
||||
self.loaded_offset = offset;
|
||||
match self.skip_reader.block_info() {
|
||||
BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
tf_num_bits,
|
||||
..
|
||||
} => {
|
||||
decode_bitpacked_block(
|
||||
&mut self.doc_decoder,
|
||||
if let FreqReadingOption::ReadFreq = self.freq_reading_option {
|
||||
Some(&mut self.freq_decoder)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
&self.data.as_slice()[offset..],
|
||||
self.skip_reader.last_doc_in_previous_block,
|
||||
doc_num_bits,
|
||||
tf_num_bits,
|
||||
);
|
||||
}
|
||||
BlockInfo::VInt { num_docs } => {
|
||||
let data = {
|
||||
if num_docs == 0 {
|
||||
&[]
|
||||
} else {
|
||||
&self.data.as_slice()[offset..]
|
||||
}
|
||||
};
|
||||
decode_vint_block(
|
||||
&mut self.doc_decoder,
|
||||
if let FreqReadingOption::ReadFreq = self.freq_reading_option {
|
||||
Some(&mut self.freq_decoder)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
data,
|
||||
self.skip_reader.last_doc_in_previous_block,
|
||||
num_docs as usize,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance to the next block.
|
||||
///
|
||||
/// Returns false iff there were no remaining blocks.
|
||||
pub fn advance(&mut self) {
|
||||
self.skip_reader.advance();
|
||||
self.block_max_score_cache = None;
|
||||
self.load_block();
|
||||
}
|
||||
|
||||
/// Returns an empty segment postings object
|
||||
pub fn empty() -> BlockSegmentPostings {
|
||||
BlockSegmentPostings {
|
||||
doc_decoder: BlockDecoder::with_val(TERMINATED),
|
||||
loaded_offset: 0,
|
||||
freq_decoder: BlockDecoder::with_val(1),
|
||||
freq_reading_option: FreqReadingOption::NoFreq,
|
||||
block_max_score_cache: None,
|
||||
doc_freq: 0,
|
||||
data: OwnedBytes::empty(),
|
||||
skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::BlockSegmentPostings;
|
||||
use crate::common::HasLen;
|
||||
use crate::core::Index;
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
use crate::postings::postings::Postings;
|
||||
use crate::postings::SegmentPostings;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::Term;
|
||||
use crate::schema::INDEXED;
|
||||
use crate::DocId;
|
||||
|
||||
#[test]
|
||||
fn test_empty_segment_postings() {
|
||||
let mut postings = SegmentPostings::empty();
|
||||
assert_eq!(postings.doc(), TERMINATED);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
assert_eq!(postings.doc_freq(), 0);
|
||||
assert_eq!(postings.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_postings_doc_returns_terminated() {
|
||||
let mut postings = SegmentPostings::empty();
|
||||
assert_eq!(postings.doc(), TERMINATED);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_postings_doc_term_freq_returns_0() {
|
||||
let postings = SegmentPostings::empty();
|
||||
assert_eq!(postings.term_freq(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_block_segment_postings() {
|
||||
let mut postings = BlockSegmentPostings::empty();
|
||||
assert!(postings.docs().is_empty());
|
||||
assert_eq!(postings.doc_freq(), 0);
|
||||
postings.advance();
|
||||
assert!(postings.docs().is_empty());
|
||||
assert_eq!(postings.doc_freq(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_segment_postings() {
|
||||
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
|
||||
let mut offset: u32 = 0u32;
|
||||
// checking that the `doc_freq` is correct
|
||||
assert_eq!(block_segments.doc_freq(), 100_000);
|
||||
loop {
|
||||
let block = block_segments.docs();
|
||||
if block.is_empty() {
|
||||
break;
|
||||
}
|
||||
for (i, doc) in block.iter().cloned().enumerate() {
|
||||
assert_eq!(offset + (i as u32), doc);
|
||||
}
|
||||
offset += block.len() as u32;
|
||||
block_segments.advance();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skip_right_at_new_block() {
|
||||
let mut doc_ids = (0..128).collect::<Vec<u32>>();
|
||||
// 128 is missing
|
||||
doc_ids.push(129);
|
||||
doc_ids.push(130);
|
||||
{
|
||||
let block_segments = build_block_postings(&doc_ids);
|
||||
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
|
||||
assert_eq!(docset.seek(128), 129);
|
||||
assert_eq!(docset.doc(), 129);
|
||||
assert_eq!(docset.advance(), 130);
|
||||
assert_eq!(docset.doc(), 130);
|
||||
assert_eq!(docset.advance(), TERMINATED);
|
||||
}
|
||||
{
|
||||
let block_segments = build_block_postings(&doc_ids);
|
||||
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
|
||||
assert_eq!(docset.seek(129), 129);
|
||||
assert_eq!(docset.doc(), 129);
|
||||
assert_eq!(docset.advance(), 130);
|
||||
assert_eq!(docset.doc(), 130);
|
||||
assert_eq!(docset.advance(), TERMINATED);
|
||||
}
|
||||
{
|
||||
let block_segments = build_block_postings(&doc_ids);
|
||||
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
|
||||
assert_eq!(docset.doc(), 0);
|
||||
assert_eq!(docset.seek(131), TERMINATED);
|
||||
assert_eq!(docset.doc(), TERMINATED);
|
||||
}
|
||||
}
|
||||
|
||||
fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let int_field = schema_builder.add_u64_field("id", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut last_doc = 0u32;
|
||||
for &doc in docs {
|
||||
for _ in last_doc..doc {
|
||||
index_writer.add_document(doc!(int_field=>1u64));
|
||||
}
|
||||
index_writer.add_document(doc!(int_field=>0u64));
|
||||
last_doc = doc + 1;
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(int_field).unwrap();
|
||||
let term = Term::from_field_u64(int_field, 0u64);
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
inverted_index
|
||||
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_segment_postings_seek() {
|
||||
let mut docs = vec![0];
|
||||
for i in 0..1300 {
|
||||
docs.push((i * i / 100) + i);
|
||||
}
|
||||
let mut block_postings = build_block_postings(&docs[..]);
|
||||
for i in vec![0, 424, 10000] {
|
||||
block_postings.seek(i);
|
||||
let docs = block_postings.docs();
|
||||
assert!(docs[0] <= i);
|
||||
assert!(docs.last().cloned().unwrap_or(0u32) >= i);
|
||||
}
|
||||
block_postings.seek(100_000);
|
||||
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reset_block_segment_postings() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let int_field = schema_builder.add_u64_field("id", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
// create two postings lists, one containing even numbers,
|
||||
// the other containing odd numbers.
|
||||
for i in 0..6 {
|
||||
let doc = doc!(int_field=> (i % 2) as u64);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
let mut block_segments;
|
||||
{
|
||||
let term = Term::from_field_u64(int_field, 0u64);
|
||||
let inverted_index = segment_reader.inverted_index(int_field)?;
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
block_segments = inverted_index
|
||||
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
|
||||
}
|
||||
assert_eq!(block_segments.docs(), &[0, 2, 4]);
|
||||
{
|
||||
let term = Term::from_field_u64(int_field, 1u64);
|
||||
let inverted_index = segment_reader.inverted_index(int_field)?;
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
|
||||
}
|
||||
assert_eq!(block_segments.docs(), &[1, 3, 5]);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -17,12 +17,6 @@ pub struct BlockEncoder {
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
impl Default for BlockEncoder {
|
||||
fn default() -> Self {
|
||||
BlockEncoder::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockEncoder {
|
||||
pub fn new() -> BlockEncoder {
|
||||
BlockEncoder {
|
||||
@@ -52,23 +46,19 @@ impl BlockEncoder {
|
||||
/// We ensure that the OutputBuffer is aligned on 128 bits
|
||||
/// in order to run SSE2 linear search on it.
|
||||
#[repr(align(128))]
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockDecoder {
|
||||
bitpacker: BitPacker4x,
|
||||
output: AlignedBuffer,
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
impl Default for BlockDecoder {
|
||||
fn default() -> Self {
|
||||
impl BlockDecoder {
|
||||
pub fn new() -> BlockDecoder {
|
||||
BlockDecoder::with_val(0u32)
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockDecoder {
|
||||
pub fn with_val(val: u32) -> BlockDecoder {
|
||||
BlockDecoder {
|
||||
bitpacker: BitPacker4x::new(),
|
||||
@@ -100,8 +90,8 @@ impl BlockDecoder {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn output_aligned(&self) -> &AlignedBuffer {
|
||||
&self.output
|
||||
pub(crate) fn output_aligned(&self) -> (&AlignedBuffer, usize) {
|
||||
(&self.output, self.output_len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -144,14 +134,11 @@ pub trait VIntDecoder {
|
||||
/// For instance, if the delta-encoded values are `1, 3, 9`, and the
/// `offset` is 5, then the output will be:
/// `5 + 1 = 6, 6 + 3 = 9, 9 + 9 = 18`
|
||||
///
|
||||
/// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
|
||||
fn uncompress_vint_sorted(
|
||||
fn uncompress_vint_sorted<'a>(
|
||||
&mut self,
|
||||
compressed_data: &[u8],
|
||||
compressed_data: &'a [u8],
|
||||
offset: u32,
|
||||
num_els: usize,
|
||||
padding: u32,
|
||||
) -> usize;
|
||||
|
||||
/// Uncompress an array of `u32s`, compressed using variable
|
||||
@@ -159,14 +146,7 @@ pub trait VIntDecoder {
|
||||
///
|
||||
/// The method takes a number of int to decompress, and returns
|
||||
/// the amount of bytes that were read to decompress them.
|
||||
///
|
||||
/// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
|
||||
fn uncompress_vint_unsorted(
|
||||
&mut self,
|
||||
compressed_data: &[u8],
|
||||
num_els: usize,
|
||||
padding: u32,
|
||||
) -> usize;
|
||||
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize;
|
||||
}
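
// Editor's illustration (toy code, not the crate's implementation): the delta
// decoding described in the doc comment of `uncompress_vint_sorted` above turns
// deltas `1, 3, 9` with offset `5` into `6, 9, 18`.
fn cumulative_from_deltas(deltas: &[u32], offset: u32) -> Vec<u32> {
    let mut prev = offset;
    deltas
        .iter()
        .map(|&delta| {
            prev += delta;
            prev
        })
        .collect()
}

#[test]
fn delta_decoding_example() {
    assert_eq!(cumulative_from_deltas(&[1, 3, 9], 5), vec![6, 9, 18]);
}
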
|
||||
|
||||
impl VIntEncoder for BlockEncoder {
|
||||
@@ -180,26 +160,18 @@ impl VIntEncoder for BlockEncoder {
|
||||
}
|
||||
|
||||
impl VIntDecoder for BlockDecoder {
|
||||
fn uncompress_vint_sorted(
|
||||
fn uncompress_vint_sorted<'a>(
|
||||
&mut self,
|
||||
compressed_data: &[u8],
|
||||
compressed_data: &'a [u8],
|
||||
offset: u32,
|
||||
num_els: usize,
|
||||
padding: u32,
|
||||
) -> usize {
|
||||
self.output_len = num_els;
|
||||
self.output.0.iter_mut().for_each(|el| *el = padding);
|
||||
vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
|
||||
}
|
||||
|
||||
fn uncompress_vint_unsorted(
|
||||
&mut self,
|
||||
compressed_data: &[u8],
|
||||
num_els: usize,
|
||||
padding: u32,
|
||||
) -> usize {
|
||||
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
|
||||
self.output_len = num_els;
|
||||
self.output.0.iter_mut().for_each(|el| *el = padding);
|
||||
vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
|
||||
}
|
||||
}
|
||||
@@ -208,14 +180,13 @@ impl VIntDecoder for BlockDecoder {
|
||||
pub mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::TERMINATED;
|
||||
|
||||
#[test]
|
||||
fn test_encode_sorted_block() {
|
||||
let vals: Vec<u32> = (0u32..128u32).map(|i| i * 7).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 0);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 0, num_bits);
|
||||
assert_eq!(consumed_num_bytes, compressed_data.len());
|
||||
@@ -228,9 +199,9 @@ pub mod tests {
|
||||
#[test]
|
||||
fn test_encode_sorted_block_with_offset() {
|
||||
let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i * 7).collect();
|
||||
let mut encoder = BlockEncoder::default();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 10);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 10, num_bits);
|
||||
assert_eq!(consumed_num_bytes, compressed_data.len());
|
||||
@@ -245,11 +216,11 @@ pub mod tests {
|
||||
let mut compressed: Vec<u8> = Vec::new();
|
||||
let n = 128;
|
||||
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32).collect();
|
||||
let mut encoder = BlockEncoder::default();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 10);
|
||||
compressed.extend_from_slice(compressed_data);
|
||||
compressed.push(173u8);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(&compressed, 10, num_bits);
|
||||
assert_eq!(consumed_num_bytes, compressed.len() - 1);
|
||||
@@ -265,11 +236,11 @@ pub mod tests {
|
||||
let mut compressed: Vec<u8> = Vec::new();
|
||||
let n = 128;
|
||||
let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32 % 12).collect();
|
||||
let mut encoder = BlockEncoder::default();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let (num_bits, compressed_data) = encoder.compress_block_unsorted(&vals);
|
||||
compressed.extend_from_slice(compressed_data);
|
||||
compressed.push(173u8);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let consumed_num_bytes = decoder.uncompress_block_unsorted(&compressed, num_bits);
|
||||
assert_eq!(consumed_num_bytes + 1, compressed.len());
|
||||
@@ -280,27 +251,20 @@ pub mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_decoder_initialization() {
|
||||
let block = BlockDecoder::with_val(TERMINATED);
|
||||
assert_eq!(block.output(0), TERMINATED);
|
||||
}
|
||||
#[test]
|
||||
fn test_encode_vint() {
|
||||
const PADDING_VALUE: u32 = 234_234_345u32;
|
||||
let expected_length = 154;
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
|
||||
for offset in &[0u32, 1u32, 2u32] {
|
||||
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
|
||||
assert!(encoded_data.len() <= expected_length);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let consumed_num_bytes =
|
||||
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
|
||||
assert_eq!(consumed_num_bytes, encoded_data.len());
|
||||
assert_eq!(input, decoder.output_array());
|
||||
for i in input.len()..COMPRESSION_BLOCK_SIZE {
|
||||
assert_eq!(decoder.output(i), PADDING_VALUE);
|
||||
{
|
||||
let expected_length = 154;
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
|
||||
for offset in &[0u32, 1u32, 2u32] {
|
||||
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
|
||||
assert!(encoded_data.len() <= expected_length);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
let consumed_num_bytes =
|
||||
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
|
||||
assert_eq!(consumed_num_bytes, encoded_data.len());
|
||||
assert_eq!(input, decoder.output_array());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -310,7 +274,6 @@ pub mod tests {
|
||||
mod bench {
|
||||
|
||||
use super::*;
|
||||
use crate::TERMINATED;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::Rng;
|
||||
use rand::SeedableRng;
|
||||
@@ -341,7 +304,7 @@ mod bench {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
||||
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
b.iter(|| {
|
||||
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
|
||||
});
|
||||
@@ -376,9 +339,9 @@ mod bench {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
||||
let compressed = encoder.compress_vint_sorted(&data, 0u32);
|
||||
let mut decoder = BlockDecoder::default();
|
||||
let mut decoder = BlockDecoder::new();
|
||||
b.iter(|| {
|
||||
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED);
|
||||
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ pub(crate) fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn uncompress_sorted(compressed_data: &[u8], output: &mut [u32], offset: u32) -> usize {
|
||||
pub fn uncompress_sorted<'a>(compressed_data: &'a [u8], output: &mut [u32], offset: u32) -> usize {
|
||||
let mut read_byte = 0;
|
||||
let mut result = offset;
|
||||
for output_mut in output.iter_mut() {
|
||||
|
||||
@@ -3,8 +3,11 @@ Postings module (also called inverted index)
|
||||
*/
|
||||
|
||||
mod block_search;
|
||||
mod block_segment_postings;
|
||||
pub(crate) mod compression;
|
||||
/// Postings module
|
||||
///
|
||||
/// Postings, also called inverted lists, are the key data structure
/// of full-text search.
|
||||
mod postings;
|
||||
mod postings_writer;
|
||||
mod recorder;
|
||||
@@ -19,15 +22,18 @@ pub(crate) use self::block_search::BlockSearcher;
|
||||
pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
|
||||
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
|
||||
|
||||
use self::compression::COMPRESSION_BLOCK_SIZE;
|
||||
pub use self::postings::Postings;
|
||||
pub(crate) use self::skip::{BlockInfo, SkipReader};
|
||||
pub(crate) use self::skip::SkipReader;
|
||||
pub use self::term_info::TermInfo;
|
||||
|
||||
pub use self::block_segment_postings::BlockSegmentPostings;
|
||||
pub use self::segment_postings::SegmentPostings;
|
||||
pub use self::segment_postings::{BlockSegmentPostings, SegmentPostings};
|
||||
|
||||
pub(crate) use self::stacker::compute_table_size;
|
||||
|
||||
pub use crate::common::HasLen;
|
||||
|
||||
pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32;
|
||||
pub(crate) type UnorderedTermId = u64;
|
||||
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
|
||||
@@ -40,15 +46,15 @@ pub(crate) enum FreqReadingOption {
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::InvertedIndexSerializer;
|
||||
use super::Postings;
|
||||
|
||||
use super::*;
|
||||
use crate::core::Index;
|
||||
use crate::core::SegmentComponent;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::docset::{DocSet, SkipResult};
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::indexer::operation::AddOperation;
|
||||
use crate::indexer::SegmentWriter;
|
||||
use crate::indexer::{IndexWriterConfig, SegmentWriter, ResourceManager};
|
||||
use crate::merge_policy::NoMergePolicy;
|
||||
use crate::query::Scorer;
|
||||
use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
|
||||
@@ -56,7 +62,6 @@ pub mod tests {
|
||||
use crate::schema::{IndexRecordOption, TextFieldIndexing};
|
||||
use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
|
||||
use crate::DocId;
|
||||
use crate::HasLen;
|
||||
use crate::Score;
|
||||
use once_cell::sync::Lazy;
|
||||
use rand::rngs::StdRng;
|
||||
@@ -64,101 +69,102 @@ pub mod tests {
|
||||
use std::iter;
|
||||
|
||||
#[test]
|
||||
pub fn test_position_write() -> crate::Result<()> {
|
||||
pub fn test_position_write() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut segment = index.new_segment();
|
||||
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment)?;
|
||||
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4, None)?;
|
||||
field_serializer.new_term("abc".as_bytes(), 12u32)?;
|
||||
for doc_id in 0u32..120u32 {
|
||||
let delta_positions = vec![1, 2, 3, 2];
|
||||
field_serializer.write_doc(doc_id, 4, &delta_positions)?;
|
||||
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
|
||||
{
|
||||
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
|
||||
field_serializer.new_term("abc".as_bytes()).unwrap();
|
||||
for doc_id in 0u32..120u32 {
|
||||
let delta_positions = vec![1, 2, 3, 2];
|
||||
field_serializer
|
||||
.write_doc(doc_id, 4, &delta_positions)
|
||||
.unwrap();
|
||||
}
|
||||
field_serializer.close_term().unwrap();
|
||||
}
|
||||
field_serializer.close_term()?;
|
||||
posting_serializer.close()?;
|
||||
let read = segment.open_read(SegmentComponent::POSITIONS)?;
|
||||
posting_serializer.close().unwrap();
|
||||
let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
|
||||
assert!(read.len() <= 140);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_skip_positions() -> crate::Result<()> {
|
||||
pub fn test_skip_positions() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let title = schema_builder.add_text_field("title", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 30_000_000).unwrap();
|
||||
index_writer.add_document(doc!(title => r#"abc abc abc"#));
|
||||
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
|
||||
for _ in 0..1_000 {
|
||||
index_writer.add_document(doc!(title => r#"abc abc abc"#));
|
||||
}
|
||||
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
|
||||
index_writer.commit()?;
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
let searcher = index.reader()?.searcher();
|
||||
let inverted_index = searcher.segment_reader(0u32).inverted_index(title)?;
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
|
||||
let term = Term::from_field_text(title, "abc");
|
||||
let mut positions = Vec::new();
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
postings.advance();
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 1, 2], &positions[..]);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 1, 2], &positions[..]);
|
||||
assert_eq!(postings.advance(), 1);
|
||||
postings.advance();
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
postings.advance();
|
||||
postings.advance();
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert_eq!(postings.skip_next(1), SkipResult::Reached);
|
||||
assert_eq!(postings.doc(), 1);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), 1);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.seek(1), 1);
|
||||
assert_eq!(postings.doc(), 1);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.seek(1002), 1002);
|
||||
assert_eq!(postings.skip_next(1002), SkipResult::Reached);
|
||||
assert_eq!(postings.doc(), 1002);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert_eq!(postings.seek(100), 100);
|
||||
assert_eq!(postings.seek(1002), 1002);
|
||||
assert_eq!(postings.skip_next(100), SkipResult::Reached);
|
||||
assert_eq!(postings.skip_next(1002), SkipResult::Reached);
|
||||
assert_eq!(postings.doc(), 1002);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&[0, 5], &positions[..]);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_drop_token_that_are_too_long() -> crate::Result<()> {
|
||||
pub fn test_drop_token_that_are_too_long() {
|
||||
let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
|
||||
let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
|
||||
exceeding_token_text.push_str(" hello");
|
||||
@@ -175,7 +181,7 @@ pub mod tests {
|
||||
.tokenizers()
|
||||
.register("simple_no_truncation", SimpleTokenizer);
|
||||
let reader = index.reader().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>exceeding_token_text));
|
||||
@@ -183,7 +189,7 @@ pub mod tests {
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0u32);
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
let inverted_index = segment_reader.inverted_index(text_field);
|
||||
assert_eq!(inverted_index.terms().num_terms(), 1);
|
||||
let mut bytes = vec![];
|
||||
assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
|
||||
@@ -195,60 +201,70 @@ pub mod tests {
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(1u32);
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
let inverted_index = segment_reader.inverted_index(text_field);
|
||||
assert_eq!(inverted_index.terms().num_terms(), 1);
|
||||
let mut bytes = vec![];
|
||||
assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
|
||||
assert_eq!(&bytes[..], ok_token_text.as_bytes());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_position_and_fieldnorm1() -> crate::Result<()> {
|
||||
pub fn test_position_and_fieldnorm1() {
|
||||
let mut positions = Vec::new();
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let segment = index.new_segment();
|
||||
|
||||
{
|
||||
let mut segment_writer =
|
||||
SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
|
||||
let resource_manager = ResourceManager::default();
|
||||
let segment = {
|
||||
let mut segment_writer = SegmentWriter::for_segment(
|
||||
&IndexWriterConfig::for_test(),
|
||||
index.new_segment(),
|
||||
&schema,
|
||||
index.tokenizers(),
|
||||
resource_manager
|
||||
)
|
||||
.unwrap();
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
// checking that position works if the field has two values
|
||||
doc.add_text(text_field, "a b a c a d a a.");
|
||||
doc.add_text(text_field, "d d d d a");
|
||||
let op = AddOperation {
|
||||
opstamp: 0u64,
|
||||
document: doc!(
|
||||
text_field => "a b a c a d a a.",
|
||||
text_field => "d d d d a"
|
||||
),
|
||||
document: doc,
|
||||
};
|
||||
segment_writer.add_document(op, &schema)?;
|
||||
segment_writer.add_document(op, &schema).unwrap();
|
||||
}
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "b a");
|
||||
let op = AddOperation {
|
||||
opstamp: 1u64,
|
||||
document: doc!(text_field => "b a"),
|
||||
document: doc,
|
||||
};
|
||||
segment_writer.add_document(op, &schema).unwrap();
|
||||
}
|
||||
for i in 2..1000 {
|
||||
let mut text: String = iter::repeat("e ").take(i).collect();
|
||||
let mut doc = Document::default();
|
||||
let mut text = iter::repeat("e ").take(i).collect::<String>();
|
||||
text.push_str(" a");
|
||||
doc.add_text(text_field, &text);
|
||||
let op = AddOperation {
|
||||
opstamp: 2u64,
|
||||
document: doc!(text_field => text),
|
||||
document: doc,
|
||||
};
|
||||
segment_writer.add_document(op, &schema).unwrap();
|
||||
}
|
||||
segment_writer.finalize()?;
|
||||
}
|
||||
let (segment, _) = segment_writer.finalize().unwrap();
|
||||
segment
|
||||
};
|
||||
{
|
||||
let segment_reader = SegmentReader::open(&segment)?;
|
||||
let segment_reader = SegmentReader::open(&segment).unwrap();
|
||||
{
|
||||
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
|
||||
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
|
||||
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
|
||||
for i in 2..1000 {
|
||||
@@ -261,41 +277,43 @@ pub mod tests {
|
||||
{
|
||||
let term_a = Term::from_field_text(text_field, "abcdef");
|
||||
assert!(segment_reader
|
||||
.inverted_index(term_a.field())?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.inverted_index(term_a.field())
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.is_none());
|
||||
}
|
||||
{
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
let mut postings_a = segment_reader
|
||||
.inverted_index(term_a.field())?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.inverted_index(term_a.field())
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert_eq!(postings_a.len(), 1000);
|
||||
assert!(postings_a.advance());
|
||||
assert_eq!(postings_a.doc(), 0);
|
||||
assert_eq!(postings_a.term_freq(), 6);
|
||||
postings_a.positions(&mut positions);
|
||||
assert_eq!(&positions[..], [0, 2, 4, 6, 7, 13]);
|
||||
assert_eq!(postings_a.advance(), 1u32);
|
||||
assert!(postings_a.advance());
|
||||
assert_eq!(postings_a.doc(), 1u32);
|
||||
assert_eq!(postings_a.term_freq(), 1);
|
||||
for i in 2u32..1000u32 {
|
||||
assert_eq!(postings_a.advance(), i);
|
||||
assert!(postings_a.advance());
|
||||
assert_eq!(postings_a.term_freq(), 1);
|
||||
postings_a.positions(&mut positions);
|
||||
assert_eq!(&positions[..], [i]);
|
||||
assert_eq!(postings_a.doc(), i);
|
||||
}
|
||||
assert_eq!(postings_a.advance(), TERMINATED);
|
||||
assert!(!postings_a.advance());
|
||||
}
|
||||
{
|
||||
let term_e = Term::from_field_text(text_field, "e");
|
||||
let mut postings_e = segment_reader
|
||||
.inverted_index(term_e.field())?
|
||||
.read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.inverted_index(term_e.field())
|
||||
.read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert_eq!(postings_e.len(), 1000 - 2);
|
||||
for i in 2u32..1000u32 {
|
||||
assert!(postings_e.advance());
|
||||
assert_eq!(postings_e.term_freq(), i);
|
||||
postings_e.positions(&mut positions);
|
||||
assert_eq!(positions.len(), i as usize);
|
||||
@@ -303,42 +321,48 @@ pub mod tests {
|
||||
assert_eq!(positions[j], (j as u32));
|
||||
}
|
||||
assert_eq!(postings_e.doc(), i);
|
||||
postings_e.advance();
|
||||
}
|
||||
assert_eq!(postings_e.doc(), TERMINATED);
|
||||
assert!(!postings_e.advance());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_position_and_fieldnorm2() -> crate::Result<()> {
|
||||
pub fn test_position_and_fieldnorm2() {
|
||||
let mut positions: Vec<u32> = Vec::new();
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.add_document(doc!(text_field => "g b b d c g c"));
|
||||
index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "g b b d c g c");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "g a b b a d c g c");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut postings = segment_reader
|
||||
.inverted_index(text_field)?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.inverted_index(text_field)
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.unwrap();
|
||||
assert!(postings.advance());
|
||||
assert_eq!(postings.doc(), 1u32);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&positions[..], &[1u32, 4]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skip_next() -> crate::Result<()> {
|
||||
fn test_skip_next() {
|
||||
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
|
||||
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
|
||||
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
|
||||
@@ -349,100 +373,105 @@ pub mod tests {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let value_field = schema_builder.add_u64_field("value", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
for i in 0u64..num_docs as u64 {
|
||||
let doc = doc!(value_field => 2u64, value_field => i % 2u64);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for i in 0..num_docs {
|
||||
let mut doc = Document::default();
|
||||
doc.add_u64(value_field, 2);
|
||||
doc.add_u64(value_field, (i % 2) as u64);
|
||||
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
index
|
||||
};
|
||||
let searcher = index.reader()?.searcher();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
// check that the basic usage works
|
||||
for i in 0..num_docs - 1 {
|
||||
for j in i + 1..num_docs {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
assert_eq!(segment_postings.seek(i), i);
|
||||
|
||||
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), i);
|
||||
|
||||
assert_eq!(segment_postings.seek(j), j);
|
||||
assert_eq!(segment_postings.skip_next(j), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), j);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
// check that `skip_next` advances the iterator
|
||||
assert!(segment_postings.advance());
|
||||
assert_eq!(segment_postings.doc(), 0);
|
||||
|
||||
assert_eq!(segment_postings.seek(1), 1);
|
||||
assert_eq!(segment_postings.skip_next(1), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), 1);
|
||||
|
||||
assert_eq!(segment_postings.seek(1), 1);
|
||||
assert_eq!(segment_postings.doc(), 1);
|
||||
assert_eq!(segment_postings.skip_next(1), SkipResult::OverStep);
|
||||
assert_eq!(segment_postings.doc(), 2);
|
||||
|
||||
// check that going beyond the end is handled
|
||||
assert_eq!(segment_postings.seek(num_docs), TERMINATED);
|
||||
assert_eq!(segment_postings.skip_next(num_docs), SkipResult::End);
|
||||
}
|
||||
|
||||
// check that filtering works
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_0.field())?
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)?
|
||||
.inverted_index(term_0.field())
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
for i in 0..num_docs / 2 {
|
||||
assert_eq!(segment_postings.seek(i * 2), i * 2);
|
||||
assert_eq!(segment_postings.skip_next(i * 2), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), i * 2);
|
||||
}
|
||||
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_0.field())?
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)?
|
||||
.inverted_index(term_0.field())
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
for i in 0..num_docs / 2 - 1 {
|
||||
assert!(segment_postings.seek(i * 2 + 1) > (i * 1) * 2);
|
||||
assert_eq!(segment_postings.skip_next(i * 2 + 1), SkipResult::OverStep);
|
||||
assert_eq!(segment_postings.doc(), (i + 1) * 2);
|
||||
}
|
||||
}
|
||||
|
||||
// delete some of the documents
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.delete_term(term_0);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
let searcher = index.reader()?.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
// make sure seeking still works
|
||||
for i in 0..num_docs {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
if i % 2 == 0 {
|
||||
assert_eq!(segment_postings.seek(i), i);
|
||||
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), i);
|
||||
assert!(segment_reader.is_deleted(i));
|
||||
} else {
|
||||
assert_eq!(segment_postings.seek(i), i);
|
||||
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), i);
|
||||
}
|
||||
}
|
||||
@@ -450,23 +479,19 @@ pub mod tests {
|
||||
// now try with a longer sequence
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
let mut last = 2; // start from 5 to avoid seeking to 3 twice
|
||||
let mut cur = 3;
|
||||
loop {
|
||||
let seek = segment_postings.seek(cur);
|
||||
if seek == TERMINATED {
|
||||
break;
|
||||
}
|
||||
assert_eq!(seek, segment_postings.doc());
|
||||
if seek == cur {
|
||||
assert_eq!(segment_postings.doc(), cur);
|
||||
} else {
|
||||
assert_eq!(segment_postings.doc(), cur + 1);
|
||||
match segment_postings.skip_next(cur) {
|
||||
SkipResult::End => break,
|
||||
SkipResult::Reached => assert_eq!(segment_postings.doc(), cur),
|
||||
SkipResult::OverStep => assert_eq!(segment_postings.doc(), cur + 1),
|
||||
}
|
||||
|
||||
let next = cur + last;
|
||||
last = cur;
|
||||
cur = next;
|
||||
@@ -476,19 +501,20 @@ pub mod tests {
|
||||
|
||||
// delete everything else
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.delete_term(term_1);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
let searcher = index.reader()?.searcher();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
|
||||
// finally, check that it's empty
|
||||
{
|
||||
let searchable_segment_ids = index.searchable_segment_ids()?;
|
||||
let searchable_segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("could not get index segment ids");
|
||||
assert!(searchable_segment_ids.is_empty());
|
||||
assert_eq!(searcher.num_docs(), 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub static TERM_A: Lazy<Term> = Lazy::new(|| {
|
||||
@@ -518,7 +544,7 @@ pub mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
let posting_list_size = 1_000_000;
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for _ in 0..posting_list_size {
|
||||
let mut doc = Document::default();
|
||||
if rng.gen_bool(1f64 / 15f64) {
|
||||
@@ -551,7 +577,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet> DocSet for UnoptimizedDocSet<TDocSet> {
|
||||
fn advance(&mut self) -> DocId {
|
||||
fn advance(&mut self) -> bool {
|
||||
self.0.advance()
|
||||
}
|
||||
|
||||
@@ -576,26 +602,31 @@ pub mod tests {
|
||||
) {
|
||||
for target in targets {
|
||||
let mut postings_opt = postings_factory();
|
||||
if target < postings_opt.doc() {
|
||||
continue;
|
||||
}
|
||||
let mut postings_unopt = UnoptimizedDocSet::wrap(postings_factory());
|
||||
let skip_result_opt = postings_opt.seek(target);
|
||||
let skip_result_unopt = postings_unopt.seek(target);
|
||||
let skip_result_opt = postings_opt.skip_next(target);
|
||||
let skip_result_unopt = postings_unopt.skip_next(target);
|
||||
assert_eq!(
|
||||
skip_result_unopt, skip_result_opt,
|
||||
"Failed while skipping to {}",
|
||||
target
|
||||
);
|
||||
assert!(skip_result_opt >= target);
|
||||
assert_eq!(skip_result_opt, postings_opt.doc());
|
||||
if skip_result_opt == TERMINATED {
|
||||
return;
|
||||
match skip_result_opt {
|
||||
SkipResult::Reached => assert_eq!(postings_opt.doc(), target),
|
||||
SkipResult::OverStep => assert!(postings_opt.doc() > target),
|
||||
SkipResult::End => {
|
||||
return;
|
||||
}
|
||||
}
|
||||
while postings_opt.doc() != TERMINATED {
|
||||
assert_eq!(postings_opt.doc(), postings_unopt.doc());
|
||||
assert_eq!(postings_opt.advance(), postings_unopt.advance());
|
||||
while postings_opt.advance() {
|
||||
assert!(postings_unopt.advance());
|
||||
assert_eq!(
|
||||
postings_opt.doc(),
|
||||
postings_unopt.doc(),
|
||||
"Failed while skipping to {}",
|
||||
target
|
||||
);
|
||||
}
|
||||
assert!(!postings_unopt.advance());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -604,7 +635,7 @@ pub mod tests {
|
||||
mod bench {
|
||||
|
||||
use super::tests::*;
|
||||
use crate::docset::TERMINATED;
|
||||
use crate::docset::SkipResult;
|
||||
use crate::query::Intersection;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::tests;
|
||||
@@ -620,9 +651,9 @@ mod bench {
|
||||
b.iter(|| {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(TERM_A.field())
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)?
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
while segment_postings.advance() != TERMINATED {}
|
||||
while segment_postings.advance() {}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -635,22 +666,18 @@ mod bench {
|
||||
let segment_postings_a = segment_reader
|
||||
.inverted_index(TERM_A.field())
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let segment_postings_b = segment_reader
|
||||
.inverted_index(TERM_B.field())
|
||||
.read_postings(&*TERM_B, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let segment_postings_c = segment_reader
|
||||
.inverted_index(TERM_C.field())
|
||||
.read_postings(&*TERM_C, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let segment_postings_d = segment_reader
|
||||
.inverted_index(TERM_D.field())
|
||||
.read_postings(&*TERM_D, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let mut intersection = Intersection::new(vec![
|
||||
segment_postings_a,
|
||||
@@ -658,7 +685,7 @@ mod bench {
|
||||
segment_postings_c,
|
||||
segment_postings_d,
|
||||
]);
|
||||
while intersection.advance() != TERMINATED {}
|
||||
while intersection.advance() {}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -671,14 +698,14 @@ mod bench {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(TERM_A.field())
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let mut existing_docs = Vec::new();
|
||||
segment_postings.advance();
|
||||
for doc in &docs {
|
||||
if *doc >= segment_postings.doc() {
|
||||
existing_docs.push(*doc);
|
||||
if segment_postings.seek(*doc) == TERMINATED {
|
||||
if segment_postings.skip_next(*doc) == SkipResult::End {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -690,7 +717,7 @@ mod bench {
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
for doc in &existing_docs {
|
||||
if segment_postings.seek(*doc) == TERMINATED {
|
||||
if segment_postings.skip_next(*doc) == SkipResult::End {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -729,9 +756,8 @@ mod bench {
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
let mut s = 0u32;
|
||||
while segment_postings.doc() != TERMINATED {
|
||||
while segment_postings.advance() {
|
||||
s += (segment_postings.doc() & n) % 1024;
|
||||
segment_postings.advance();
|
||||
}
|
||||
s
|
||||
});
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use super::stacker::{Addr, MemoryArena, TermHashMap};
|
||||
|
||||
use crate::fieldnorm::FieldNormReaders;
|
||||
use crate::postings::recorder::{
|
||||
BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
|
||||
};
|
||||
@@ -38,8 +37,12 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter>
|
||||
| FieldType::I64(_)
|
||||
| FieldType::F64(_)
|
||||
| FieldType::Date(_)
|
||||
| FieldType::Bytes(_)
|
||||
| FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
|
||||
FieldType::Bytes => {
|
||||
// FieldType::Bytes cannot actually be indexed.
|
||||
// TODO fix during the indexer refactoring described in #276
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,7 +95,7 @@ impl MultiFieldPostingsWriter {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
pub fn mem_usage(&self) -> u64 {
|
||||
self.term_index.mem_usage() + self.heap.mem_usage()
|
||||
}
|
||||
|
||||
@@ -101,7 +104,6 @@ impl MultiFieldPostingsWriter {
|
||||
doc: DocId,
|
||||
field: Field,
|
||||
token_stream: &mut dyn TokenStream,
|
||||
term_buffer: &mut Term,
|
||||
) -> u32 {
|
||||
let postings_writer =
|
||||
self.per_field_postings_writers[field.field_id() as usize].deref_mut();
|
||||
@@ -111,7 +113,6 @@ impl MultiFieldPostingsWriter {
|
||||
field,
|
||||
token_stream,
|
||||
&mut self.heap,
|
||||
term_buffer,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -127,7 +128,6 @@ impl MultiFieldPostingsWriter {
|
||||
pub fn serialize(
|
||||
&self,
|
||||
serializer: &mut InvertedIndexSerializer,
|
||||
fieldnorm_readers: FieldNormReaders,
|
||||
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
|
||||
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
|
||||
self.term_index.iter().collect();
|
||||
@@ -157,17 +157,12 @@ impl MultiFieldPostingsWriter {
|
||||
unordered_term_mappings.insert(field, mapping);
|
||||
}
|
||||
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {}
|
||||
FieldType::Bytes(_) => {}
|
||||
FieldType::Bytes => {}
|
||||
}
|
||||
|
||||
let postings_writer =
|
||||
self.per_field_postings_writers[field.field_id() as usize].as_ref();
|
||||
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
|
||||
let mut field_serializer = serializer.new_field(
|
||||
field,
|
||||
postings_writer.total_num_tokens(),
|
||||
fieldnorm_reader,
|
||||
)?;
|
||||
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
|
||||
let mut field_serializer =
|
||||
serializer.new_field(field, postings_writer.total_num_tokens())?;
|
||||
postings_writer.serialize(
|
||||
&term_offsets[start..stop],
|
||||
&mut field_serializer,
|
||||
@@ -219,20 +214,13 @@ pub trait PostingsWriter {
|
||||
field: Field,
|
||||
token_stream: &mut dyn TokenStream,
|
||||
heap: &mut MemoryArena,
|
||||
term_buffer: &mut Term,
|
||||
) -> u32 {
|
||||
term_buffer.set_field(field);
|
||||
let mut term = Term::for_field(field);
|
||||
let mut sink = |token: &Token| {
|
||||
// We skip all tokens with a len greater than MAX_TOKEN_LEN.
|
||||
if token.text.len() <= MAX_TOKEN_LEN {
|
||||
term_buffer.set_text(token.text.as_str());
|
||||
self.subscribe(
|
||||
term_index,
|
||||
doc_id,
|
||||
token.position as u32,
|
||||
&term_buffer,
|
||||
heap,
|
||||
);
|
||||
term.set_text(token.text.as_str());
|
||||
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
|
||||
} else {
|
||||
info!(
|
||||
"A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
|
||||
@@ -309,8 +297,7 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
|
||||
let mut buffer_lender = BufferLender::default();
|
||||
for &(term_bytes, addr, _) in term_addrs {
|
||||
let recorder: Rec = termdict_heap.read(addr);
|
||||
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
|
||||
serializer.new_term(&term_bytes[4..], term_doc_freq)?;
|
||||
serializer.new_term(&term_bytes[4..])?;
|
||||
recorder.serialize(&mut buffer_lender, serializer, heap)?;
|
||||
serializer.close_term()?;
|
||||
}
|
||||
|
||||
@@ -75,10 +75,6 @@ pub(crate) trait Recorder: Copy + 'static {
|
||||
serializer: &mut FieldSerializer<'_>,
|
||||
heap: &MemoryArena,
|
||||
) -> io::Result<()>;
|
||||
/// Returns the number of documents containing this term.
|
||||
///
|
||||
/// Returns `None` if not available.
|
||||
fn term_doc_freq(&self) -> Option<u32>;
|
||||
}
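The recorder implementations below (`NothingRecorder`, `TermFrequencyRecorder`, `TFAndPositionRecorder`) differ only in how much they capture per posting. A rough standalone illustration of the three levels of detail (a toy enum, not tantivy's recorders):

```rust
// Toy illustration of the three recording levels (not tantivy's types).
#[derive(Debug)]
enum Recorded {
    // NothingRecorder: only the doc id.
    DocOnly(u32),
    // TermFrequencyRecorder: the doc id and how often the term occurs in it.
    DocAndTf { doc: u32, tf: u32 },
    // TFAndPositionRecorder: the doc id and every position of the term.
    DocAndPositions { doc: u32, positions: Vec<u32> },
}

fn main() {
    // The same posting ("the term appears at positions 1 and 4 of doc 7"),
    // recorded with increasing levels of detail.
    let samples = [
        Recorded::DocOnly(7),
        Recorded::DocAndTf { doc: 7, tf: 2 },
        Recorded::DocAndPositions { doc: 7, positions: vec![1, 4] },
    ];
    for sample in &samples {
        println!("{:?}", sample);
    }
}
```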
|
||||
|
||||
/// Only records the doc ids
|
||||
@@ -117,16 +113,11 @@ impl Recorder for NothingRecorder {
|
||||
) -> io::Result<()> {
|
||||
let buffer = buffer_lender.lend_u8();
|
||||
self.stack.read_to_end(heap, buffer);
|
||||
// TODO avoid reading twice.
|
||||
for doc in VInt32Reader::new(&buffer[..]) {
|
||||
serializer.write_doc(doc as u32, 0u32, &[][..])?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn term_doc_freq(&self) -> Option<u32> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Recorder encoding document ids, and term frequencies
|
||||
@@ -135,7 +126,6 @@ pub struct TermFrequencyRecorder {
|
||||
stack: ExpUnrolledLinkedList,
|
||||
current_doc: DocId,
|
||||
current_tf: u32,
|
||||
term_doc_freq: u32,
|
||||
}
|
||||
|
||||
impl Recorder for TermFrequencyRecorder {
|
||||
@@ -144,7 +134,6 @@ impl Recorder for TermFrequencyRecorder {
|
||||
stack: ExpUnrolledLinkedList::new(),
|
||||
current_doc: u32::max_value(),
|
||||
current_tf: 0u32,
|
||||
term_doc_freq: 0u32,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,7 +142,6 @@ impl Recorder for TermFrequencyRecorder {
|
||||
}
|
||||
|
||||
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
|
||||
self.term_doc_freq += 1;
|
||||
self.current_doc = doc;
|
||||
let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
|
||||
}
|
||||
@@ -184,10 +172,6 @@ impl Recorder for TermFrequencyRecorder {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn term_doc_freq(&self) -> Option<u32> {
|
||||
Some(self.term_doc_freq)
|
||||
}
|
||||
}
|
||||
|
||||
/// Recorder encoding term frequencies as well as positions.
|
||||
@@ -195,14 +179,12 @@ impl Recorder for TermFrequencyRecorder {
|
||||
pub struct TFAndPositionRecorder {
|
||||
stack: ExpUnrolledLinkedList,
|
||||
current_doc: DocId,
|
||||
term_doc_freq: u32,
|
||||
}
|
||||
impl Recorder for TFAndPositionRecorder {
|
||||
fn new() -> Self {
|
||||
TFAndPositionRecorder {
|
||||
stack: ExpUnrolledLinkedList::new(),
|
||||
current_doc: u32::max_value(),
|
||||
term_doc_freq: 0u32,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -212,7 +194,6 @@ impl Recorder for TFAndPositionRecorder {
|
||||
|
||||
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
|
||||
self.current_doc = doc;
|
||||
self.term_doc_freq += 1u32;
|
||||
let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
|
||||
}
|
||||
|
||||
@@ -252,10 +233,6 @@ impl Recorder for TFAndPositionRecorder {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn term_doc_freq(&self) -> Option<u32> {
|
||||
Some(self.term_doc_freq)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -1,65 +1,81 @@
|
||||
use crate::common::BitSet;
|
||||
use crate::common::HasLen;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::docset::DocSet;
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::common::{BinarySerializable, VInt};
|
||||
use crate::docset::{DocSet, SkipResult};
|
||||
use crate::positions::PositionReader;
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
use crate::postings::compression::{compressed_block_size, AlignedBuffer};
|
||||
use crate::postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
|
||||
use crate::postings::serializer::PostingsSerializer;
|
||||
use crate::postings::BlockSearcher;
|
||||
use crate::postings::BlockSegmentPostings;
|
||||
use crate::postings::FreqReadingOption;
|
||||
use crate::postings::Postings;
|
||||
use crate::postings::SkipReader;
|
||||
use crate::postings::USE_SKIP_INFO_LIMIT;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::{DocId, TERMINATED};
|
||||
use crate::DocId;
|
||||
use owned_read::OwnedRead;
|
||||
use std::cmp::Ordering;
|
||||
use tantivy_fst::Streamer;
|
||||
|
||||
struct PositionComputer {
|
||||
// store the number of position ints to skip
|
||||
// before reading positions.
|
||||
//
|
||||
// if zero, positions are already loaded in
|
||||
// the positions vec.
|
||||
position_to_skip: usize,
|
||||
position_reader: PositionReader,
|
||||
}
|
||||
|
||||
impl PositionComputer {
|
||||
pub fn new(position_reader: PositionReader) -> PositionComputer {
|
||||
PositionComputer {
|
||||
position_to_skip: 0,
|
||||
position_reader,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_skip(&mut self, num_skip: usize) {
|
||||
self.position_to_skip += num_skip;
|
||||
}
|
||||
|
||||
// Positions can only be read once.
|
||||
pub fn positions_with_offset(&mut self, offset: u32, output: &mut [u32]) {
|
||||
self.position_reader.skip(self.position_to_skip);
|
||||
self.position_to_skip = 0;
|
||||
self.position_reader.read(output);
|
||||
let mut cum = offset;
|
||||
for output_mut in output.iter_mut() {
|
||||
cum += *output_mut;
|
||||
*output_mut = cum;
|
||||
}
|
||||
}
|
||||
}
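Positions are stored as deltas; `positions_with_offset` above turns them into absolute positions with a running sum that starts at `offset`. A minimal standalone sketch of that decoding (assuming delta encoding; toy code, not tantivy's position reader):

```rust
// Turn delta-encoded positions into absolute positions (toy sketch).
fn decode_positions(deltas: &[u32], offset: u32) -> Vec<u32> {
    let mut cum = offset;
    deltas
        .iter()
        .map(|&delta| {
            cum += delta;
            cum
        })
        .collect()
}

fn main() {
    // Deltas [0, 2, 2] with offset 0 decode to absolute positions [0, 2, 4].
    assert_eq!(decode_positions(&[0, 2, 2], 0), vec![0, 2, 4]);
    // A non-zero offset shifts every decoded position.
    assert_eq!(decode_positions(&[1, 3], 10), vec![11, 14]);
}
```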
|
||||
|
||||
/// `SegmentPostings` represents the inverted list or postings associated to
|
||||
/// a term in a `Segment`.
|
||||
///
|
||||
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
|
||||
/// Positions on the other hand, are optionally entirely decoded upfront.
|
||||
#[derive(Clone)]
|
||||
pub struct SegmentPostings {
|
||||
pub(crate) block_cursor: BlockSegmentPostings,
|
||||
block_cursor: BlockSegmentPostings,
|
||||
cur: usize,
|
||||
position_reader: Option<PositionReader>,
|
||||
position_computer: Option<PositionComputer>,
|
||||
block_searcher: BlockSearcher,
|
||||
}
|
||||
|
||||
impl SegmentPostings {
|
||||
/// Returns an empty segment postings object
|
||||
pub fn empty() -> Self {
|
||||
let empty_block_cursor = BlockSegmentPostings::empty();
|
||||
SegmentPostings {
|
||||
block_cursor: BlockSegmentPostings::empty(),
|
||||
cur: 0,
|
||||
position_reader: None,
|
||||
block_cursor: empty_block_cursor,
|
||||
cur: COMPRESSION_BLOCK_SIZE,
|
||||
position_computer: None,
|
||||
block_searcher: BlockSearcher::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the number of non-deleted documents.
|
||||
///
|
||||
/// This method will clone and scan through the posting lists.
|
||||
/// (this is a rather expensive operation).
|
||||
pub fn doc_freq_given_deletes(&self, delete_bitset: &DeleteBitSet) -> u32 {
|
||||
let mut docset = self.clone();
|
||||
let mut doc_freq = 0;
|
||||
loop {
|
||||
let doc = docset.doc();
|
||||
if doc == TERMINATED {
|
||||
return doc_freq;
|
||||
}
|
||||
if delete_bitset.is_alive(doc) {
|
||||
doc_freq += 1u32;
|
||||
}
|
||||
docset.advance();
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the overall number of documents in the block postings.
|
||||
/// It does not take into account whether documents are deleted or not.
|
||||
pub fn doc_freq(&self) -> u32 {
|
||||
self.block_cursor.doc_freq()
|
||||
}
|
||||
|
||||
/// Creates a segment postings object with the given documents
|
||||
/// and no frequency encoded.
|
||||
///
|
||||
@@ -71,9 +87,7 @@ impl SegmentPostings {
|
||||
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
|
||||
let mut buffer = Vec::new();
|
||||
{
|
||||
let mut postings_serializer =
|
||||
PostingsSerializer::new(&mut buffer, 0.0, IndexRecordOption::Basic, None);
|
||||
postings_serializer.new_term(docs.len() as u32);
|
||||
let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false);
|
||||
for &doc in docs {
|
||||
postings_serializer.write_doc(doc, 1u32);
|
||||
}
|
||||
@@ -81,61 +95,17 @@ impl SegmentPostings {
|
||||
.close_term(docs.len() as u32)
|
||||
.expect("In memory Serialization should never fail.");
|
||||
}
|
||||
let block_segment_postings = BlockSegmentPostings::open(
|
||||
let block_segment_postings = BlockSegmentPostings::from_data(
|
||||
docs.len() as u32,
|
||||
FileSlice::from(buffer),
|
||||
OwnedRead::new(buffer),
|
||||
IndexRecordOption::Basic,
|
||||
IndexRecordOption::Basic,
|
||||
)
|
||||
.unwrap();
|
||||
SegmentPostings::from_block_postings(block_segment_postings, None)
|
||||
}
|
||||
|
||||
/// Helper function to create a `SegmentPostings` for tests.
|
||||
#[cfg(test)]
|
||||
pub fn create_from_docs_and_tfs(
|
||||
doc_and_tfs: &[(u32, u32)],
|
||||
fieldnorms: Option<&[u32]>,
|
||||
) -> SegmentPostings {
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::Score;
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
|
||||
let average_field_norm = fieldnorms
|
||||
.map(|fieldnorms| {
|
||||
if fieldnorms.len() == 0 {
|
||||
return 0.0;
|
||||
}
|
||||
let total_num_tokens: u64 = fieldnorms
|
||||
.iter()
|
||||
.map(|&fieldnorm| fieldnorm as u64)
|
||||
.sum::<u64>();
|
||||
total_num_tokens as Score / fieldnorms.len() as Score
|
||||
})
|
||||
.unwrap_or(0.0);
|
||||
let mut postings_serializer = PostingsSerializer::new(
|
||||
&mut buffer,
|
||||
average_field_norm,
|
||||
IndexRecordOption::WithFreqs,
|
||||
fieldnorm_reader,
|
||||
);
|
||||
postings_serializer.new_term(doc_and_tfs.len() as u32);
|
||||
for &(doc, tf) in doc_and_tfs {
|
||||
postings_serializer.write_doc(doc, tf);
|
||||
}
|
||||
postings_serializer
|
||||
.close_term(doc_and_tfs.len() as u32)
|
||||
.unwrap();
|
||||
let block_segment_postings = BlockSegmentPostings::open(
|
||||
doc_and_tfs.len() as u32,
|
||||
FileSlice::from(buffer),
|
||||
IndexRecordOption::WithFreqs,
|
||||
IndexRecordOption::WithFreqs,
|
||||
)
|
||||
.unwrap();
|
||||
SegmentPostings::from_block_postings(block_segment_postings, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentPostings {
|
||||
/// Reads a Segment postings from an &[u8]
|
||||
///
|
||||
/// * `len` - number of documents in the posting list.
|
||||
@@ -144,12 +114,12 @@ impl SegmentPostings {
|
||||
/// frequencies and/or positions
|
||||
pub(crate) fn from_block_postings(
|
||||
segment_block_postings: BlockSegmentPostings,
|
||||
position_reader: Option<PositionReader>,
|
||||
positions_stream_opt: Option<PositionReader>,
|
||||
) -> SegmentPostings {
|
||||
SegmentPostings {
|
||||
block_cursor: segment_block_postings,
|
||||
cur: 0, // cursor within the block
|
||||
position_reader,
|
||||
cur: COMPRESSION_BLOCK_SIZE, // cursor within the block
|
||||
position_computer: positions_stream_opt.map(PositionComputer::new),
|
||||
block_searcher: BlockSearcher::default(),
|
||||
}
|
||||
}
|
||||
@@ -159,60 +129,139 @@ impl DocSet for SegmentPostings {
|
||||
// goes to the next element.
|
||||
// next needs to be called a first time to point to the correct element.
|
||||
#[inline]
|
||||
fn advance(&mut self) -> DocId {
|
||||
debug_assert!(self.block_cursor.block_is_loaded());
|
||||
if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
|
||||
self.cur = 0;
|
||||
self.block_cursor.advance();
|
||||
} else {
|
||||
self.cur += 1;
|
||||
fn advance(&mut self) -> bool {
|
||||
if self.position_computer.is_some() && self.cur < COMPRESSION_BLOCK_SIZE {
|
||||
let term_freq = self.term_freq() as usize;
|
||||
if let Some(position_computer) = self.position_computer.as_mut() {
|
||||
position_computer.add_skip(term_freq);
|
||||
}
|
||||
}
|
||||
self.doc()
|
||||
self.cur += 1;
|
||||
if self.cur >= self.block_cursor.block_len() {
|
||||
self.cur = 0;
|
||||
if !self.block_cursor.advance() {
|
||||
self.cur = COMPRESSION_BLOCK_SIZE;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
fn seek(&mut self, target: DocId) -> DocId {
|
||||
debug_assert!(self.doc() <= target);
|
||||
if self.doc() >= target {
|
||||
return self.doc();
|
||||
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
||||
if !self.advance() {
|
||||
return SkipResult::End;
|
||||
}
|
||||
match self.doc().cmp(&target) {
|
||||
Ordering::Equal => {
|
||||
return SkipResult::Reached;
|
||||
}
|
||||
Ordering::Greater => {
|
||||
return SkipResult::OverStep;
|
||||
}
|
||||
_ => {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
|
||||
self.block_cursor.seek(target);
|
||||
// In the following, thanks to the call to advance above,
|
||||
// we know that the position is not loaded and we need
|
||||
// to skip every doc_freq we cross.
|
||||
|
||||
// At this point we are on the block, that might contain our document.
|
||||
let output = self.block_cursor.docs_aligned();
|
||||
self.cur = self.block_searcher.search_in_block(&output, target);
|
||||
// skip blocks until one that might contain the target
|
||||
// check if we need to go to the next block
|
||||
let mut sum_freqs_skipped: u32 = 0;
|
||||
if !self
|
||||
.block_cursor
|
||||
.docs()
|
||||
.last()
|
||||
.map(|doc| *doc >= target)
|
||||
.unwrap_or(false)
|
||||
// there should always be at least a document in the block
|
||||
// since advance returned.
|
||||
{
|
||||
// we are not in the right block.
|
||||
//
|
||||
// First compute all of the freqs skipped from the current block.
|
||||
if self.position_computer.is_some() {
|
||||
sum_freqs_skipped = self.block_cursor.freqs()[self.cur..].iter().sum();
|
||||
match self.block_cursor.skip_to(target) {
|
||||
BlockSegmentPostingsSkipResult::Success(block_skip_freqs) => {
|
||||
sum_freqs_skipped += block_skip_freqs;
|
||||
}
|
||||
BlockSegmentPostingsSkipResult::Terminated => {
|
||||
return SkipResult::End;
|
||||
}
|
||||
}
|
||||
} else if self.block_cursor.skip_to(target)
|
||||
== BlockSegmentPostingsSkipResult::Terminated
|
||||
{
|
||||
// no positions needed. no need to sum freqs.
|
||||
return SkipResult::End;
|
||||
}
|
||||
self.cur = 0;
|
||||
}
|
||||
|
||||
// The last block is not full and padded with the value TERMINATED,
|
||||
// so that we are guaranteed to have at least one doc in the block (a real one or the padding)
|
||||
// that is greater or equal to the target.
|
||||
debug_assert!(self.cur < COMPRESSION_BLOCK_SIZE);
|
||||
let cur = self.cur;
|
||||
|
||||
// we're in the right block now, start with an exponential search
|
||||
let (output, len) = self.block_cursor.docs_aligned();
|
||||
let new_cur = self
|
||||
.block_searcher
|
||||
.search_in_block(&output, len, cur, target);
|
||||
if let Some(position_computer) = self.position_computer.as_mut() {
|
||||
sum_freqs_skipped += self.block_cursor.freqs()[cur..new_cur].iter().sum::<u32>();
|
||||
position_computer.add_skip(sum_freqs_skipped as usize);
|
||||
}
|
||||
self.cur = new_cur;
|
||||
|
||||
// `doc` is now the first element >= `target`
|
||||
|
||||
// If all docs are smaller than the target, the current block should be incomplete and padded
|
||||
// with the value `TERMINATED`.
|
||||
//
|
||||
// After the search, the cursor should point to the first occurrence of TERMINATED.
|
||||
let doc = output.0[self.cur];
|
||||
let doc = output.0[new_cur];
|
||||
debug_assert!(doc >= target);
|
||||
debug_assert_eq!(doc, self.doc());
|
||||
doc
|
||||
if doc == target {
|
||||
SkipResult::Reached
|
||||
} else {
|
||||
SkipResult::OverStep
|
||||
}
|
||||
}
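The `skip_next` contract shown above: the call always advances at least once, stops on the first doc greater than or equal to the target, and reports whether the target itself was hit. A self-contained toy docset (not tantivy's types) that models the same contract:

```rust
#[derive(Debug, PartialEq)]
enum SkipResult {
    Reached,
    OverStep,
    End,
}

struct VecDocSet {
    docs: Vec<u32>,
    // `None` means neither `advance` nor `skip_next` has been called yet.
    cursor: Option<usize>,
}

impl VecDocSet {
    fn new(docs: Vec<u32>) -> Self {
        VecDocSet { docs, cursor: None }
    }

    fn doc(&self) -> u32 {
        self.docs[self.cursor.expect("call advance/skip_next first")]
    }

    fn advance(&mut self) -> bool {
        let next = self.cursor.map(|c| c + 1).unwrap_or(0);
        if next < self.docs.len() {
            self.cursor = Some(next);
            true
        } else {
            false
        }
    }

    fn skip_next(&mut self, target: u32) -> SkipResult {
        // Like the method above, `skip_next` always advances at least once,
        // so skipping to the current doc again steps over it.
        while self.advance() {
            match self.doc().cmp(&target) {
                std::cmp::Ordering::Less => continue,
                std::cmp::Ordering::Equal => return SkipResult::Reached,
                std::cmp::Ordering::Greater => return SkipResult::OverStep,
            }
        }
        SkipResult::End
    }
}

fn main() {
    let mut docset = VecDocSet::new(vec![0, 1, 2, 5]);
    assert_eq!(docset.skip_next(1), SkipResult::Reached);
    assert_eq!(docset.doc(), 1);
    // Skipping to the same target again oversteps, as in `test_skip_next`.
    assert_eq!(docset.skip_next(1), SkipResult::OverStep);
    assert_eq!(docset.doc(), 2);
    assert_eq!(docset.skip_next(100), SkipResult::End);
}
```

Because the comparison happens only after an initial advance, seeking repeatedly to the same target walks forward, which is exactly the behaviour exercised by `test_skip_next`.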
|
||||
|
||||
/// Return the current document's `DocId`.
|
||||
#[inline(always)]
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Will panic if called without having called advance before.
|
||||
#[inline]
|
||||
fn doc(&self) -> DocId {
|
||||
self.block_cursor.doc(self.cur)
|
||||
let docs = self.block_cursor.docs();
|
||||
debug_assert!(
|
||||
self.cur < docs.len(),
|
||||
"Have you forgotten to call `.advance()` at least once before calling `.doc()` ."
|
||||
);
|
||||
docs[self.cur]
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> u32 {
|
||||
self.len() as u32
|
||||
}
|
||||
|
||||
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||
// finish the current block
|
||||
if self.advance() {
|
||||
for &doc in &self.block_cursor.docs()[self.cur..] {
|
||||
bitset.insert(doc);
|
||||
}
|
||||
// ... iterate through the remaining blocks.
|
||||
while self.block_cursor.advance() {
|
||||
for &doc in self.block_cursor.docs() {
|
||||
bitset.insert(doc);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasLen for SegmentPostings {
|
||||
fn len(&self) -> usize {
|
||||
self.block_cursor.doc_freq() as usize
|
||||
self.block_cursor.doc_freq()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -241,63 +290,515 @@ impl Postings for SegmentPostings {
|
||||
|
||||
fn positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
|
||||
let term_freq = self.term_freq() as usize;
|
||||
if let Some(position_reader) = self.position_reader.as_mut() {
|
||||
let read_offset = self.block_cursor.position_offset()
|
||||
+ (self.block_cursor.freqs()[..self.cur]
|
||||
.iter()
|
||||
.cloned()
|
||||
.sum::<u32>() as u64);
|
||||
if let Some(position_comp) = self.position_computer.as_mut() {
|
||||
output.resize(term_freq, 0u32);
|
||||
position_reader.read(read_offset, &mut output[..]);
|
||||
let mut cum = offset;
|
||||
for output_mut in output.iter_mut() {
|
||||
cum += *output_mut;
|
||||
*output_mut = cum;
|
||||
}
|
||||
position_comp.positions_with_offset(offset, &mut output[..]);
|
||||
} else {
|
||||
output.clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `BlockSegmentPostings` is a cursor iterating over blocks
|
||||
/// of documents.
|
||||
///
|
||||
/// # Warning
|
||||
///
|
||||
/// While it is useful for some very specific high-performance
|
||||
/// use cases, you should prefer using `SegmentPostings` for most usage.
|
||||
pub struct BlockSegmentPostings {
|
||||
doc_decoder: BlockDecoder,
|
||||
freq_decoder: BlockDecoder,
|
||||
freq_reading_option: FreqReadingOption,
|
||||
|
||||
doc_freq: usize,
|
||||
doc_offset: DocId,
|
||||
|
||||
num_vint_docs: usize,
|
||||
|
||||
remaining_data: OwnedRead,
|
||||
skip_reader: SkipReader,
|
||||
}
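`BlockSegmentPostings` hands out one fixed-size block of doc ids at a time; the flat `SegmentPostings` cursor is just an index within the current block. A standalone sketch of block-wise iteration (toy types; tantivy's real block size is `COMPRESSION_BLOCK_SIZE`):

```rust
const BLOCK_SIZE: usize = 4; // stands in for COMPRESSION_BLOCK_SIZE (128)

struct ToyBlockPostings {
    docs: Vec<u32>,
    // Start of the next block to decode.
    next_block_start: usize,
    // Range of the block decoded by the last successful `advance()`.
    current: std::ops::Range<usize>,
}

impl ToyBlockPostings {
    fn new(docs: Vec<u32>) -> Self {
        ToyBlockPostings { docs, next_block_start: 0, current: 0..0 }
    }

    // Before the first `advance()`, the current block is empty.
    fn docs(&self) -> &[u32] {
        &self.docs[self.current.clone()]
    }

    // Move to the next block. Returns false when no block remains.
    fn advance(&mut self) -> bool {
        if self.next_block_start >= self.docs.len() {
            return false;
        }
        let end = (self.next_block_start + BLOCK_SIZE).min(self.docs.len());
        self.current = self.next_block_start..end;
        self.next_block_start = end;
        true
    }
}

fn main() {
    let mut postings = ToyBlockPostings::new((0..10u32).collect());
    assert!(postings.docs().is_empty());
    let mut collected = Vec::new();
    while postings.advance() {
        collected.extend_from_slice(postings.docs());
    }
    assert_eq!(collected, (0..10u32).collect::<Vec<u32>>());
}
```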
|
||||
|
||||
fn split_into_skips_and_postings(
|
||||
doc_freq: u32,
|
||||
mut data: OwnedRead,
|
||||
) -> (Option<OwnedRead>, OwnedRead) {
|
||||
if doc_freq >= USE_SKIP_INFO_LIMIT {
|
||||
let skip_len = VInt::deserialize(&mut data).expect("Data corrupted").0 as usize;
|
||||
let mut postings_data = data.clone();
|
||||
postings_data.advance(skip_len);
|
||||
data.clip(skip_len);
|
||||
(Some(data), postings_data)
|
||||
} else {
|
||||
(None, data)
|
||||
}
|
||||
}
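`split_into_skips_and_postings` above assumes a simple layout: when skip info is present, the term's byte range starts with a VInt-encoded skip length, followed by the skip data and then the postings data. A simplified sketch of that split (a single length byte stands in for a real VInt):

```rust
// Split `[len][skip bytes][postings bytes]` into its two payloads (toy sketch).
fn split_skips_and_postings(data: &[u8]) -> (&[u8], &[u8]) {
    // Toy "VInt": here the skip length fits in one byte for simplicity.
    let skip_len = data[0] as usize;
    let rest = &data[1..];
    (&rest[..skip_len], &rest[skip_len..])
}

fn main() {
    let data = [3u8, 0xAA, 0xBB, 0xCC, 1, 2, 3, 4];
    let (skips, postings) = split_skips_and_postings(&data);
    assert_eq!(skips, &[0xAA, 0xBB, 0xCC][..]);
    assert_eq!(postings, &[1, 2, 3, 4][..]);
}
```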
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub enum BlockSegmentPostingsSkipResult {
|
||||
Terminated,
|
||||
Success(u32), //< number of term freqs to skip
|
||||
}
|
||||
|
||||
impl BlockSegmentPostings {
|
||||
pub(crate) fn from_data(
|
||||
doc_freq: u32,
|
||||
data: OwnedRead,
|
||||
record_option: IndexRecordOption,
|
||||
requested_option: IndexRecordOption,
|
||||
) -> BlockSegmentPostings {
|
||||
let freq_reading_option = match (record_option, requested_option) {
|
||||
(IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
|
||||
(_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
|
||||
(_, _) => FreqReadingOption::ReadFreq,
|
||||
};
|
||||
|
||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
|
||||
let skip_reader = match skip_data_opt {
|
||||
Some(skip_data) => SkipReader::new(skip_data, record_option),
|
||||
None => SkipReader::new(OwnedRead::new(&[][..]), record_option),
|
||||
};
|
||||
let doc_freq = doc_freq as usize;
|
||||
let num_vint_docs = doc_freq % COMPRESSION_BLOCK_SIZE;
|
||||
BlockSegmentPostings {
|
||||
num_vint_docs,
|
||||
doc_decoder: BlockDecoder::new(),
|
||||
freq_decoder: BlockDecoder::with_val(1),
|
||||
freq_reading_option,
|
||||
doc_offset: 0,
|
||||
doc_freq,
|
||||
remaining_data: postings_data,
|
||||
skip_reader,
|
||||
}
|
||||
}
|
||||
|
||||
// Resets the block segment postings to another position
|
||||
// in the postings file.
|
||||
//
|
||||
// This is useful for enumerating through a list of terms,
|
||||
// and consuming the associated posting lists while avoiding
|
||||
// reallocating a `BlockSegmentPostings`.
|
||||
//
|
||||
// # Warning
|
||||
//
|
||||
// This does not reset the positions list.
|
||||
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedRead) {
|
||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
|
||||
let num_vint_docs = (doc_freq as usize) & (COMPRESSION_BLOCK_SIZE - 1);
|
||||
self.num_vint_docs = num_vint_docs;
|
||||
self.remaining_data = postings_data;
|
||||
if let Some(skip_data) = skip_data_opt {
|
||||
self.skip_reader.reset(skip_data);
|
||||
} else {
|
||||
self.skip_reader.reset(OwnedRead::new(&[][..]))
|
||||
}
|
||||
self.doc_offset = 0;
|
||||
self.doc_freq = doc_freq as usize;
|
||||
}
|
||||
|
||||
/// Returns the document frequency associated to this block postings.
|
||||
///
|
||||
/// This `doc_freq` is simply the sum of the lengths of all of the
|
||||
/// blocks, and it does not take into account deleted documents.
|
||||
pub fn doc_freq(&self) -> usize {
|
||||
self.doc_freq
|
||||
}
|
||||
|
||||
/// Returns the array of docs in the current block.
|
||||
///
|
||||
/// Before the first call to `.advance()`, the block
|
||||
/// returned by `.docs()` is empty.
|
||||
#[inline]
|
||||
pub fn docs(&self) -> &[DocId] {
|
||||
self.doc_decoder.output_array()
|
||||
}
|
||||
|
||||
pub(crate) fn docs_aligned(&self) -> (&AlignedBuffer, usize) {
|
||||
self.doc_decoder.output_aligned()
|
||||
}
|
||||
|
||||
/// Return the document at index `idx` of the block.
|
||||
#[inline]
|
||||
pub fn doc(&self, idx: usize) -> u32 {
|
||||
self.doc_decoder.output(idx)
|
||||
}
|
||||
|
||||
/// Return the array of `term freq` in the block.
|
||||
#[inline]
|
||||
pub fn freqs(&self) -> &[u32] {
|
||||
self.freq_decoder.output_array()
|
||||
}
|
||||
|
||||
/// Return the frequency at index `idx` of the block.
|
||||
#[inline]
|
||||
pub fn freq(&self, idx: usize) -> u32 {
|
||||
self.freq_decoder.output(idx)
|
||||
}
|
||||
|
||||
/// Returns the length of the current block.
|
||||
///
|
||||
/// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
|
||||
/// except the last block, which may have a length
|
||||
/// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
|
||||
#[inline]
|
||||
fn block_len(&self) -> usize {
|
||||
self.doc_decoder.output_len
|
||||
}
|
||||
|
||||
/// Position on a block that may contain `doc_id`.
|
||||
/// Always advance the current block.
|
||||
///
|
||||
/// Returns true if a block that has an element greater or equal to the target is found.
|
||||
/// Returning true does not guarantee that the smallest element of the block is smaller
|
||||
/// than the target. It only guarantees that the last element is greater or equal.
|
||||
///
|
||||
/// Returns false iff all of the remaining documents are smaller than
|
||||
/// `doc_id`. In that case, all of these documents are consumed.
|
||||
///
|
||||
pub fn skip_to(&mut self, target_doc: DocId) -> BlockSegmentPostingsSkipResult {
|
||||
let mut skip_freqs = 0u32;
|
||||
while self.skip_reader.advance() {
|
||||
if self.skip_reader.doc() >= target_doc {
|
||||
// the last document of the current block is larger
|
||||
// than the target.
|
||||
//
|
||||
// We found our block!
|
||||
let num_bits = self.skip_reader.doc_num_bits();
|
||||
let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted(
|
||||
self.remaining_data.as_ref(),
|
||||
self.doc_offset,
|
||||
num_bits,
|
||||
);
|
||||
self.remaining_data.advance(num_consumed_bytes);
|
||||
let tf_num_bits = self.skip_reader.tf_num_bits();
|
||||
match self.freq_reading_option {
|
||||
FreqReadingOption::NoFreq => {}
|
||||
FreqReadingOption::SkipFreq => {
|
||||
let num_bytes_to_skip = compressed_block_size(tf_num_bits);
|
||||
self.remaining_data.advance(num_bytes_to_skip);
|
||||
}
|
||||
FreqReadingOption::ReadFreq => {
|
||||
let num_consumed_bytes = self
|
||||
.freq_decoder
|
||||
.uncompress_block_unsorted(self.remaining_data.as_ref(), tf_num_bits);
|
||||
self.remaining_data.advance(num_consumed_bytes);
|
||||
}
|
||||
}
|
||||
self.doc_offset = self.skip_reader.doc();
|
||||
return BlockSegmentPostingsSkipResult::Success(skip_freqs);
|
||||
} else {
|
||||
skip_freqs += self.skip_reader.tf_sum();
|
||||
let advance_len = self.skip_reader.total_block_len();
|
||||
self.doc_offset = self.skip_reader.doc();
|
||||
self.remaining_data.advance(advance_len);
|
||||
}
|
||||
}
|
||||
|
||||
// we are now on the last, incomplete, variable encoded block.
|
||||
if self.num_vint_docs > 0 {
|
||||
let num_compressed_bytes = self.doc_decoder.uncompress_vint_sorted(
|
||||
self.remaining_data.as_ref(),
|
||||
self.doc_offset,
|
||||
self.num_vint_docs,
|
||||
);
|
||||
self.remaining_data.advance(num_compressed_bytes);
|
||||
match self.freq_reading_option {
|
||||
FreqReadingOption::NoFreq | FreqReadingOption::SkipFreq => {}
|
||||
FreqReadingOption::ReadFreq => {
|
||||
self.freq_decoder
|
||||
.uncompress_vint_unsorted(self.remaining_data.as_ref(), self.num_vint_docs);
|
||||
}
|
||||
}
|
||||
self.num_vint_docs = 0;
|
||||
return self
|
||||
.docs()
|
||||
.last()
|
||||
.map(|last_doc| {
|
||||
if *last_doc >= target_doc {
|
||||
BlockSegmentPostingsSkipResult::Success(skip_freqs)
|
||||
} else {
|
||||
BlockSegmentPostingsSkipResult::Terminated
|
||||
}
|
||||
})
|
||||
.unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
|
||||
}
|
||||
BlockSegmentPostingsSkipResult::Terminated
|
||||
}
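`skip_to` walks the skip info block by block: any block whose last doc is still below the target is skipped whole, and the term frequencies of the skipped blocks are summed so the caller can fast-forward the position stream by the same amount. A toy model of that accounting (not tantivy's implementation):

```rust
#[derive(Debug, PartialEq)]
enum SkipToResult {
    Success(u32), // sum of term freqs in the fully skipped blocks
    Terminated,
}

// Per-block skip info: last doc id in the block and the sum of its term freqs.
struct BlockInfo {
    last_doc: u32,
    tf_sum: u32,
}

fn skip_to(blocks: &[BlockInfo], target: u32) -> SkipToResult {
    let mut skipped_freqs = 0u32;
    for block in blocks {
        if block.last_doc >= target {
            // This block may contain the target: stop here.
            return SkipToResult::Success(skipped_freqs);
        }
        // The whole block is below the target: account for its term freqs.
        skipped_freqs += block.tf_sum;
    }
    SkipToResult::Terminated
}

fn main() {
    let blocks = [
        BlockInfo { last_doc: 127, tf_sum: 300 },
        BlockInfo { last_doc: 255, tf_sum: 280 },
        BlockInfo { last_doc: 400, tf_sum: 120 },
    ];
    assert_eq!(skip_to(&blocks, 200), SkipToResult::Success(300));
    assert_eq!(skip_to(&blocks, 1_000), SkipToResult::Terminated);
}
```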
|
||||
|
||||
/// Advance to the next block.
|
||||
///
|
||||
/// Returns false iff there are no remaining blocks.
|
||||
pub fn advance(&mut self) -> bool {
|
||||
if self.skip_reader.advance() {
|
||||
let num_bits = self.skip_reader.doc_num_bits();
|
||||
let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted(
|
||||
self.remaining_data.as_ref(),
|
||||
self.doc_offset,
|
||||
num_bits,
|
||||
);
|
||||
self.remaining_data.advance(num_consumed_bytes);
|
||||
let tf_num_bits = self.skip_reader.tf_num_bits();
|
||||
match self.freq_reading_option {
|
||||
FreqReadingOption::NoFreq => {}
|
||||
FreqReadingOption::SkipFreq => {
|
||||
let num_bytes_to_skip = compressed_block_size(tf_num_bits);
|
||||
self.remaining_data.advance(num_bytes_to_skip);
|
||||
}
|
||||
FreqReadingOption::ReadFreq => {
|
||||
let num_consumed_bytes = self
|
||||
.freq_decoder
|
||||
.uncompress_block_unsorted(self.remaining_data.as_ref(), tf_num_bits);
|
||||
self.remaining_data.advance(num_consumed_bytes);
|
||||
}
|
||||
}
|
||||
// it will be used as the next offset.
|
||||
self.doc_offset = self.doc_decoder.output(COMPRESSION_BLOCK_SIZE - 1);
|
||||
true
|
||||
} else if self.num_vint_docs > 0 {
|
||||
let num_compressed_bytes = self.doc_decoder.uncompress_vint_sorted(
|
||||
self.remaining_data.as_ref(),
|
||||
self.doc_offset,
|
||||
self.num_vint_docs,
|
||||
);
|
||||
self.remaining_data.advance(num_compressed_bytes);
|
||||
match self.freq_reading_option {
|
||||
FreqReadingOption::NoFreq | FreqReadingOption::SkipFreq => {}
|
||||
FreqReadingOption::ReadFreq => {
|
||||
self.freq_decoder
|
||||
.uncompress_vint_unsorted(self.remaining_data.as_ref(), self.num_vint_docs);
|
||||
}
|
||||
}
|
||||
self.num_vint_docs = 0;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an empty segment postings object
|
||||
pub fn empty() -> BlockSegmentPostings {
|
||||
BlockSegmentPostings {
|
||||
num_vint_docs: 0,
|
||||
|
||||
doc_decoder: BlockDecoder::new(),
|
||||
freq_decoder: BlockDecoder::with_val(1),
|
||||
freq_reading_option: FreqReadingOption::NoFreq,
|
||||
|
||||
doc_offset: 0,
|
||||
doc_freq: 0,
|
||||
|
||||
remaining_data: OwnedRead::new(vec![]),
|
||||
skip_reader: SkipReader::new(OwnedRead::new(vec![]), IndexRecordOption::Basic),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'b> Streamer<'b> for BlockSegmentPostings {
|
||||
type Item = &'b [DocId];
|
||||
|
||||
fn next(&'b mut self) -> Option<&'b [DocId]> {
|
||||
if self.advance() {
|
||||
Some(self.docs())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::BlockSegmentPostings;
|
||||
use super::BlockSegmentPostingsSkipResult;
|
||||
use super::SegmentPostings;
|
||||
use crate::common::HasLen;
|
||||
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::core::Index;
|
||||
use crate::docset::DocSet;
|
||||
use crate::postings::postings::Postings;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::Term;
|
||||
use crate::schema::INDEXED;
|
||||
use crate::DocId;
|
||||
use crate::SkipResult;
|
||||
use tantivy_fst::Streamer;
|
||||
|
||||
#[test]
|
||||
fn test_empty_segment_postings() {
|
||||
let mut postings = SegmentPostings::empty();
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
assert!(!postings.advance());
|
||||
assert!(!postings.advance());
|
||||
assert_eq!(postings.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_postings_doc_returns_terminated() {
|
||||
let mut postings = SegmentPostings::empty();
|
||||
assert_eq!(postings.doc(), TERMINATED);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
#[should_panic(expected = "Have you forgotten to call `.advance()`")]
|
||||
fn test_panic_if_doc_called_before_advance() {
|
||||
SegmentPostings::empty().doc();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_postings_doc_term_freq_returns_0() {
|
||||
let postings = SegmentPostings::empty();
|
||||
assert_eq!(postings.term_freq(), 1);
|
||||
#[should_panic(expected = "Have you forgotten to call `.advance()`")]
|
||||
fn test_panic_if_freq_called_before_advance() {
|
||||
SegmentPostings::empty().term_freq();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_doc_freq() {
|
||||
let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
|
||||
assert_eq!(docs.doc_freq(), 3);
|
||||
let delete_bitset = DeleteBitSet::for_test(&[2], 12);
|
||||
assert_eq!(docs.doc_freq_given_deletes(&delete_bitset), 2);
|
||||
let all_deleted = DeleteBitSet::for_test(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
|
||||
assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
|
||||
fn test_empty_block_segment_postings() {
|
||||
let mut postings = BlockSegmentPostings::empty();
|
||||
assert!(!postings.advance());
|
||||
assert_eq!(postings.doc_freq(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_segment_postings() {
|
||||
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
|
||||
let mut offset: u32 = 0u32;
|
||||
// checking that the block before calling advance is empty
|
||||
assert!(block_segments.docs().is_empty());
|
||||
// checking that the `doc_freq` is correct
|
||||
assert_eq!(block_segments.doc_freq(), 100_000);
|
||||
while let Some(block) = block_segments.next() {
|
||||
for (i, doc) in block.iter().cloned().enumerate() {
|
||||
assert_eq!(offset + (i as u32), doc);
|
||||
}
|
||||
offset += block.len() as u32;
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skip_right_at_new_block() {
|
||||
let mut doc_ids = (0..128).collect::<Vec<u32>>();
|
||||
doc_ids.push(129);
|
||||
doc_ids.push(130);
|
||||
{
|
||||
let block_segments = build_block_postings(&doc_ids);
|
||||
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
|
||||
assert_eq!(docset.skip_next(128), SkipResult::OverStep);
|
||||
assert_eq!(docset.doc(), 129);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 130);
|
||||
assert!(!docset.advance());
|
||||
}
|
||||
{
|
||||
let block_segments = build_block_postings(&doc_ids);
|
||||
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
|
||||
assert_eq!(docset.skip_next(129), SkipResult::Reached);
|
||||
assert_eq!(docset.doc(), 129);
|
||||
assert!(docset.advance());
|
||||
assert_eq!(docset.doc(), 130);
|
||||
assert!(!docset.advance());
|
||||
}
|
||||
{
|
||||
let block_segments = build_block_postings(&doc_ids);
|
||||
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
|
||||
assert_eq!(docset.skip_next(131), SkipResult::End);
|
||||
}
|
||||
}
|
||||
|
||||
fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let int_field = schema_builder.add_u64_field("id", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let mut last_doc = 0u32;
|
||||
for &doc in docs {
|
||||
for _ in last_doc..doc {
|
||||
index_writer.add_document(doc!(int_field=>1u64));
|
||||
}
|
||||
index_writer.add_document(doc!(int_field=>0u64));
|
||||
last_doc = doc + 1;
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(int_field);
|
||||
let term = Term::from_field_u64(int_field, 0u64);
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_segment_postings_skip() {
|
||||
for i in 0..4 {
|
||||
let mut block_postings = build_block_postings(&[3]);
|
||||
assert_eq!(
|
||||
block_postings.skip_to(i),
|
||||
BlockSegmentPostingsSkipResult::Success(0u32)
|
||||
);
|
||||
assert_eq!(
|
||||
block_postings.skip_to(i),
|
||||
BlockSegmentPostingsSkipResult::Terminated
|
||||
);
|
||||
}
|
||||
let mut block_postings = build_block_postings(&[3]);
|
||||
assert_eq!(
|
||||
block_postings.skip_to(4u32),
|
||||
BlockSegmentPostingsSkipResult::Terminated
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_segment_postings_skip2() {
|
||||
let mut docs = vec![0];
|
||||
for i in 0..1300 {
|
||||
docs.push((i * i / 100) + i);
|
||||
}
|
||||
let mut block_postings = build_block_postings(&docs[..]);
|
||||
for i in vec![0, 424, 10000] {
|
||||
assert_eq!(
|
||||
block_postings.skip_to(i),
|
||||
BlockSegmentPostingsSkipResult::Success(0u32)
|
||||
);
|
||||
let docs = block_postings.docs();
|
||||
assert!(docs[0] <= i);
|
||||
assert!(docs.last().cloned().unwrap_or(0u32) >= i);
|
||||
}
|
||||
assert_eq!(
|
||||
block_postings.skip_to(100_000),
|
||||
BlockSegmentPostingsSkipResult::Terminated
|
||||
);
|
||||
assert_eq!(
|
||||
block_postings.skip_to(101_000),
|
||||
BlockSegmentPostingsSkipResult::Terminated
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reset_block_segment_postings() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let int_field = schema_builder.add_u64_field("id", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// create two postings lists, one containing even numbers,
|
||||
// the other containing odd numbers.
|
||||
for i in 0..6 {
|
||||
let doc = doc!(int_field=> (i % 2) as u64);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
let mut block_segments;
|
||||
{
|
||||
let term = Term::from_field_u64(int_field, 0u64);
|
||||
let inverted_index = segment_reader.inverted_index(int_field);
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
block_segments = inverted_index
|
||||
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
|
||||
}
|
||||
assert!(block_segments.advance());
|
||||
assert_eq!(block_segments.docs(), &[0, 2, 4]);
|
||||
{
|
||||
let term = Term::from_field_u64(int_field, 1u64);
|
||||
let inverted_index = segment_reader.inverted_index(int_field);
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
|
||||
}
|
||||
assert!(block_segments.advance());
|
||||
assert_eq!(block_segments.docs(), &[1, 3, 5]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,16 +3,14 @@ use crate::common::{BinarySerializable, VInt};
|
||||
use crate::common::{CompositeWrite, CountingWriter};
|
||||
use crate::core::Segment;
|
||||
use crate::directory::WritePtr;
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::positions::PositionSerializer;
|
||||
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
|
||||
use crate::postings::skip::SkipSerializer;
|
||||
use crate::query::BM25Weight;
|
||||
use crate::postings::USE_SKIP_INFO_LIMIT;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::{Field, FieldEntry, FieldType};
|
||||
use crate::schema::{IndexRecordOption, Schema};
|
||||
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
|
||||
use crate::{DocId, Score};
|
||||
use std::cmp::Ordering;
|
||||
use crate::DocId;
|
||||
use std::io::{self, Write};
|
||||
|
||||
/// `InvertedIndexSerializer` is in charge of serializing
@@ -92,22 +90,20 @@ impl InvertedIndexSerializer {
&mut self,
field: Field,
total_num_tokens: u64,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field);
let postings_write = self.postings_write.for_field(field);
total_num_tokens.serialize(postings_write)?;
let positions_write = self.positions_write.for_field(field);
let positionsidx_write = self.positionsidx_write.for_field(field);
let field_type: FieldType = (*field_entry.field_type()).clone();
FieldSerializer::create(
&field_type,
total_num_tokens,
term_dictionary_write,
postings_write,
positions_write,
positionsidx_write,
fieldnorm_reader,
)
}

@@ -135,32 +131,26 @@ pub struct FieldSerializer<'a> {
impl<'a> FieldSerializer<'a> {
fn create(
field_type: &FieldType,
total_num_tokens: u64,
term_dictionary_write: &'a mut CountingWriter<WritePtr>,
postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>,
positionsidx_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> {
total_num_tokens.serialize(postings_write)?;
let mode = match field_type {
let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
text_indexing_options.index_option()
let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions())
} else {
IndexRecordOption::Basic
(false, false)
}
}
_ => IndexRecordOption::Basic,
_ => (false, false),
};
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let average_fieldnorm = fieldnorm_reader
.as_ref()
.map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
.unwrap_or(0.0);
let postings_serializer =
PostingsSerializer::new(postings_write, average_fieldnorm, mode, fieldnorm_reader);
let positions_serializer_opt = if mode.has_positions() {
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled {
Some(PositionSerializer::new(positions_write, positionsidx_write))
} else {
None
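The `average_fieldnorm` computed in the hunk above is just the field's total token count divided by the number of documents, with `0.0` as a fallback when no fieldnorm data is available. A minimal standalone sketch of that formula (not tantivy code; the function name and the zero-doc guard are assumptions of the sketch):

```rust
// Minimal sketch of the average-fieldnorm computation seen above (not tantivy code).
fn average_fieldnorm(total_num_tokens: u64, num_docs: Option<u32>) -> f32 {
    num_docs
        .filter(|&n| n > 0) // guard added in this sketch to avoid dividing by zero
        .map(|n| total_num_tokens as f32 / n as f32)
        .unwrap_or(0.0)
}

fn main() {
    // 1_200 tokens spread over 300 documents -> an average fieldnorm of 4.0.
    assert_eq!(average_fieldnorm(1_200, Some(300)), 4.0);
    // No fieldnorm reader -> 0.0, matching the `unwrap_or(0.0)` fallback above.
    assert_eq!(average_fieldnorm(1_200, None), 0.0);
}
```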
@@ -192,20 +182,18 @@ impl<'a> FieldSerializer<'a> {
/// Starts the postings for a new term.
/// * term - the term. It needs to come after the previous term according
/// to the lexicographical order.
/// * term_doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
/// * doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> {
assert!(
!self.term_open,
"Called new_term, while the previous term was not closed."
);

self.term_open = true;
self.postings_serializer.clear();
self.current_term_info = self.current_term_info();
self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms;
self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq);
Ok(term_ordinal)
}

@@ -317,21 +305,15 @@ pub struct PostingsSerializer<W: Write> {
postings_write: Vec<u8>,
skip_write: SkipSerializer,

mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,

bm25_weight: Option<BM25Weight>,

avg_fieldnorm: Score, // Average number of terms in the field for that segment.
// this value is used to compute the block wand information.
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
}

impl<W: Write> PostingsSerializer<W> {
pub fn new(
write: W,
avg_fieldnorm: Score,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
) -> PostingsSerializer<W> {
PostingsSerializer {
output_write: CountingWriter::wrap(write),
@@ -343,32 +325,11 @@ impl<W: Write> PostingsSerializer<W> {
skip_write: SkipSerializer::new(),

last_doc_id_encoded: 0u32,
mode,

fieldnorm_reader,
bm25_weight: None,

avg_fieldnorm,
termfreq_enabled,
termfreq_sum_enabled,
}
}

/// Returns the number of documents in the segment currently being serialized.
/// This function may return `None` if there are no fieldnorms for that field.
fn num_docs_in_segment(&self) -> Option<u32> {
self.fieldnorm_reader
.as_ref()
.map(|reader| reader.num_docs())
}

pub fn new_term(&mut self, term_doc_freq: u32) {
if self.mode.has_freq() {
return;
}
self.bm25_weight = self.num_docs_in_segment().map(|num_docs| {
BM25Weight::for_one_term(term_doc_freq as u64, num_docs as u64, self.avg_fieldnorm)
});
}

fn write_block(&mut self) {
{
// encode the doc ids
@@ -381,43 +342,17 @@ impl<W: Write> PostingsSerializer<W> {
// last el block 0, offset block 1,
self.postings_write.extend(block_encoded);
}
if self.mode.has_freq() {
if self.termfreq_enabled {
// encode the term_freqs
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_unsorted(&self.block.term_freqs());
self.postings_write.extend(block_encoded);
self.skip_write.write_term_freq(num_bits);
if self.mode.has_positions() {
// We serialize the sum of term freqs within the skip information
// in order to navigate through positions.
if self.termfreq_sum_enabled {
let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq);
}
let mut blockwand_params = (0u8, 0u32);
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids().iter().cloned();
let term_freqs = self.block.term_freqs().iter().cloned();
let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
blockwand_params = fieldnorms
.zip(term_freqs)
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
)
.unwrap();
}
}
let (fieldnorm_id, term_freq) = blockwand_params;
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
}
self.block.clear();
}
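The block-WAND bookkeeping in `write_block` above boils down to one reduction: among the `(fieldnorm_id, term_freq)` pairs of a block, keep the pair whose term-frequency factor is largest, so the block's best achievable score can be bounded at query time. A minimal standalone sketch of that reduction (not tantivy code; `tf_factor` here is a stand-in closure, not `BM25Weight::tf_factor`):

```rust
use std::cmp::Ordering;

// Minimal sketch (not tantivy code) of the per-block block-WAND summary:
// pick the (fieldnorm_id, term_freq) pair with the highest tf factor.
fn blockwand_params(
    fieldnorm_ids: &[u8],
    term_freqs: &[u32],
    tf_factor: impl Fn(u8, u32) -> f32,
) -> (u8, u32) {
    fieldnorm_ids
        .iter()
        .copied()
        .zip(term_freqs.iter().copied())
        .max_by(|&(left_id, left_tf), &(right_id, right_tf)| {
            tf_factor(left_id, left_tf)
                .partial_cmp(&tf_factor(right_id, right_tf))
                .unwrap_or(Ordering::Equal)
        })
        // mirrors the (0u8, 0u32) default used when no bm25 weight is available
        .unwrap_or((0u8, 0u32))
}

fn main() {
    // Toy scoring function: more occurrences in a shorter field score higher.
    let tf = |fieldnorm_id: u8, term_freq: u32| term_freq as f32 / (1.0 + fieldnorm_id as f32);
    // (fieldnorm_id, term_freq) pairs per doc in the block: the (1, 5) pair wins.
    assert_eq!(blockwand_params(&[3, 1, 7], &[2, 5, 4], tf), (1, 5));
}
```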
@@ -448,7 +383,7 @@ impl<W: Write> PostingsSerializer<W> {
self.postings_write.write_all(block_encoded)?;
}
// ... Idem for term frequencies
if self.mode.has_freq() {
if self.termfreq_enabled {
let block_encoded = self
.block_encoder
.compress_vint_unsorted(self.block.term_freqs());
@@ -456,7 +391,7 @@ impl<W: Write> PostingsSerializer<W> {
}
self.block.clear();
}
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
if doc_freq >= USE_SKIP_INFO_LIMIT {
let skip_data = self.skip_write.data();
VInt(skip_data.len() as u64).serialize(&mut self.output_write)?;
self.output_write.write_all(skip_data)?;
@@ -466,7 +401,6 @@ impl<W: Write> PostingsSerializer<W> {
}
self.skip_write.clear();
self.postings_write.clear();
self.bm25_weight = None;
Ok(())
}

@@ -1,9 +1,8 @@
use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable};
use crate::directory::OwnedBytes;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
use crate::common::BinarySerializable;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
use crate::DocId;
use owned_read::OwnedRead;

pub struct SkipSerializer {
buffer: Vec<u8>,
@@ -40,13 +39,6 @@ impl SkipSerializer {
.expect("Should never fail");
}

pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
self.buffer.push(fieldnorm_id);
let mut buf = [0u8; 8];
let bytes = serialize_vint_u32(term_freq, &mut buf);
self.buffer.extend_from_slice(bytes);
}

pub fn data(&self) -> &[u8] {
&self.buffer[..]
}
@@ -57,210 +49,81 @@ impl SkipSerializer {
}
}

#[derive(Clone)]
pub(crate) struct SkipReader {
last_doc_in_block: DocId,
pub(crate) last_doc_in_previous_block: DocId,
owned_read: OwnedBytes,
doc: DocId,
owned_read: OwnedRead,
doc_num_bits: u8,
tf_num_bits: u8,
tf_sum: u32,
skip_info: IndexRecordOption,
byte_offset: usize,
remaining_docs: u32, // number of docs remaining, including the
// documents in the current block.
block_info: BlockInfo,

position_offset: u64,
}

#[derive(Clone, Eq, PartialEq, Copy, Debug)]
pub(crate) enum BlockInfo {
BitPacked {
doc_num_bits: u8,
tf_num_bits: u8,
tf_sum: u32,
block_wand_fieldnorm_id: u8,
block_wand_term_freq: u32,
},
VInt {
num_docs: u32,
},
}

impl Default for BlockInfo {
fn default() -> Self {
BlockInfo::VInt { num_docs: 0u32 }
}
}

impl SkipReader {
pub fn new(data: OwnedBytes, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
let mut skip_reader = SkipReader {
last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0
} else {
TERMINATED
},
last_doc_in_previous_block: 0u32,
pub fn new(data: OwnedRead, skip_info: IndexRecordOption) -> SkipReader {
SkipReader {
doc: 0u32,
owned_read: data,
skip_info,
block_info: BlockInfo::VInt { num_docs: doc_freq },
byte_offset: 0,
remaining_docs: doc_freq,
position_offset: 0u64,
};
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
skip_reader.read_block_info();
doc_num_bits: 0u8,
tf_num_bits: 0u8,
tf_sum: 0u32,
}
skip_reader
}

pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) {
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0
} else {
TERMINATED
};
self.last_doc_in_previous_block = 0u32;
pub fn reset(&mut self, data: OwnedRead) {
self.doc = 0u32;
self.owned_read = data;
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.byte_offset = 0;
self.remaining_docs = doc_freq;
self.position_offset = 0u64;
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
self.read_block_info();
}
self.doc_num_bits = 0u8;
self.tf_num_bits = 0u8;
self.tf_sum = 0u32;
}

// Returns the block max score for this block if available.
//
// The block max score is available for every full bit-packed block,
// but not available for the last VInt-encoded incomplete block.
pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
match self.block_info {
BlockInfo::BitPacked {
block_wand_fieldnorm_id,
block_wand_term_freq,
..
} => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
BlockInfo::VInt { .. } => None,
}
pub fn total_block_len(&self) -> usize {
(self.doc_num_bits + self.tf_num_bits) as usize * COMPRESSION_BLOCK_SIZE / 8
}

pub(crate) fn last_doc_in_block(&self) -> DocId {
self.last_doc_in_block
pub fn doc(&self) -> DocId {
self.doc
}

pub fn position_offset(&self) -> u64 {
self.position_offset
pub fn doc_num_bits(&self) -> u8 {
self.doc_num_bits
}

#[inline(always)]
pub fn byte_offset(&self) -> usize {
self.byte_offset
}

fn read_block_info(&mut self) {
let doc_delta = {
let bytes = self.owned_read.as_slice();
let mut buf = [0; 4];
buf.copy_from_slice(&bytes[..4]);
u32::from_le_bytes(buf)
};
self.last_doc_in_block += doc_delta as DocId;
let doc_num_bits = self.owned_read.as_slice()[4];

match self.skip_info {
IndexRecordOption::Basic => {
self.owned_read.advance(5);
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits: 0,
tf_sum: 0,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0,
};
}
IndexRecordOption::WithFreqs => {
let bytes = self.owned_read.as_slice();
let tf_num_bits = bytes[5];
let block_wand_fieldnorm_id = bytes[6];
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(&bytes[7..]);
self.owned_read.advance(7 + num_bytes);
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum: 0,
block_wand_fieldnorm_id,
block_wand_term_freq,
};
}
IndexRecordOption::WithFreqsAndPositions => {
let bytes = self.owned_read.as_slice();
let tf_num_bits = bytes[5];
let tf_sum = {
let mut buf = [0; 4];
buf.copy_from_slice(&bytes[6..10]);
u32::from_le_bytes(buf)
};
let block_wand_fieldnorm_id = bytes[10];
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(&bytes[11..]);
self.owned_read.advance(11 + num_bytes);
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum,
block_wand_fieldnorm_id,
block_wand_term_freq,
};
}
}
}

pub fn block_info(&self) -> BlockInfo {
self.block_info
}

/// Advance the skip reader to the block that may contain the target.
/// Number of bits used to encode term frequencies
///
/// If the target is larger than all documents, the skip_reader
/// then advances to the last VInt block.
pub fn seek(&mut self, target: DocId) -> bool {
if self.last_doc_in_block() >= target {
return false;
}
loop {
self.advance();
if self.last_doc_in_block() >= target {
return true;
}
}
/// 0 if term frequencies are not enabled.
pub fn tf_num_bits(&self) -> u8 {
self.tf_num_bits
}

pub fn advance(&mut self) {
match self.block_info {
BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum,
..
} => {
self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
self.position_offset += tf_sum as u64;
}
BlockInfo::VInt { num_docs } => {
debug_assert_eq!(num_docs, self.remaining_docs);
self.remaining_docs = 0;
self.byte_offset = std::usize::MAX;
}
}
self.last_doc_in_previous_block = self.last_doc_in_block;
if self.remaining_docs >= COMPRESSION_BLOCK_SIZE as u32 {
self.read_block_info();
pub fn tf_sum(&self) -> u32 {
self.tf_sum
}

pub fn advance(&mut self) -> bool {
if self.owned_read.as_ref().is_empty() {
false
} else {
self.last_doc_in_block = TERMINATED;
self.block_info = BlockInfo::VInt {
num_docs: self.remaining_docs,
};
let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
self.doc += doc_delta as DocId;
self.doc_num_bits = self.owned_read.get(0);
match self.skip_info {
IndexRecordOption::Basic => {
self.owned_read.advance(1);
}
IndexRecordOption::WithFreqs => {
self.tf_num_bits = self.owned_read.get(1);
self.owned_read.advance(2);
}
IndexRecordOption::WithFreqsAndPositions => {
self.tf_num_bits = self.owned_read.get(1);
self.owned_read.advance(2);
self.tf_sum =
u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
}
}
true
}
}
}
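To make the skip-entry layout concrete: for `IndexRecordOption::WithFreqs`, `read_block_info` above consumes a little-endian u32 doc value, one byte of doc bit width, one byte of term-frequency bit width, one byte of block-WAND fieldnorm id, and a variable-length block-WAND term frequency. A minimal encode/decode sketch of that layout (not tantivy code; the LEB128-style vint below is an assumption, the actual on-disk vint encoding may differ):

```rust
// Minimal sketch (not tantivy code) of one `WithFreqs`-style skip entry,
// using the same values as the test above: write_doc(1, 2), write_term_freq(3),
// write_blockwand_max(13, 3).
fn write_entry(buf: &mut Vec<u8>, doc: u32, doc_num_bits: u8, tf_num_bits: u8,
               fieldnorm_id: u8, term_freq: u32) {
    buf.extend_from_slice(&doc.to_le_bytes()); // 4-byte little-endian doc value
    buf.push(doc_num_bits);                    // bit width of the doc id block
    buf.push(tf_num_bits);                     // bit width of the term-freq block
    buf.push(fieldnorm_id);                    // block-WAND fieldnorm id
    let mut v = term_freq;                     // block-WAND term freq as a vint
    loop {
        let byte = (v & 0x7f) as u8;
        v >>= 7;
        if v == 0 { buf.push(byte); break; }
        buf.push(byte | 0x80);
    }
}

fn read_entry(bytes: &[u8]) -> (u32, u8, u8, u8, u32, usize) {
    let mut doc_bytes = [0u8; 4];
    doc_bytes.copy_from_slice(&bytes[..4]);
    let doc = u32::from_le_bytes(doc_bytes);
    let (doc_num_bits, tf_num_bits, fieldnorm_id) = (bytes[4], bytes[5], bytes[6]);
    let (mut term_freq, mut shift, mut i) = (0u32, 0u32, 7usize);
    loop {
        let byte = bytes[i];
        term_freq |= ((byte & 0x7f) as u32) << shift;
        shift += 7;
        i += 1;
        if byte & 0x80 == 0 { break; }
    }
    (doc, doc_num_bits, tf_num_bits, fieldnorm_id, term_freq, i)
}

fn main() {
    let mut buf = Vec::new();
    write_entry(&mut buf, 1, 2, 3, 13, 3);
    // 4 + 1 + 1 + 1 + 1 = 8 bytes consumed for this entry.
    assert_eq!(read_entry(&buf), (1, 2, 3, 13, 3, 8));
}
```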
@@ -268,11 +131,9 @@ impl SkipReader {
#[cfg(test)]
mod tests {

use super::BlockInfo;
use super::IndexRecordOption;
use super::{SkipReader, SkipSerializer};
use crate::directory::OwnedBytes;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use owned_read::OwnedRead;

#[test]
fn test_skip_with_freq() {
@@ -280,44 +141,20 @@ mod tests {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_term_freq(3u8);
skip_serializer.write_blockwand_max(13u8, 3u32);
skip_serializer.write_doc(5u32, 5u8);
skip_serializer.write_term_freq(2u8);
skip_serializer.write_blockwand_max(8u8, 2u32);
skip_serializer.data().to_owned()
};
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader =
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::WithFreqs);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info,
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 3u8,
tf_sum: 0,
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
}
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
tf_num_bits: 2u8,
tf_sum: 0,
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
let mut skip_reader = SkipReader::new(OwnedRead::new(buf), IndexRecordOption::WithFreqs);
assert!(skip_reader.advance());
assert_eq!(skip_reader.doc(), 1u32);
assert_eq!(skip_reader.doc_num_bits(), 2u8);
assert_eq!(skip_reader.tf_num_bits(), 3u8);
assert!(skip_reader.advance());
assert_eq!(skip_reader.doc(), 5u32);
assert_eq!(skip_reader.doc_num_bits(), 5u8);
assert_eq!(skip_reader.tf_num_bits(), 2u8);
assert!(!skip_reader.advance());
}

#[test]
@@ -328,62 +165,13 @@ mod tests {
skip_serializer.write_doc(5u32, 5u8);
skip_serializer.data().to_owned()
};
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader =
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}

#[test]
fn test_skip_multiple_of_block_size() {
let buf = {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.data().to_owned()
};
let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
let mut skip_reader =
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
let mut skip_reader = SkipReader::new(OwnedRead::new(buf), IndexRecordOption::Basic);
assert!(skip_reader.advance());
assert_eq!(skip_reader.doc(), 1u32);
assert_eq!(skip_reader.doc_num_bits(), 2u8);
assert!(skip_reader.advance());
assert_eq!(skip_reader.doc(), 5u32);
assert_eq!(skip_reader.doc_num_bits(), 5u8);
assert!(!skip_reader.advance());
}
}

@@ -206,8 +206,8 @@ mod tests {
fn test_stack_long() {
let mut heap = MemoryArena::new();
let mut stack = ExpUnrolledLinkedList::new();
let data: Vec<u32> = (0..100).collect();
for &el in &data {
let source: Vec<u32> = (0..100).collect();
for &el in &source {
assert!(stack
.writer(&mut heap)
.write_u32::<LittleEndian>(el)
@@ -221,7 +221,7 @@ mod tests {
result.push(LittleEndian::read_u32(&remaining[..4]));
remaining = &remaining[4..];
}
assert_eq!(&result[..], &data[..]);
assert_eq!(&result[..], &source[..]);
}

#[test]

@@ -106,8 +106,8 @@ impl MemoryArena {
///
/// Internally, it counts a number of `1MB` pages
/// and therefore delivers an upper bound.
pub fn mem_usage(&self) -> usize {
self.pages.len() * PAGE_SIZE
pub fn mem_usage(&self) -> u64 {
(self.pages.len() as u64) * (PAGE_SIZE as u64)
}

pub fn write_at<Item: Copy + 'static>(&mut self, addr: Addr, val: Item) {
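Since `mem_usage` counts whole pages, it reports an upper bound: with 1MB pages, three allocated pages come back as 3 * 2^20 bytes even if the last page is only partially filled. A minimal sketch of that arithmetic (not tantivy code; the `PAGE_SIZE` constant and the free function are assumptions of the sketch):

```rust
// Minimal sketch (not tantivy code) of the page-based upper-bound accounting above.
const PAGE_SIZE: u64 = 1 << 20; // the "1MB pages" the doc comment describes

fn mem_usage(num_pages: usize) -> u64 {
    (num_pages as u64) * PAGE_SIZE
}

fn main() {
    // Three pages are reported as a full 3 MB, even if the third is half empty.
    assert_eq!(mem_usage(3), 3 * (1u64 << 20));
}
```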
Some files were not shown because too many files have changed in this diff.